hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8caee3dbd16da48114fd3363e6f670aa34f3a008 | 3,456 | py | Python | main.py | victorsevero/sevs-move | 62f513c7184cd56c033bda421dec16b702fb1b04 | [
"MIT"
] | null | null | null | main.py | victorsevero/sevs-move | 62f513c7184cd56c033bda421dec16b702fb1b04 | [
"MIT"
] | null | null | null | main.py | victorsevero/sevs-move | 62f513c7184cd56c033bda421dec16b702fb1b04 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import os
import shutil
import image_slicer
import PIL.Image
from pynput.keyboard import Key, Controller
import subprocess
import pandas as pd
from tkinter import *
import time
# from IPython import embed
# Directory of reference icon images (with background) used for template matching.
PATH_BG = 'icons_with_bg\\'
# Working directory where the sliced board tiles are written (cleaned up after a run).
PATH_SLICES = 'slices\\'
# def interface():
# top = Tk()
# top.title('Sevs Move')
# top.geometry('800x600')
# # top.wm_iconbitmap('icon.ico')
#
# b = Button(top, text='Configurar time', command=ask_pokemons)
# b.pack()
#
# B = Button(top, text='Scan & Fill', command=scan_e_preenchimento)
#
# top.mainloop()
def take_screenshot():
    """Capture the screen via the helper batch script and return the image.

    The batch script writes the capture to 'screen.png' in the working
    directory (the caller later removes that file); we load and return it so
    callers can process it directly.

    Returns:
        The screenshot as loaded by cv2 (BGR ndarray), or None if
        'screen.png' could not be read.
    """
    subprocess.call('screenshot.bat')
    # The original returned None while the caller did
    # `screen = take_screenshot()` and then sliced `screen`; load the file
    # the script produces and return it (see the commented-out
    # cv2.imread('screen.png') at the call site).
    return cv2.imread('screen.png')
def slice_screenshot(screen):
    """Crop the 6x6 puzzle board out of *screen*, slice it into 36 tiles,
    save the tiles under PATH_SLICES and return that directory's listing."""
    tile_size = 175
    top, left = 714, 15  # board position on screen (pixels)
    board_img = screen[top:top + 6 * tile_size, left:left + 6 * tile_size]
    cv2.imwrite('board.png', board_img)
    tiles = image_slicer.slice('board.png', 36, save=False)
    image_slicer.save_tiles(tiles, directory=PATH_SLICES)
    return os.listdir(PATH_SLICES)
def ask_pokemons():
    """Interactively ask which pokémon/disruptions are on the board and
    return their icon filenames (zero-padded species ids from pokemon.csv)."""
    dex = pd.read_csv('pokemon.csv').set_index('identifier')
    name_to_number = dex.to_dict()['species_id']
    total = int(input('Quantos pokรฉmon/disruptions hรก no total? '))
    names = []
    for idx in range(total):
        names.append(input('Qual o %d pokรฉmon/disruption? ' % (idx + 1)).lower())
    return ['%s.png' % str(name_to_number[name]).zfill(3) for name in names]
def resize128(file):
    """Resize the image at *file* in place to 128px wide, keeping aspect ratio.

    Args:
        file: path to an image; it is overwritten with the resized version.
    """
    basedim = 128
    img = PIL.Image.open(file)
    # Scale the height by the same factor used to bring the width to 128px.
    wpercent = basedim / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    # PIL.Image.ANTIALIAS was an alias of LANCZOS and was removed in
    # Pillow 10; LANCZOS gives identical results on older Pillow too.
    img = img.resize((basedim, hsize), PIL.Image.LANCZOS)
    img.save(file)
def convert_png_to_key(pkmn_list, board):
    """Translate each icon filename on *board* into its bound keyboard key.

    Each entry of *pkmn_list* is assigned a key from ``key_list`` by
    position, then every board cell (an icon filename) is mapped to its key.

    Args:
        pkmn_list: icon filenames, in the order their keys were configured.
        board: icon filenames, one per board cell.

    Returns:
        List of single-character key names, one per board cell.
    """
    key_list = ['a', 's', 'e', 'r', 'g',
                'd', 'w', 't', 'q', 'b',
                'c', 'x', 'v', 'z', 'h',
                'i', 'j', 'k', 'l', 'm',
                'n', 'o', 'p', 'u', 'y']
    # Pair filenames with keys positionally instead of the previous O(n^2)
    # list.index() lookups; the unused pokemon.csv read was also dropped.
    png_to_key = dict(zip(pkmn_list, key_list))
    return [png_to_key[pkmn] for pkmn in board]
def match_board(slices_dir, pkmn_imgs):
    """Identify the pokémon in every board tile via template matching.

    For each saved tile, compare it against every reference icon and keep
    the best normalized cross-correlation score, then translate the matched
    icons into their configured keyboard keys.
    """
    matched_icons = []
    for tile_name in slices_dir:
        resize128(PATH_SLICES + tile_name)
        tile_img = cv2.imread(PATH_SLICES + tile_name)
        best_score = 0
        best_icon = ''
        for icon_name in pkmn_imgs:
            icon_img = cv2.imread(PATH_BG + icon_name)
            score = cv2.matchTemplate(tile_img, icon_img, cv2.TM_CCOEFF_NORMED)
            if score > best_score:
                best_score = score
                best_icon = icon_name
        matched_icons.append(best_icon)
    return convert_png_to_key(pkmn_imgs, matched_icons)
def scan_e_preenchimento():
    """Scan the on-screen board and type the matching key sequence.

    End-to-end flow: screenshot -> crop/slice board into 36 tiles -> match
    each tile to a configured pokémon icon -> press one key per tile, then
    remove all temporary artifacts.
    """
    if not os.path.exists(PATH_SLICES):
        os.makedirs(PATH_SLICES)
    # screen = cv2.imread('screen.png')
    screen = take_screenshot()
    slices_dir = slice_screenshot(screen)
    # pkmn_imgs = ['302.png', '491.png', '649.png', '717.png']
    pkmn_imgs = ask_pokemons()
    board = match_board(slices_dir, pkmn_imgs)
    keyboard = Controller()
    # Give the user time to focus the game window before keys are sent.
    time.sleep(2)
    for key in board:
        keyboard.press(key)
        keyboard.release(key)
    # Clean up intermediate artifacts (tile slices and screenshots).
    shutil.rmtree(PATH_SLICES)
    os.remove('screen.png')
    os.remove('board.png')
# Entry point: run the full scan-and-fill pipeline when executed directly.
if __name__ == '__main__':
    scan_e_preenchimento()
| 24.167832 | 85 | 0.618634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 746 | 0.215669 |
8caf6ff99d80c76a542d8a7fdffc2f654af14a1f | 42 | py | Python | helpers/__init__.py | sspbft/odin | 30681d314748523cfbade15fc365510c78bbc711 | [
"MIT"
] | null | null | null | helpers/__init__.py | sspbft/odin | 30681d314748523cfbade15fc365510c78bbc711 | [
"MIT"
] | null | null | null | helpers/__init__.py | sspbft/odin | 30681d314748523cfbade15fc365510c78bbc711 | [
"MIT"
] | null | null | null | """Package containing various helpers."""
| 21 | 41 | 0.738095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.97619 |
8cb1703d69bcca54d3660bd5eb995176d608f088 | 1,790 | py | Python | ch04/ex05.py | lee-hyeonseung/lab_dl | b8906247b6e0e2586f538081e2efaf47dac34972 | [
"MIT"
] | 1 | 2020-01-08T09:14:46.000Z | 2020-01-08T09:14:46.000Z | ch04/ex05.py | lee-hyeonseung/lab_dl | b8906247b6e0e2586f538081e2efaf47dac34972 | [
"MIT"
] | null | null | null | ch04/ex05.py | lee-hyeonseung/lab_dl | b8906247b6e0e2586f538081e2efaf47dac34972 | [
"MIT"
] | null | null | null | import numpy as np
def numerical_diff(fn, x):
    """Estimate the derivative of *fn* at *x* with the central difference.

    Returns (fn(x+h) - fn(x-h)) / (2h) using a small step h = 1e-4.
    """
    step = 1e-4  # 0.0001
    return (fn(x + step) - fn(x - step)) / (2 * step)
def f1(x):
    """Example function: 0.001 * x^2 + 0.01 * x."""
    quadratic_term = 0.001 * x ** 2
    linear_term = 0.01 * x
    return quadratic_term + linear_term
def f1_prime(x):
    """Analytic derivative of f1 (no numerical approximation): 0.002*x + 0.01."""
    return 0.002 * x + 0.01
def f2(x):
    """Sum of squares of the ndarray x = [x0, x1, ...]: x0**2 + x1**2 + ..."""
    return np.sum(np.square(x))
def partial_gradient(fn, x):
    """Numerically compute the gradient of *fn* at the 1-D ndarray *x*.

    Each component i of the result is the central-difference estimate of
    d fn / d x_i, i.e. (fn(..., x_i + h, ...) - fn(..., x_i - h, ...)) / 2h.

    Args:
        fn: function of an ndarray, fn(x0, x1, ..., xn) -> scalar.
        x: point at which to evaluate the gradient (converted to float).

    Returns:
        ndarray of the same shape as x holding the partial derivatives.
    """
    # Work on a float copy; np.float was removed in NumPy 1.24.
    x = x.astype(float)
    # gradient: np.zeros_like(x) -> array of zeros, same shape as x.
    gradient = np.zeros_like(x)
    h = 1e-4  # 0.0001
    for i in range(x.size):
        ith_val = x[i]
        x[i] = ith_val + h
        fh1 = fn(x)
        x[i] = ith_val - h
        fh2 = fn(x)
        gradient[i] = (fh1 - fh2) / (2 * h)
        # Restore the original value so later components are evaluated at x,
        # not at a point still perturbed in dimension i (bug in the original).
        x[i] = ith_val
    return gradient
def f3(x):
    """Polynomial example: x0 + x1^2 + x2^3."""
    x0, x1, x2 = x[0], x[1], x[2]
    return x0 + x1 ** 2 + x2 ** 3
def f4(x):
    """Quadratic form x0**2 + 2*x0*x1 + x1**2, i.e. (x0 + x1)**2.

    Fixes the original ``x[1]*2`` typo, which contradicted the definition
    documented where this function is exercised (f4 = x0**2 + 2*x0*x1 + x1**2).
    """
    return x[0] ** 2 + 2 * x[0] * x[1] + x[1] ** 2
# Demo: exercise the numerical-differentiation helpers defined above.
if __name__ == '__main__':
    # Compare the numerical estimate of f1'(3) with the analytic value.
    estimate = numerical_diff(f1, 3)
    print('๊ทผ์ฌ๊ฐ:', estimate)
    real = f1_prime(3)
    print('์ค์ ๊ฐ:', real)
    # Partial derivatives of f2 at the point (3, 4), one axis at a time,
    # by freezing the other coordinate inside a lambda.
    estimate_1 = numerical_diff(lambda x: x**2 + 4**2, 3)
    print(estimate_1)
    estimate_2 = numerical_diff(lambda x: 3**2 + x**2, 4)
    print(estimate_2)
    # Full gradient of f2 at the point (3, 4).
    gradient = partial_gradient(f2, np.array([3, 4]))
    print(gradient)
    # f3 = x0 + x1 ** 2 + x2 ** 3
    # Gradient at (1, 1, 1): df/dx0 = 1, df/dx1 = 2, df/dx2 = 3
    gradient2 = partial_gradient(f3, np.array([1, 1, 1]))
    print(gradient2)
    # f4 = x0**2 + 2 * x0 * x1 + x1**2
    # At (1, 2): df/dx0 = ?, df/dx1 = ?
    gradient3 = partial_gradient(f4, np.array([1, 2]))
    print(gradient3)
8cb71fe9be3f0683efa81efd43d2bbd8f86fa89d | 5,628 | py | Python | pureport_client/commands/accounts/audit_log.py | ellievaughn/pureport-python-client | e0c80c7200549723820169da3d137dd771be3f11 | [
"MIT"
] | 4 | 2019-04-29T17:45:41.000Z | 2019-07-24T15:47:04.000Z | pureport_client/commands/accounts/audit_log.py | ellievaughn/pureport-python-client | e0c80c7200549723820169da3d137dd771be3f11 | [
"MIT"
] | 17 | 2019-05-27T23:35:31.000Z | 2020-10-26T11:41:10.000Z | pureport_client/commands/accounts/audit_log.py | ellievaughn/pureport-python-client | e0c80c7200549723820169da3d137dd771be3f11 | [
"MIT"
] | 2 | 2020-07-14T11:40:32.000Z | 2020-09-23T19:51:53.000Z | # -*- coding: utf-8 -*_
#
# Copyright (c) 2020, Pureport, Inc.
# All Rights Reserved
from __future__ import absolute_import
from click import (
option,
Choice
)
from pureport_client.helpers import format_date
from pureport_client.commands import (
CommandBase,
AccountsMixin
)
# Audit event types accepted by the audit-log endpoint's `eventTypes` filter.
EVENT_TYPES = ('USER_LOGIN', 'USER_FORGOT_PASSWORD', 'API_LOGIN',
               'ACCOUNT_CREATE', 'ACCOUNT_UPDATE', 'ACCOUNT_DELETE',
               'ACCOUNT_BILLING_CREATE', 'ACCOUNT_BILLING_UPDATE',
               'ACCOUNT_BILLING_DELETE', 'NETWORK_CREATE',
               'NETWORK_UPDATE', 'NETWORK_DELETE', 'CONNECTION_CREATE',
               'CONNECTION_UPDATE', 'CONNECTION_DELETE',
               'GATEWAY_CREATE', 'GATEWAY_UPDATE', 'GATEWAY_DELETE',
               'API_KEY_CREATE', 'API_KEY_UPDATE', 'API_KEY_DELETE',
               'ROLE_CREATE', 'ROLE_UPDATE', 'ROLE_DELETE', 'USER_CREATE',
               'USER_UPDATE', 'USER_DELETE', 'USER_DOMAIN_CREATE',
               'USER_DOMAIN_UPDATE', 'USER_DOMAIN_DELETE', 'PORT_CREATE',
               'PORT_UPDATE', 'PORT_DELETE', 'MEMBER_INVITE_CREATE',
               'MEMBER_INVITE_ACCEPT', 'MEMBER_INVITE_UPDATE',
               'MEMBER_INVITE_DELETE', 'ACCOUNT_MEMBER_CREATE',
               'ACCOUNT_MEMBER_UPDATE', 'ACCOUNT_MEMBER_DELETE',
               'CONNECTION_STATE_CHANGE', 'GATEWAY_STATE_CHANGE',
               'GATEWAY_BGP_STATUS_CHANGE', 'GATEWAY_IPSEC_STATUS_CHANGE',
               'NOTIFICATION_CREATE', 'NOTIFICATION_UPDATE',
               'NOTIFICATION_DELETE', 'TASK_CREATE',
               'TASK_UPDATE', 'TASK_DELETE')
# Entity types an audit entry's subject may refer to (`subjectType` filter).
SUBJECT_TYPES = ('ACCOUNT', 'CONNECTION', 'NETWORK', 'USER',
                 'USER_DOMAIN', 'ROLE', 'API_KEY', 'GATEWAY',
                 'NOTIFICATION', 'ACCOUNT_INVITE', 'ACCOUNT_BILLING',
                 'PORT', 'ACCOUNT_MEMBER', 'TASK')
# Fields the audit-log results can be sorted on (`sort` option).
SORT_CHOICES = ('timestamp', 'eventType', 'subjectType', 'ipAddress',
                'userAgent', 'source', 'result')
class Command(AccountsMixin, CommandBase):
    """Display Pureport account audit log details
    """
    # Each click option below maps 1:1 onto a query parameter of the
    # audit-log endpoint; `query` translates them to the API's camelCase
    # names before issuing the request.
    @option('-pn', '--page_number', type=int, help='The page number for pagination.')
    @option('-ps', '--page_size', type=int, help='The page size for pagination.')
    @option('-s', '--sort', type=Choice(SORT_CHOICES),
            help='How should the data be sorted.')
    @option('-sd', '--sort_direction', type=Choice(['ASC', 'DESC']),
            help='The direction the results will be sorted.')
    @option('-st', '--start_time',
            help='The start time for selecting results between a time range.')
    @option('-et', '--end_time',
            help='The end time for selecting results between a time range.')
    @option('-i', '--include_child_accounts', is_flag=True,
            help='If the results should include entries from child accounts.')
    @option('-ev', '--event_types', type=Choice(EVENT_TYPES),
            help='Limit the results to particular event types.')
    @option('-r', '--result', type=Choice(('SUCCESS', 'FAILURE')),
            help='If the result was successful or not.')
    @option('-pi', '--principal_id',
            help='The principal id, e.g. user or api key id.')
    @option('-ci', '--correlation_id',
            help='The correlation id, e.g. id of audit event to surface related events.')
    @option('-si', '--subject_id',
            help='The subject id, e.g. id of audit subject '
                 '(connection, network, etc.) to surface related events.')
    @option('-su', '--subject_type', type=Choice(SUBJECT_TYPES),
            help='The subject type')
    @option('-ics', '--include_child_subjects', is_flag=True,
            help='If the results should include entries from child subjects from the subject id.')
    def query(self, page_number=None, page_size=None, sort=None, sort_direction=None,
              start_time=None, end_time=None, include_child_accounts=None, event_types=None,
              result=None, principal_id=None, ip_address=None, correlation_id=None, subject_id=None,
              subject_type=None, include_child_subjects=None):
        """
        Query the audit log for this account.
        \f
        :param int page_number:
        :param int page_size:
        :param str sort:
        :param str sort_direction:
        :param str start_time: formatted as 'YYYY-MM-DDT00:00:00.000Z'
        :param str end_time: formatted as 'YYYY-MM-DDT00:00:00.000Z'
        :param bool include_child_accounts:
        :param list[str] event_types:
        :param str result:
        :param str principal_id:
        :param str ip_address:
        :param str correlation_id:
        :param str subject_id:
        :param str subject_type:
        :param bool include_child_subjects:
        :rtype: Page[AuditEntry]
        :raises: .exception.ClientHttpError
        """
        # Translate CLI argument names to the API's camelCase query
        # parameters; the two time bounds are normalized via format_date.
        params = {
            'pageNumber': page_number,
            'pageSize': page_size,
            'sort': sort,
            'sortDirection': sort_direction,
            'startTime': format_date(start_time),
            'endTime': format_date(end_time),
            'includeChildAccounts': include_child_accounts,
            'eventTypes': event_types,
            'result': result,
            'principalId': principal_id,
            'ipAddress': ip_address,
            'correlationId': correlation_id,
            'subjectId': subject_id,
            'subjectType': subject_type,
            'includeChildSubjects': include_child_subjects
        }
        # Send only parameters that were explicitly provided. NOTE(review):
        # the `if v` test also drops falsy values such as 0, False or '' —
        # confirm no filter legitimately takes those values.
        kwargs = {'query': dict(((k, v) for k, v in params.items() if v))}
        return self.__call__('get', 'auditLog', **kwargs)
| 43.96875 | 100 | 0.614428 | 3,655 | 0.649431 | 0 | 0 | 3,549 | 0.630597 | 0 | 0 | 3,231 | 0.574094 |
8cb73515afe2c119b5f6d38d0a2b556bf02c8217 | 3,694 | py | Python | extras.py | pyaf/severstal-steel-defect-detection | 68a0df4164e84803b6cba78597a079d3736b4e00 | [
"MIT"
] | null | null | null | extras.py | pyaf/severstal-steel-defect-detection | 68a0df4164e84803b6cba78597a079d3736b4e00 | [
"MIT"
] | null | null | null | extras.py | pyaf/severstal-steel-defect-detection | 68a0df4164e84803b6cba78597a079d3736b4e00 | [
"MIT"
] | null | null | null | import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
import pdb
import cv2
import time
import json
import torch
import random
import scipy
import logging
import traceback
import numpy as np
from datetime import datetime
# from config import HOME
from tensorboard_logger import log_value, log_images
from matplotlib import pyplot as plt
plt.switch_backend("agg")
def logger_init(save_folder):
    """Configure root logging to <save_folder>/log.txt and echo to a stream.

    Creates *save_folder* if needed, appends timestamped records to the log
    file, and returns this module's logger with a stream handler attached.
    """
    mkdir(save_folder)
    logging.basicConfig(
        filename=os.path.join(save_folder, "log.txt"),
        filemode="a",
        level=logging.DEBUG,
        format="%(asctime)s %(message)s",
        datefmt="%H:%M:%S",
    )
    stream_handler = logging.StreamHandler()
    module_logger = logging.getLogger(__name__)
    module_logger.addHandler(stream_handler)
    return module_logger
def plot_ROC(roc, targets, predictions, phase, epoch, folder):
    """Plot the ROC curve for (targets, predictions), save it under
    <folder>/ROC_plots, and log the rendered image to tensorboard.

    NOTE(review): ``roc_curve`` is not imported anywhere in this file
    (presumably ``sklearn.metrics.roc_curve``) — confirm, otherwise this
    function raises NameError at runtime.
    """
    roc_plot_folder = os.path.join(folder, "ROC_plots")
    mkdir(os.path.join(roc_plot_folder))
    fpr, tpr, thresholds = roc_curve(targets, predictions)
    # Encode phase/epoch/score in the file name, e.g. ROC_val_3_0.9123.jpg
    roc_plot_name = "ROC_%s_%s_%0.4f" % (phase, epoch, roc)
    roc_plot_path = os.path.join(roc_plot_folder, roc_plot_name + ".jpg")
    fig = plt.figure(figsize=(10, 5))
    # Diagonal reference line (random classifier) plus the actual curve.
    plt.plot([0, 1], [0, 1], linestyle="--")
    plt.plot(fpr, tpr, marker=".")
    plt.legend(["diagonal-line", roc_plot_name])
    fig.savefig(roc_plot_path, bbox_inches="tight", pad_inches=0)
    plt.close(fig)  # see footnote [1]
    # Re-read the saved figure so it can be logged as an image.
    plot = cv2.imread(roc_plot_path)
    log_images(roc_plot_name, [plot], epoch)
def print_time(log, start, string):
    """Log *string* followed by the minutes:seconds elapsed since *start*."""
    elapsed = time.time() - start
    minutes, seconds = divmod(elapsed, 60)
    log(string + ": %02d:%02d" % (minutes, seconds))
def iter_log(log, phase, epoch, iteration, epoch_size, loss, start):
    """Log one iteration: phase, epoch, progress, loss and elapsed mm:ss.

    *log* is called logging-style, i.e. with a %-format string followed by
    its arguments; *loss* must expose .item() (e.g. a torch scalar tensor).
    """
    elapsed = time.time() - start
    minutes, seconds = divmod(elapsed, 60)
    log(
        "%s epoch: %d (%d/%d) loss: %.4f || %02d:%02d",
        phase,
        epoch,
        iteration,
        epoch_size,
        loss.item(),
        minutes,
        seconds,
    )
def mkdir(folder):
    """Create *folder* (including missing parents) if it does not exist.

    Uses makedirs(exist_ok=True), so the check-then-create race of the
    original os.path.exists / os.mkdir pair cannot raise, and nested save
    paths are created in one call.
    """
    os.makedirs(folder, exist_ok=True)
def save_hyperparameters(trainer, remark):
    """Append the trainer's hyperparameters to <save_folder>/parameters.txt.

    Records a timestamped snapshot of model/data/optimizer settings plus a
    free-form *remark*, then echoes the same text to stdout.
    """
    hp_file = os.path.join(trainer.save_folder, "parameters.txt")
    time_now = datetime.now()
    # Train-time augmentations as composed in the train dataloader's dataset.
    augmentations = trainer.dataloaders["train"].dataset.transforms.transforms
    # pdb.set_trace()
    string_to_write = (
        f"Time: {time_now}\n"
        + f"model_name: {trainer.model_name}\n"
        + f"train_df_name: {trainer.train_df_name}\n"
        #+ f"images_folder: {trainer.images_folder}\n"
        + f"resume: {trainer.resume}\n"
        + f"pretrained: {trainer.pretrained}\n"
        + f"pretrained_path: {trainer.pretrained_path}\n"
        + f"folder: {trainer.folder}\n"
        + f"fold: {trainer.fold}\n"
        + f"total_folds: {trainer.total_folds}\n"
        + f"num_samples: {trainer.num_samples}\n"
        + f"sampling class weights: {trainer.class_weights}\n"
        + f"size: {trainer.size}\n"
        + f"top_lr: {trainer.top_lr}\n"
        + f"base_lr: {trainer.base_lr}\n"
        + f"num_workers: {trainer.num_workers}\n"
        + f"batchsize: {trainer.batch_size}\n"
        + f"momentum: {trainer.momentum}\n"
        + f"mean: {trainer.mean}\n"
        + f"std: {trainer.std}\n"
        + f"start_epoch: {trainer.start_epoch}\n"
        + f"augmentations: {augmentations}\n"
        + f"criterion: {trainer.criterion}\n"
        + f"optimizer: {trainer.optimizer}\n"
        + f"remark: {remark}\n"
    )
    # Append (not overwrite) so successive runs accumulate in one file.
    with open(hp_file, "a") as f:
        f.write(string_to_write)
    print(string_to_write)
def seed_pytorch(seed=69):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs.

    Args:
        seed: value applied to random, PYTHONHASHSEED, numpy and torch
            (CPU and CUDA) generators.
    """
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    # The original seeded only the CUDA generator; the CPU generator must
    # also be seeded or torch.rand() etc. remain non-reproducible.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
| 28.635659 | 78 | 0.636167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,126 | 0.304819 |
8cb9d99f25ca4d88bf464b8e38a7ea2815155e6f | 762 | py | Python | polling_stations/apps/data_importers/management/commands/import_scarborough.py | DemocracyClub/UK-Polling-Stations | d5c428fc7fbccf0c13a84fa0045dfd332b2879e7 | [
"BSD-3-Clause"
] | 29 | 2015-03-10T08:41:34.000Z | 2022-01-12T08:51:38.000Z | polling_stations/apps/data_importers/management/commands/import_scarborough.py | DemocracyClub/UK-Polling-Stations | d5c428fc7fbccf0c13a84fa0045dfd332b2879e7 | [
"BSD-3-Clause"
] | 4,112 | 2015-04-01T21:27:38.000Z | 2022-03-31T19:22:11.000Z | polling_stations/apps/data_importers/management/commands/import_scarborough.py | DemocracyClub/UK-Polling-Stations | d5c428fc7fbccf0c13a84fa0045dfd332b2879e7 | [
"BSD-3-Clause"
] | 31 | 2015-03-18T14:52:50.000Z | 2022-02-24T10:31:07.000Z | from data_importers.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    """Importer for Scarborough (SCE) polling-station data."""

    council_id = "SCE"
    addresses_name = "2021-11-10T10:12:49.277177/polling_station_export-2021-11-10.csv"
    stations_name = "2021-11-10T10:12:49.277177/polling_station_export-2021-11-10.csv"
    elections = ["2021-11-25"]

    def address_record_to_dict(self, record):
        """Convert a CSV record, skipping known-bad addresses."""
        # Postcodes whose records should not be imported.
        excluded_postcodes = {
            "YO21 1SU",
            "YO12 5DB",
            "YO14 9EW",
            "YO21 3JU",
            "YO21 3FP",
            "YO11 3PQ",
        }
        if record.housepostcode in excluded_postcodes:
            return None
        # 3 POSTGATE WAY, UGTHORPE, WHITBY
        if record.uprn == "10023875937":
            return None
        return super().address_record_to_dict(record)
| 30.48 | 87 | 0.620735 | 688 | 0.902887 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.335958 |
8cba3acfbc1a50238c1a027941fbc4491ae9149b | 1,178 | py | Python | ccfx/scripts/testthreadingutil.py | ytetsuwo/ccfinder-core | e20f390e8d26f900c68d8656b9cfb9cbaa3716d9 | [
"MIT"
] | 2 | 2019-10-27T08:01:19.000Z | 2021-12-20T07:53:02.000Z | ccfx/scripts/testthreadingutil.py | ytetsuwo/ccfinder-core | e20f390e8d26f900c68d8656b9cfb9cbaa3716d9 | [
"MIT"
] | 5 | 2019-05-02T16:36:39.000Z | 2019-05-12T16:04:45.000Z | ccfx/scripts/testthreadingutil.py | ytetsuwo/ccfinder-core | e20f390e8d26f900c68d8656b9cfb9cbaa3716d9 | [
"MIT"
] | 2 | 2019-08-04T13:21:51.000Z | 2021-03-07T00:18:36.000Z |
import threadingutil
import sys
import random
import time
random.seed(0)
def f(v):  # must stay at global scope so it is visible to subprocess workers.
    """Simulated work item: sleep a random 0-2s, then return v squared."""
    time.sleep(random.random() * 2.0)
    return v * v
# Self-test driver (Python 2): run f over INPUTSIZE inputs with NUMWORKER
# parallel workers and print each (index, result) pair as it completes.
if __name__ == '__main__':
    usage = "Usage: testthreadingutil.py [NUMWORKER [INPUTSIZE]]"
    numWorker = 4
    inputSize = 30
    # Optional positional overrides: NUMWORKER then INPUTSIZE; -h prints usage.
    if len(sys.argv) >= 2:
        if sys.argv[1] == "-h":
            print usage
            sys.exit(0)
        numWorker = int(sys.argv[1])
    if len(sys.argv) >= 3:
        inputSize = int(sys.argv[2])
    if len(sys.argv) >= 4:
        print usage
        sys.exit(1)
    def genargslist(size):
        # Yield one single-element argument tuple per work item.
        for v in xrange(size):
            yield ( v, )
    t1 = time.time()
    #for index, result in threadingutil.multithreading_iter(f, [ args for args in genargslist(inputSize) ], numWorker):
    for index, result in threadingutil.multithreading_iter(f, genargslist(inputSize), numWorker):
        print "index = ", index, ", result = ", result
    print
    print "NUMWORKER = %d, INPUTSIZE = %d" % ( numWorker, inputSize )
    print "elapsed time: %g" % (time.time() - t1)
| 26.772727 | 119 | 0.596774 | 0 | 0 | 78 | 0.066214 | 0 | 0 | 0 | 0 | 347 | 0.294567 |
8cbc17eb08f7d57b4a914ccc5109e1694aa14ad8 | 648 | py | Python | src/server.py | openbrisk/brisk-runtime-python | 766cea7749f0f072eb36de1b05751a3e1c761533 | [
"MIT"
] | null | null | null | src/server.py | openbrisk/brisk-runtime-python | 766cea7749f0f072eb36de1b05751a3e1c761533 | [
"MIT"
] | null | null | null | src/server.py | openbrisk/brisk-runtime-python | 766cea7749f0f072eb36de1b05751a3e1c761533 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
import sys
import imp
import os
from flask import Flask, request, abort, g
app = Flask(__name__)
@app.route('/healthz', methods=['GET'])
def healthz():
    """Liveness probe: respond 200 with an empty plain-text body."""
    headers = { 'Content-Type': 'text/plain' }
    return "", 200, headers
def configure_logging(logLevel):
    """Set the root logger to *logLevel* and route the app's log records
    to stdout with a timestamped '%(levelname)s - %(message)s' format."""
    global app
    root_logger = logging.getLogger()
    root_logger.setLevel(logLevel)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logLevel)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    app.logger.addHandler(handler)
# Module start-up: configure logging, then serve on all interfaces, port 8080.
configure_logging(logging.DEBUG)
app.logger.info("Listening port 8080 ...")
app.run(host='0.0.0.0', port=8080)
8cbee8bc9e37824427f878a582ea24c70d8c204c | 1,400 | py | Python | slybot/slybot/utils.py | rmcwilliams2004/mapping | 55f97e61fc558d243a0806ae8bf4a26b080026de | [
"BSD-3-Clause"
] | 8 | 2015-03-16T20:30:50.000Z | 2021-09-21T13:05:46.000Z | slybot/slybot/utils.py | btomashvili/portia | 9039140269f2cfca588a1feec6cc793cba87f202 | [
"BSD-3-Clause"
] | 1 | 2018-10-24T09:29:00.000Z | 2018-10-24T09:29:00.000Z | slybot/slybot/utils.py | btomashvili/portia | 9039140269f2cfca588a1feec6cc793cba87f202 | [
"BSD-3-Clause"
] | 4 | 2015-02-01T01:17:45.000Z | 2022-03-04T06:07:25.000Z | from urlparse import urlparse
import os
import json
from scrapely.htmlpage import HtmlPage
def iter_unique_scheme_hostname(urls):
    """Return a list of unique (scheme, hostname) tuples for the given urls,
    filtering dupes (order is unspecified).
    """
    seen = set()
    for url in urls:
        parsed = urlparse(url)
        seen.add((parsed.scheme, parsed.hostname))
    return list(seen)
def open_project_from_dir(project_dir):
    """Load a slybot project's specs from *project_dir*.

    Reads project.json, items.json and extractors.json, plus one spec per
    *.json file in the spiders/ subdirectory (keyed by spider name, i.e.
    the filename without extension).

    Raises:
        ValueError: if a spider file contains invalid JSON.
    """
    specs = {"spiders": {}}
    with open(os.path.join(project_dir, "project.json")) as f:
        specs["project"] = json.load(f)
    with open(os.path.join(project_dir, "items.json")) as f:
        specs["items"] = json.load(f)
    with open(os.path.join(project_dir, "extractors.json")) as f:
        specs["extractors"] = json.load(f)
    for fname in os.listdir(os.path.join(project_dir, "spiders")):
        if fname.endswith(".json"):
            spider_name = os.path.splitext(fname)[0]
            with open(os.path.join(project_dir, "spiders", fname)) as f:
                try:
                    specs["spiders"][spider_name] = json.load(f)
                # `except X as e` works on Python 2.6+ and 3; the original
                # `except ValueError, e` form is a SyntaxError on Python 3.
                except ValueError as e:
                    raise ValueError("Error parsing spider (invalid JSON): %s: %s" % (fname, e))
    return specs
def htmlpage_from_response(response):
    """Build a scrapely HtmlPage from *response*.

    Assumes *response* exposes url, headers, body_as_unicode() and
    encoding (i.e. the Scrapy Response interface) — confirm with callers.
    """
    return HtmlPage(response.url, response.headers, \
        response.body_as_unicode(), encoding=response.encoding)
| 36.842105 | 96 | 0.638571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.184286 |
8cbf9d0c7839d5ffdf3819b9203c774340d2b8a5 | 413 | py | Python | reppy/util.py | PLPeeters/reppy | 7a1aceb273c25b47908a6f81cdd894da72798b45 | [
"MIT"
] | 137 | 2015-02-11T17:39:16.000Z | 2022-03-03T22:04:51.000Z | reppy/util.py | PLPeeters/reppy | 7a1aceb273c25b47908a6f81cdd894da72798b45 | [
"MIT"
] | 73 | 2015-01-05T18:58:45.000Z | 2021-04-16T11:53:19.000Z | reppy/util.py | PLPeeters/reppy | 7a1aceb273c25b47908a6f81cdd894da72798b45 | [
"MIT"
] | 40 | 2015-02-27T17:22:03.000Z | 2022-02-19T16:34:43.000Z | '''Utility functions.'''
import email
def parse_date(string):
    '''Return a timestamp for the provided datestring, described by RFC 7231.'''
    fields = email.utils.parsedate_tz(string)
    if fields is None:
        raise ValueError("Invalid time.")
    fields = list(fields)
    # A missing time zone field means GMT/UTC (offset 0).
    if fields[9] is None:
        fields[9] = 0
    return email.utils.mktime_tz(fields)
| 27.533333 | 80 | 0.682809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.35109 |
8cbfb27786acd690d2a47ce15ea1c10002446ef7 | 43,121 | py | Python | bin/design.py | broadinstitute/catch | 2fedca15f921116f580de8b2ae7ac9972932e59e | [
"MIT"
] | 58 | 2018-01-24T16:31:37.000Z | 2022-02-25T07:46:35.000Z | bin/design.py | broadinstitute/catch | 2fedca15f921116f580de8b2ae7ac9972932e59e | [
"MIT"
] | 29 | 2018-04-17T17:36:06.000Z | 2022-02-25T11:48:58.000Z | bin/design.py | broadinstitute/catch | 2fedca15f921116f580de8b2ae7ac9972932e59e | [
"MIT"
] | 16 | 2018-05-23T12:19:41.000Z | 2021-08-09T04:16:00.000Z | #!/usr/bin/env python3
"""Design probes for genome capture.
This is the main executable of CATCH for probe design.
"""
import argparse
import importlib
import logging
import os
import random
from catch import coverage_analysis
from catch import probe
from catch.filter import adapter_filter
from catch.filter import duplicate_filter
from catch.filter import fasta_filter
from catch.filter import n_expansion_filter
from catch.filter import near_duplicate_filter
from catch.filter import polya_filter
from catch.filter import probe_designer
from catch.filter import reverse_complement_filter
from catch.filter import set_cover_filter
from catch.utils import cluster
from catch.utils import ncbi_neighbors
from catch.utils import seq_io, version, log
__author__ = 'Hayden Metsky <hayden@mit.edu>'
def main(args):
logger = logging.getLogger(__name__)
# Set NCBI API key
if args.ncbi_api_key:
ncbi_neighbors.ncbi_api_key = args.ncbi_api_key
# Read the genomes from FASTA sequences
genomes_grouped = []
genomes_grouped_names = []
for ds in args.dataset:
if ds.startswith('collection:'):
# Process a collection of datasets
collection_name = ds[len('collection:'):]
try:
collection = importlib.import_module(
'catch.datasets.collections.' + collection_name)
except ImportError:
raise ValueError("Unknown dataset collection %s" %
collection_name)
for name, dataset in collection.import_all():
genomes_grouped += [seq_io.read_dataset_genomes(dataset)]
genomes_grouped_names += [name]
elif ds.startswith('download:'):
# Download a FASTA for an NCBI taxonomic ID
taxid = ds[len('download:'):]
if args.write_taxid_acc:
taxid_fn = os.path.join(args.write_taxid_acc,
str(taxid) + '.txt')
else:
taxid_fn = None
if '-' in taxid:
taxid, segment = taxid.split('-')
else:
segment = None
ds_fasta_tf = ncbi_neighbors.construct_fasta_for_taxid(taxid,
segment=segment, write_to=taxid_fn)
genomes_grouped += [seq_io.read_genomes_from_fasta(ds_fasta_tf.name)]
genomes_grouped_names += ['taxid:' + str(taxid)]
ds_fasta_tf.close()
elif os.path.isfile(ds):
# Process a custom fasta file with sequences
genomes_grouped += [seq_io.read_genomes_from_fasta(ds)]
genomes_grouped_names += [os.path.basename(ds)]
else:
# Process an individual dataset
try:
dataset = importlib.import_module(
'catch.datasets.' + ds)
except ImportError:
raise ValueError("Unknown file or dataset '%s'" % ds)
genomes_grouped += [seq_io.read_dataset_genomes(dataset)]
genomes_grouped_names += [ds]
if (args.limit_target_genomes and
args.limit_target_genomes_randomly_with_replacement):
raise Exception(("Cannot --limit-target-genomes and "
"--limit-target-genomes-randomly-with-replacement at "
"the same time"))
elif args.limit_target_genomes:
genomes_grouped = [genomes[:args.limit_target_genomes]
for genomes in genomes_grouped]
elif args.limit_target_genomes_randomly_with_replacement:
k = args.limit_target_genomes_randomly_with_replacement
genomes_grouped = [random.choices(genomes, k=k)
for genomes in genomes_grouped]
# Store the FASTA paths of blacklisted genomes
blacklisted_genomes_fasta = []
if args.blacklist_genomes:
for bg in args.blacklist_genomes:
if os.path.isfile(bg):
# Process a custom fasta file with sequences
blacklisted_genomes_fasta += [bg]
else:
# Process an individual dataset
try:
dataset = importlib.import_module(
'catch.datasets.' + bg)
except ImportError:
raise ValueError("Unknown file or dataset '%s'" % bg)
for fp in dataset.fasta_paths:
blacklisted_genomes_fasta += [fp]
# Setup and verify parameters related to probe length
if not args.lcf_thres:
args.lcf_thres = args.probe_length
if args.probe_stride > args.probe_length:
logger.warning(("PROBE_STRIDE (%d) is greater than PROBE_LENGTH "
"(%d), which is usually undesirable and may lead "
"to undefined behavior"),
args.probe_stride, args.probe_length)
if args.lcf_thres > args.probe_length:
logger.warning(("LCF_THRES (%d) is greater than PROBE_LENGTH "
"(%d), which is usually undesirable and may lead "
"to undefined behavior"),
args.lcf_thres, args.probe_length)
if args.island_of_exact_match > args.probe_length:
logger.warning(("ISLAND_OF_EXACT_MATCH (%d) is greater than "
"PROBE_LENGTH (%d), which is usually undesirable "
"and may lead to undefined behavior"),
args.island_of_exact_match, args.probe_length)
# Setup and verify parameters related to k-mer length in probe map
if args.kmer_probe_map_k:
# Check that k is sufficiently small
if args.kmer_probe_map_k > args.probe_length:
raise Exception(("KMER_PROBE_MAP_K (%d) exceeds PROBE_LENGTH "
"(%d), which is not permitted") %
(args.kmer_probe_map_k, args.probe_length))
# Use this value for the SetCoverFilter, AdapterFilter, and
# the Analyzer
kmer_probe_map_k_scf = args.kmer_probe_map_k
kmer_probe_map_k_af = args.kmer_probe_map_k
kmer_probe_map_k_analyzer = args.kmer_probe_map_k
else:
if args.probe_length <= 20:
logger.warning(("PROBE_LENGTH (%d) is small; you may want to "
"consider setting --kmer-probe-map-k to be "
"small as well in order to be more sensitive "
"in mapping candidate probes to target sequence"),
args.probe_length)
# Use a default k of 20 for the SetCoverFilter and AdapterFilter,
# and 10 for the Analyzer since we would like to be more sensitive
# (potentially at the cost of slower runtime) for the latter
kmer_probe_map_k_scf = 20
kmer_probe_map_k_af = 20
kmer_probe_map_k_analyzer = 10
# Set the maximum number of processes in multiprocessing pools
if args.max_num_processes:
probe.set_max_num_processes_for_probe_finding_pools(
args.max_num_processes)
cluster.set_max_num_processes_for_creating_distance_matrix(
args.max_num_processes)
# Raise exceptions or warn based on use of adapter arguments
if args.add_adapters:
if not (args.adapter_a or args.adapter_b):
logger.warning(("Adapter sequences will be added, but default "
"sequences will be used; to provide adapter "
"sequences, use --adapter-a and --adapter-b"))
else:
if args.adapter_a or args.adapter_b:
raise Exception(("Adapter sequences were provided with "
"--adapter-a and --adapter-b, but --add-adapters is required "
"to add adapter sequences onto the ends of probes"))
# Do not allow both --small-seq-skip and --small-seq-min, since they
# have different intentions
if args.small_seq_skip is not None and args.small_seq_min is not None:
raise Exception(("Both --small-seq-skip and --small-seq-min were "
"specified, but both cannot be used together"))
# Check arguments involving clustering
if args.cluster_and_design_separately and args.identify:
raise Exception(("Cannot use --cluster-and-design-separately with "
"--identify, because clustering collapses genome groupings into "
"one"))
if args.cluster_from_fragments and not args.cluster_and_design_separately:
raise Exception(("Cannot use --cluster-from-fragments without also "
"setting --cluster-and-design-separately"))
# Check for whether a custom hybridization function was provided
if args.custom_hybridization_fn:
custom_cover_range_fn = tuple(args.custom_hybridization_fn)
else:
custom_cover_range_fn = None
if args.custom_hybridization_fn_tolerant:
custom_cover_range_tolerant_fn = tuple(args.custom_hybridization_fn_tolerant)
else:
custom_cover_range_tolerant_fn = None
# Setup the filters
# The filters we use are, in order:
filters = []
# [Optional]
# Fasta filter (ff) -- leave out candidate probes
if args.filter_from_fasta:
ff = fasta_filter.FastaFilter(args.filter_from_fasta,
skip_reverse_complements=True)
filters += [ff]
# [Optional]
# Poly(A) filter (paf) -- leave out probes with stretches of 'A' or 'T'
if args.filter_polya:
polya_length, polya_mismatches = args.filter_polya
if polya_length > args.probe_length:
logger.warning(("Length of poly(A) stretch to filter (%d) is "
"greater than PROBE_LENGTH (%d), which is usually "
"undesirable"), polya_length, args.probe_length)
if polya_length < 10:
logger.warning(("Length of poly(A) stretch to filter (%d) is "
"short, and may lead to many probes being "
"filtered"), polya_length)
if polya_mismatches > 10:
logger.warning(("Number of mismatches to tolerate when searching "
"for poly(A) stretches (%d) is high, and may "
"lead to many probes being filtered"),
polya_mismatches)
paf = polya_filter.PolyAFilter(polya_length, polya_mismatches)
filters += [paf]
# Duplicate filter (df) -- condense all candidate probes that
# are identical down to one; this is not necessary for
# correctness, as the set cover filter achieves the same task
# implicitly, but it does significantly lower runtime by
# decreasing the input size to the set cover filter
# Near duplicate filter (ndf) -- condense candidate probes that
# are near-duplicates down to one using locality-sensitive
# hashing; like the duplicate filter, this is not necessary
# but can significantly lower runtime and reduce memory usage
# (even more than the duplicate filter)
if (args.filter_with_lsh_hamming is not None and
args.filter_with_lsh_minhash is not None):
raise Exception(("Cannot use both --filter-with-lsh-hamming "
"and --filter-with-lsh-minhash"))
if args.filter_with_lsh_hamming is not None:
if args.filter_with_lsh_hamming > args.mismatches:
logger.warning(("Setting FILTER_WITH_LSH_HAMMING (%d) to be greater "
"than MISMATCHES (%d) may cause the probes to achieve less "
"than the desired coverage"), args.filter_with_lsh_hamming,
args.mismatches)
ndf = near_duplicate_filter.NearDuplicateFilterWithHammingDistance(
args.filter_with_lsh_hamming, args.probe_length)
filters += [ndf]
elif args.filter_with_lsh_minhash is not None:
ndf = near_duplicate_filter.NearDuplicateFilterWithMinHash(
args.filter_with_lsh_minhash)
filters += [ndf]
else:
df = duplicate_filter.DuplicateFilter()
filters += [df]
# Set cover filter (scf) -- solve the problem by treating it as
# an instance of the set cover problem
scf = set_cover_filter.SetCoverFilter(
mismatches=args.mismatches,
lcf_thres=args.lcf_thres,
island_of_exact_match=args.island_of_exact_match,
mismatches_tolerant=args.mismatches_tolerant,
lcf_thres_tolerant=args.lcf_thres_tolerant,
island_of_exact_match_tolerant=args.island_of_exact_match_tolerant,
custom_cover_range_fn=custom_cover_range_fn,
custom_cover_range_tolerant_fn=custom_cover_range_tolerant_fn,
identify=args.identify,
blacklisted_genomes=blacklisted_genomes_fasta,
coverage=args.coverage,
cover_extension=args.cover_extension,
cover_groupings_separately=args.cover_groupings_separately,
kmer_probe_map_k=kmer_probe_map_k_scf,
kmer_probe_map_use_native_dict=args.use_native_dict_when_finding_tolerant_coverage)
filters += [scf]
# [Optional]
# Adapter filter (af) -- add adapters to both the 5' and 3' ends
# of each probe
if args.add_adapters:
# Set default adapter sequences, if not provided
if args.adapter_a:
adapter_a = tuple(args.adapter_a)
else:
adapter_a = ('ATACGCCATGCTGGGTCTCC', 'CGTACTTGGGAGTCGGCCAT')
if args.adapter_b:
adapter_b = tuple(args.adapter_b)
else:
adapter_b = ('AGGCCCTGGCTGCTGATATG', 'GACCTTTTGGGACAGCGGTG')
af = adapter_filter.AdapterFilter(adapter_a,
adapter_b,
mismatches=args.mismatches,
lcf_thres=args.lcf_thres,
island_of_exact_match=\
args.island_of_exact_match,
custom_cover_range_fn=\
custom_cover_range_fn,
kmer_probe_map_k=kmer_probe_map_k_af)
filters += [af]
# [Optional]
# N expansion filter (nef) -- expand Ns in probe sequences
# to avoid ambiguity
if args.expand_n is not None:
nef = n_expansion_filter.NExpansionFilter(
limit_n_expansion_randomly=args.expand_n)
filters += [nef]
# [Optional]
# Reverse complement (rc) -- add the reverse complement of each
# probe that remains
if args.add_reverse_complements:
rc = reverse_complement_filter.ReverseComplementFilter()
filters += [rc]
# If requested, don't apply the set cover filter
if args.skip_set_cover:
filter_before_scf = filters[filters.index(scf) - 1]
filters.remove(scf)
# Define parameters for clustering sequences
if args.cluster_and_design_separately:
cluster_threshold = args.cluster_and_design_separately
if args.skip_set_cover:
cluster_merge_after = filter_before_scf
else:
cluster_merge_after = scf
cluster_fragment_length = args.cluster_from_fragments
else:
cluster_threshold = None
cluster_merge_after = None
cluster_fragment_length = None
# Design the probes
pb = probe_designer.ProbeDesigner(genomes_grouped, filters,
probe_length=args.probe_length,
probe_stride=args.probe_stride,
allow_small_seqs=args.small_seq_min,
seq_length_to_skip=args.small_seq_skip,
cluster_threshold=cluster_threshold,
cluster_merge_after=cluster_merge_after,
cluster_fragment_length=cluster_fragment_length)
pb.design()
# Write the final probes to the file args.output_probes
seq_io.write_probe_fasta(pb.final_probes, args.output_probes)
if (args.print_analysis or args.write_analysis_to_tsv or
args.write_sliding_window_coverage or
args.write_probe_map_counts_to_tsv):
analyzer = coverage_analysis.Analyzer(
pb.final_probes,
args.mismatches,
args.lcf_thres,
genomes_grouped,
genomes_grouped_names,
island_of_exact_match=args.island_of_exact_match,
custom_cover_range_fn=custom_cover_range_fn,
cover_extension=args.cover_extension,
kmer_probe_map_k=kmer_probe_map_k_analyzer,
rc_too=args.add_reverse_complements)
analyzer.run()
if args.write_analysis_to_tsv:
analyzer.write_data_matrix_as_tsv(
args.write_analysis_to_tsv)
if args.write_sliding_window_coverage:
analyzer.write_sliding_window_coverage(
args.write_sliding_window_coverage)
if args.write_probe_map_counts_to_tsv:
analyzer.write_probe_map_counts(
args.write_probe_map_counts_to_tsv)
if args.print_analysis:
analyzer.print_analysis()
else:
# Just print the number of probes
print(len(pb.final_probes))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input data
parser.add_argument('dataset',
nargs='+',
help=("One or more target datasets (e.g., one per species). Each "
"dataset can be specified in one of multiple ways. (a) If "
"dataset is in the format 'download:TAXID', then CATCH downloads "
"from NCBI all whole genomes for the NCBI taxonomy with id "
"TAXID, and uses these sequences as input. (b) If dataset is "
"a path to a FASTA file, then its sequences are read and used "
"as input. (c) Otherwise, it is assumed that this is a label "
"for a dataset included in this package (e.g., 'zika'). If "
"the label starts with 'collection:' (e.g., 'collection:viruses"
"_with_human_host'), then this reads from an available "
"collection of datasets. For segmented viruses, the format "
"for NCBI downloads can also be 'download:TAXID-SEGMENT'."))
# Outputting probes
parser.add_argument('-o', '--output-probes',
required=True,
help=("The file to which all final probes should be "
"written; they are written in FASTA format"))
# Outputting downloaded data
parser.add_argument('--write-taxid-acc',
help=("If 'download:' labels are used in datasets, write downloaded "
"accessions to a file in this directory. Accessions are written "
"to WRITE_TAXID_ACC/TAXID.txt"))
# Parameters on probe length and stride
parser.add_argument('-pl', '--probe-length',
type=int,
default=100,
help=("(Optional) Make probes be PROBE_LENGTH nt long"))
parser.add_argument('-ps', '--probe-stride',
type=int,
default=50,
help=("(Optional) Generate candidate probes from the input "
"that are separated by PROBE_STRIDE nt"))
# Parameters governing probe hybridization
parser.add_argument('-m', '--mismatches',
type=int,
default=0,
help=("(Optional) Allow for MISMATCHES mismatches when determining "
"whether a probe covers a sequence"))
parser.add_argument('-l', '--lcf-thres',
type=int,
help=("(Optional) Say that a portion of a probe covers a portion "
"of a sequence if the two share a substring with at most "
"MISMATCHES mismatches that has length >= LCF_THRES "
"nt; if unspecified, this is set to PROBE_LENGTH"))
parser.add_argument('--island-of-exact-match',
type=int,
default=0,
help=("(Optional) When determining whether a probe covers a "
"sequence, require that there be an exact match (i.e., "
"no mismatches) of length at least ISLAND_OF_EXACT_"
"MATCH nt between a portion of the probe and a portion "
"of the sequence"))
# Custom function (dynamically loaded) to determine probe hybridization
# When set, this makes values of the above arguments (--mismatches,
# --lcf-thres, and --island-of-exact-match) meaningless
parser.add_argument('--custom-hybridization-fn',
nargs=2,
help=("(Optional) Args: <PATH> <FUNC>; PATH is a path to a Python "
"module (.py file) and FUNC is a string giving the name of "
"a function in that module. FUNC provides a custom model of "
"hybridization between a probe and target sequence to use in "
"the probe set design. If this is set, the arguments "
"--mismatches, --lcf-thres, and --island-of-exact-match are "
"not used because these are meant for the default model of "
"hybridization. The function FUNC in PATH is dynamically "
"loaded to use when determining whether a probe hybridizes to "
"a target sequence (and, if so, what portion). FUNC must "
"accept the following arguments in order, though it "
"may choose to ignore some values: (1) array giving sequence "
"of a probe; (2) str giving subsequence of target sequence to "
"which the probe may hybridize, of the same length as the "
"given probe sequence; (3) int giving the position in the "
"probe (equivalently, the target subsequence) of the start "
"of a k-mer around which the probe and target subsequence "
"are anchored (the probe and target subsequence are aligned "
"using this k-mer as an anchor); (4) int giving the end "
"position (exclusive) of the anchor k-mer; (5) int giving the "
"full length of the probe (the probe provided in (1) may be "
"cutoff on an end if it extends further than where the "
"target sequence ends); (6) int giving the full length of the "
"target sequence of which the subsequence in (2) is part. "
"FUNC must return None if it deems that the probe does not "
"hybridize to the target subsequence; otherwise, it must "
"return a tuple (start, end) where start is an int giving "
"the start position in the probe (equivalently, in the "
"target subsequence) at which the probe will hybridize to "
"the target subsequence, and end is an int (exclusive) giving "
"the end position of the hybridization."))
# Desired coverage of target genomes
def check_coverage(val):
    """argparse type checker for --coverage.

    Accepts either a fraction in [0, 1] (returned as a float) or a whole
    number of bases > 1 (returned as an int); anything else is rejected.
    """
    as_float = float(val)
    as_int = int(as_float)
    if 0 <= as_float <= 1:
        # Fractional coverage of each target genome
        return as_float
    if as_float > 1 and as_float == as_int:
        # Absolute number of bases (bp) to cover
        return as_int
    raise argparse.ArgumentTypeError(("%s is an invalid coverage "
                                      "value") % val)
parser.add_argument('-c', '--coverage',
type=check_coverage,
default=1.0,
help=("If this is a float in [0,1], it gives the fraction of "
"each target genome that must be covered by the selected "
"probes; if this is an int > 1, it gives the number of "
"bp of each target genome that must be covered by the "
"selected probes"))
# Amount of cover extension to assume
parser.add_argument('-e', '--cover-extension',
type=int,
default=0,
help=("Extend the coverage of each side of a probe by COVER_EXTENSION "
"nt. That is, a probe covers a region that consists of the "
"portion of a sequence it hybridizes to, as well as this "
"number of nt on each side of that portion. This is useful "
"in modeling hybrid selection, where a probe hybridizes to"
"a fragment that includes the region targeted by the probe, "
"along with surrounding portions of the sequence. Increasing "
"its value should reduce the number of probes required to "
"achieve the desired coverage."))
# Differential identification and blacklisting
parser.add_argument('-i', '--identify',
dest="identify",
action="store_true",
help=("Design probes meant to make it possible to identify "
"nucleic acid from a particular input dataset against "
"the other datasets; when set, the coverage should "
"generally be small"))
parser.add_argument('--blacklist-genomes',
nargs='+',
help=("One or more blacklisted genomes; penalize probes based "
"on how much of each of these genomes they cover. If "
"the value is a path to a file, then that file is treated "
"as a FASTA file and its sequences are read. Otherwise, "
"it is assumed that this is a label for a dataset included "
"in this package (e.g., 'zika')."))
parser.add_argument('-mt', '--mismatches-tolerant',
type=int,
help=("(Optional) A more tolerant value for 'mismatches'; "
"this should be greater than the value of MISMATCHES. "
"Allows for capturing more possible hybridizations "
"(i.e., more sensitivity) when designing probes for "
"identification or when genomes are blacklisted."))
parser.add_argument('-lt', '--lcf-thres-tolerant',
type=int,
help=("(Optional) A more tolerant value for 'lcf_thres'; "
"this should be less than LCF_THRES. "
"Allows for capturing more possible hybridizations "
"(i.e., more sensitivity) when designing probes for "
"identification or when genomes are blacklisted."))
parser.add_argument('--island-of-exact-match-tolerant',
type=int,
default=0,
help=("(Optional) A more tolerant value for 'island_of_"
"exact_match'; this should be less than ISLAND_OF_ "
"EXACT_MATCH. Allows for capturing more "
"possible hybridizations (i.e., more sensitivity) "
"when designing probes for identification or when "
"genomes are blacklisted."))
parser.add_argument('--custom-hybridization-fn-tolerant',
nargs=2,
help=("(Optional) A more tolerant model than the one "
"implemented in custom_hybridization_fn. This should capture "
"more possible hybridizations (i.e., be more sensitive) "
"when designing probes for identification or when genomes "
"are blacklisted. See --custom-hybridization-fn for details "
"of how this function should be implemented and provided."))
# Outputting coverage analyses
parser.add_argument('--print-analysis',
dest="print_analysis",
action="store_true",
help="Print analysis of the probe set's coverage")
parser.add_argument('--write-analysis-to-tsv',
help=("(Optional) The file to which to write a TSV-formatted matrix "
"of the probe set's coverage analysis"))
parser.add_argument('--write-sliding-window-coverage',
help=("(Optional) The file to which to write the average coverage "
"achieved by the probe set within sliding windows of each "
"target genome"))
parser.add_argument('--write-probe-map-counts-to-tsv',
help=("(Optional) The file to which to write a TSV-formatted list of "
"the number of sequences each probe maps to. This explicitly "
"does not count reverse complements."))
# Accepting probes as input and skipping set cover process
parser.add_argument('--filter-from-fasta',
help=("(Optional) A FASTA file from which to select candidate probes. "
"Before running any other filters, keep only the candidate "
"probes that are equal to sequences in the file and remove "
"all probes not equal to any of these sequences. This, by "
"default, ignores sequences in the file whose header contains "
"the string 'reverse complement'; that is, if there is some "
"probe with sequence S, it may be filtered out (even if there "
"is a sequence S in the file) if the header of S in the file "
"contains 'reverse complement'. This is useful if we already "
"have probes decided by the set cover filter, but simply "
"want to process them further by, e.g., adding adapters or "
"running a coverage analysis. For example, if we have already "
"run the time-consuming set cover filter and have a FASTA "
"containing those probes, we can provide a path to that "
"FASTA file for this argument, and also provide the "
"--skip-set-cover argument, in order to add adapters to "
"those probes without having to re-run the set cover filter."))
parser.add_argument('--skip-set-cover',
dest="skip_set_cover",
action="store_true",
help=("Skip the set cover filter; this is useful when we "
"wish to see the probes generated from only the "
"duplicate and reverse complement filters, to gauge "
"the effects of the set cover filter"))
# Adding adapters
parser.add_argument('--add-adapters',
dest="add_adapters",
action="store_true",
help=("Add adapters to the ends of probes; to specify adapter "
"sequences, use --adapter-a and --adapter-b"))
parser.add_argument('--adapter-a',
nargs=2,
help=("(Optional) Args: <X> <Y>; Custom A adapter to use; two ordered "
"where X is the A adapter sequence to place on the 5' end of "
"a probe and Y is the A adapter sequence to place on the 3' "
"end of a probe"))
parser.add_argument('--adapter-b',
nargs=2,
help=("(Optional) Args: <X> <Y>; Custom B adapter to use; two ordered "
"where X is the B adapter sequence to place on the 5' end of "
"a probe and Y is the B adapter sequence to place on the 3' "
"end of a probe"))
# Filtering poly(A) sequence from probes
parser.add_argument('--filter-polya',
nargs=2,
type=int,
help=("(Optional) Args: <X> <Y> (integers); do not output any probe "
"that contains a stretch of X or more 'A' bases, tolerating "
"up to Y mismatches (and likewise for 'T' bases)"))
# Adjusting probe output
parser.add_argument('--add-reverse-complements',
dest="add_reverse_complements",
action="store_true",
help=("Add to the output the reverse complement of each probe"))
parser.add_argument('--expand-n',
nargs='?',
type=int,
default=None,
const=3,
help=("Expand each probe so that 'N' bases are replaced by real "
"bases; for example, the probe 'ANA' would be replaced "
"with the probes 'AAA', 'ATA', 'ACA', and 'AGA'; this is "
"done combinatorially across all 'N' bases in a probe, and "
"thus the number of new probes grows exponentially with the "
"number of 'N' bases in a probe. If followed by a command- "
"line argument (INT), this only expands at most INT randomly "
"selected N bases, and the rest are replaced with random "
"unambiguous bases (default INT is 3)."))
# Limiting input
parser.add_argument('--limit-target-genomes',
type=int,
help=("(Optional) Use only the first LIMIT_TARGET_GENOMES target "
"genomes in the dataset"))
parser.add_argument('--limit-target-genomes-randomly-with-replacement',
type=int,
help=("(Optional) Randomly select LIMIT_TARGET_GENOMES_RANDOMLY_"
"WITH_REPLACMENT target genomes in the dataset with "
"replacement"))
# Clustering input sequences
def check_cluster_and_design_separately(val):
    """argparse type checker for --cluster-and-design-separately.

    The value is an average nucleotide dissimilarity and must be a float
    in (0, 0.5].
    """
    dissimilarity = float(val)
    if not (0 < dissimilarity <= 0.5):
        raise argparse.ArgumentTypeError(("%s is an invalid average "
                                          "nucleotide dissimilarity") % val)
    return dissimilarity
parser.add_argument('--cluster-and-design-separately',
type=check_cluster_and_design_separately,
help=("(Optional) If set, cluster all input sequences using their "
"MinHash signatures, design probes separately on each cluster, "
"and combine the resulting probes. This can significantly lower "
"runtime and memory usage, but may lead to a suboptimal "
"solution. The value CLUSTER_AND_DESIGN_SEPARATELY gives the "
"inter-cluster distance threshold to merge clusters (1-ANI, "
"where ANI is average nucleotide identity); higher values "
"result in fewer clusters, and thus longer runtime. Values "
"must be in (0,0.5], and generally should be around 0.1 or "
"0.2. When used, this creates a separate genome for each "
"input sequence -- it collapses all sequences, across both "
"groups and genomes, into one list of sequences in one group. "
"Therefore, genomes will not be grouped as specified in the "
"input and sequences will not be grouped by genome, and "
"differential identification is not supported"))
parser.add_argument('--cluster-from-fragments',
type=int,
help=("(Optional) If set, break all sequences into sequences of "
"length CLUSTER_FROM_FRAGMENTS nt, and cluster these fragments. "
"This can be useful for improving runtime on input with "
"especially large genomes, in which probes for different "
"fragments can be designed separately. Values should generally "
"be around 10,000. For this to be used, "
"--cluster-and-design-separately must also be set."))
# Filter candidate probes with LSH
parser.add_argument('--filter-with-lsh-hamming',
type=int,
help=("(Optional) If set, filter candidate probes for near-"
"duplicates using LSH with a family of hash functions that "
"works with Hamming distance. FILTER_WITH_LSH_HAMMING gives "
"the maximum Hamming distance at which to call near-"
"duplicates; it should be commensurate with (but not greater "
"than) MISMATCHES. Using this may significantly improve "
"runtime and reduce memory usage by reducing the number of "
"candidate probes to consider, but may lead to a slightly "
"sub-optimal solution. It may also, particularly with "
"relatively high values of FILTER_WITH_LSH_HAMMING, cause "
"coverage obtained for each genome to be slightly less than "
"the desired coverage (COVERAGE) when that desired coverage "
"is the complete genome; it is recommended to also use "
"--print-analysis or --write-analysis-to-tsv with this "
"to see the coverage that is obtained."))
def check_filter_with_lsh_minhash(val):
    """argparse type checker for --filter-with-lsh-minhash.

    The value is a Jaccard distance and must be a float in [0, 1].
    """
    distance = float(val)
    if not (0.0 <= distance <= 1.0):
        raise argparse.ArgumentTypeError(("%s is an invalid Jaccard "
                                          "distance") % val)
    return distance
parser.add_argument('--filter-with-lsh-minhash',
type=check_filter_with_lsh_minhash,
help=("(Optional) If set, filter candidate probes for near-"
"duplicates using LSH with a MinHash family. "
"FILTER_WITH_LSH_MINHASH gives the maximum Jaccard distance "
"(1 minus Jaccard similarity) at which to call near-duplicates; "
"the Jaccard similarity is calculated by treating each probe "
"as a set of overlapping 10-mers. Its value should be "
"commensurate with parameter values determining whether a probe "
"hybridizes to a target sequence, but this can be difficult "
"to measure compared to the input for --filter-with-lsh-hamming. "
"However, this allows more sensitivity in near-duplicate "
"detection than --filter-with-lsh-hamming (e.g., if near-"
"duplicates should involve probes shifted relative to each "
"other). The same caveats mentioned in help for "
"--filter-with-lsh-hamming also apply here. Values of "
"FILTER_WITH_LSH_MINHASH above ~0.7 may start to require "
"significant memory and runtime for near-duplicate detection."))
# Miscellaneous technical adjustments
parser.add_argument('--cover-groupings-separately',
dest="cover_groupings_separately",
action="store_true",
help=("Run a separate instance of set cover with the target genomes "
"from each grouping and pool (union) the resulting probes. "
"When set, the software will run faster than when not set, but "
"it may yield more probes than when it is not set."))
parser.add_argument('--small-seq-skip',
type=int,
help=("(Optional) Do not create candidate probes from sequences "
"whose length is <= SMALL_SEQ_SKIP. If set to (PROBE_LENGTH - "
"1), this avoids the error raised when sequences are less "
"than the probe length"))
parser.add_argument('--small-seq-min',
type=int,
help=("(Optional) If set, allow sequences as input that are "
"shorter than PROBE_LENGTH (when not set, the program will "
"error on such input). SMALL_SEQ_MIN is the "
"minimum sequence length that should be accepted as input. "
"When a sequence is less than PROBE_LENGTH, a candidate "
"probe is created that is equal to the sequence; thus, "
"the output probes may have different lengths. Note that, "
"when this is set, it might be a good idea to also set "
"LCF_THRES to be a value smaller than PROBE_LENGTH -- "
"e.g., the length of the shortest input sequence; otherwise, "
"when a probe of length p_l is mapped to a sequence of length "
"s_l, then lcf_thres is treated as being min(LCF_THRES, p_l, "
"s_l) so that a probe is able to 'cover' a sequence shorter "
"than the probe and so that a probe shorter than lcf_thres "
"is able to 'cover' a sequence"))
def check_max_num_processes(val):
    """argparse type checker for --max-num-processes: an int >= 1.

    Unlike the original, the error message reports the offending value,
    consistent with the other custom argparse type checkers in this file
    (check_coverage, check_filter_with_lsh_minhash, ...).
    """
    ival = int(val)
    if ival >= 1:
        return ival
    else:
        raise argparse.ArgumentTypeError(("%s is invalid; MAX_NUM_PROCESSES "
                                          "must be an int >= 1") % val)
parser.add_argument('--max-num-processes',
type=check_max_num_processes,
help=("(Optional) An int >= 1 that gives the maximum number of "
"processes to use in multiprocessing pools; uses min(number "
"of CPUs in the system, MAX_NUM_PROCESSES) processes"))
parser.add_argument('--kmer-probe-map-k',
type=int,
help=("(Optional) Use this value (KMER_PROBE_LENGTH_K) as the "
"k-mer length when constructing a map of k-mers to the probes "
"that contain these k-mers. This map is used when mapping "
"candidate probes to target sequences and the k-mers serve "
"as seeds for calculating whether a candidate probe 'covers' "
"a subsequence. The value should be sufficiently less than "
"PROBE_LENGTH so that it can find mappings even when the "
"candidate probe and target sequence are divergent. In "
"particular, CATCH will try to find a value k >= "
"KMER_PROBE_LENGTH_K (by default, >=20) such that k divides "
"PROBE_LENGTH and k < PROBE_LENGTH / MISMATCHES (if "
"MISMATCHES=0, then k=PROBE_LENGTH). It will then use this "
"k as the k-mer length in mappings; if no such k exists, it "
"will use a randomized approach with KMER_PROBE_LENGTH_K as "
"the k-mer length. If --custom-hybridization-fn is set, "
"it will always use the randomized approach with "
"KMER_PROBE_LENGTH_K (by default, 20) as the k-mer length."))
parser.add_argument('--use-native-dict-when-finding-tolerant-coverage',
dest="use_native_dict_when_finding_tolerant_coverage",
action="store_true",
help=("When finding probe coverage for blacklisting and "
"identification (i.e., when using tolerant parameters), "
"use a native Python dict as the kmer_probe_map across "
"processes, rather than the primitives in SharedKmerProbeMap "
"that are more suited to sharing across processes. Depending "
"on the input (particularly if there are many candidate probes) "
"this may result in substantial memory usage; but it may provide "
"an improvement in runtime when there are relatively few "
"candidate probes and a very large blacklisted input"))
parser.add_argument('--ncbi-api-key',
help=("API key to use for NCBI e-utils. Using this increases the "
"limit on requests/second and may prevent an IP address "
"from being block due to too many requests"))
# Log levels and version
parser.add_argument('--debug',
dest="log_level",
action="store_const",
const=logging.DEBUG,
default=logging.WARNING,
help=("Debug output"))
parser.add_argument('--verbose',
dest="log_level",
action="store_const",
const=logging.INFO,
help=("Verbose output"))
parser.add_argument('-V', '--version',
action='version',
version=version.get_version())
args = parser.parse_args()
log.configure_logging(args.log_level)
main(args)
| 50.316219 | 91 | 0.616475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22,461 | 0.520883 |
8cc0682cc0b1ed8dabfc6863bc76066c537d9cd6 | 51 | py | Python | autograd/__init__.py | mattjj/autograd_tutorial | c32947fbd7f130d791ab1e277a0feed05cb3da5f | [
"MIT"
] | 704 | 2018-01-30T16:41:47.000Z | 2022-03-23T02:03:08.000Z | autograd/__init__.py | mattjj/autograd_tutorial | c32947fbd7f130d791ab1e277a0feed05cb3da5f | [
"MIT"
] | 6 | 2018-01-30T20:43:49.000Z | 2020-04-27T13:27:08.000Z | autograd/__init__.py | mattjj/autograd_tutorial | c32947fbd7f130d791ab1e277a0feed05cb3da5f | [
"MIT"
] | 88 | 2018-02-03T16:36:54.000Z | 2022-02-09T04:21:30.000Z | from .differential_operators import make_vjp, grad
| 25.5 | 50 | 0.862745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8cc0ae2e9a623354a047f26bbb36e199b6e49eb8 | 2,975 | py | Python | propara/utils/prostruct_predicted_json_to_tsv_grid.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | [
"Apache-2.0"
] | 84 | 2018-06-02T02:00:53.000Z | 2022-03-13T12:17:42.000Z | propara/utils/prostruct_predicted_json_to_tsv_grid.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | [
"Apache-2.0"
] | 3 | 2018-10-31T00:28:31.000Z | 2020-05-12T01:06:53.000Z | propara/utils/prostruct_predicted_json_to_tsv_grid.py | keisks/propara | 49fa8fe0481291df18b2c7b48e7ba1dafaad48e2 | [
"Apache-2.0"
] | 13 | 2018-09-14T20:37:51.000Z | 2021-03-23T09:24:49.000Z | import json
import sys
from pprint import pprint
from processes.data.propara_dataset_reader import Action
# Input: json format generated by ProparaPredictor
# paraid": "1114",
# "sentence_texts": ["Rainwater falls onto the soil.", "The rainwater seeps into the soil.",...."],
# "participants": ["rainwater; water", "bedrock", "funnels", "caves"],
# "states": [["?", "soil", "soil", "bedrock", "bedrock", "bedrock", "bedrock", "bedrock"],....],
# "predicted_actions": ["MOVE", "NONE", "NONE", "NONE", "MOVE", ..., "CREATE", "CREATE"]
#
# Output: paraid \t sentence_id \t participant \t action \t before_val \t after_val
# This module converts the json file format generated by ProparaPredictor into partial grids TSV format
def get_before_after_val(action: Action, predicted_before_location: str, predicted_after_location: str):
    """Return the (before_val, after_val) pair to write to the grid.

    CREATE/DESTROY use '-' for nonexistence and '?' for an unknown
    location; MOVE uses the model-predicted locations; NONE leaves both
    values unknown.

    Raises:
        ValueError: if ``action`` is not one of the four known actions.
            (The original fell through and implicitly returned None,
            which would only fail later when the caller unpacks the
            result as a tuple.)
    """
    if action == Action.CREATE:
        return '-', '?'
    elif action == Action.DESTROY:
        return '?', '-'
    elif action == Action.MOVE:
        return predicted_before_location, predicted_after_location
    elif action == Action.NONE:
        return '?', '?'
    raise ValueError("Unexpected action: %s" % action)
def convert_predicted_json_to_partial_grids(infile_path: str, outfile_path: str):
    """Convert ProparaPredictor JSON-lines output to a partial-grids TSV.

    Each line of ``infile_path`` is one JSON object with (at least) the
    keys "para_id", "participants", "top1_original", "sentence_texts",
    and optionally "predicted_locations". One TSV row is written per
    (sentence, participant) pair with the columns:
    para_id, sentence_id (1-based), participant, action name,
    before_val, after_val.

    Fix over the original: both files are opened with ``with`` so they
    are closed even when an exception occurs mid-conversion (the input
    file handle was previously never closed at all).
    """
    with open(outfile_path, "w") as out_file, open(infile_path) as in_file:
        for line in in_file:
            data = json.loads(line)
            pprint(data)  # debug output, preserved from the original
            para_id = data["para_id"]
            participants = data["participants"]
            actions_sentences_participants = data["top1_original"]
            sentence_texts = data["sentence_texts"]
            num_sentences = len(sentence_texts)
            num_participants = len(participants)
            # Fall back to an all-unknown ('?') location grid when the
            # predictor produced no locations.
            if "predicted_locations" in data and len(data["predicted_locations"]) > 0:
                predicted_after_locations = data["predicted_locations"]
            else:
                predicted_after_locations = [['?' for _ in range(num_participants)]
                                             for _ in range(num_sentences)]
            print(num_sentences)  # debug output, preserved from the original
            print(num_participants)  # debug output, preserved from the original
            for sentence_id in range(num_sentences):
                for participant_id in range(num_participants):
                    # Before-location of sentence i is the after-location
                    # of sentence i-1 ('?' for the first sentence).
                    if sentence_id > 0:
                        predicted_before_location = \
                            predicted_after_locations[sentence_id - 1][participant_id]
                    else:
                        predicted_before_location = '?'
                    predicted_after_location = \
                        predicted_after_locations[sentence_id][participant_id]
                    action = Action(
                        actions_sentences_participants[sentence_id][participant_id])
                    (before_val, after_val) = get_before_after_val(
                        action, predicted_before_location, predicted_after_location)
                    out_file.write("\t".join([para_id,
                                              str(sentence_id + 1),
                                              participants[participant_id],
                                              action.name,
                                              before_val,
                                              after_val]) + "\n")
if __name__ == '__main__':
    # Usage: <script> <input json file> <output tsv file>
    convert_predicted_json_to_partial_grids(infile_path=sys.argv[1],
                                            outfile_path=sys.argv[2])
8cc1bbd922f5bc29a45d3aa2e4e041703bf3da65 | 4,231 | py | Python | impede-app/server/py/filter_library.py | ThatSnail/impede | 126a85a4efc6d6699a383b86ffcef35743cafb33 | [
"MIT"
] | 1 | 2016-02-01T22:54:02.000Z | 2016-02-01T22:54:02.000Z | impede-app/server/py/filter_library.py | ThatSnail/impede | 126a85a4efc6d6699a383b86ffcef35743cafb33 | [
"MIT"
] | null | null | null | impede-app/server/py/filter_library.py | ThatSnail/impede | 126a85a4efc6d6699a383b86ffcef35743cafb33 | [
"MIT"
] | null | null | null |
""" Module that contains some example filters """
import numpy as np
import matplotlib.pyplot as plt
from graph import Node, Edge, Graph
from resistor import Resistor
from capacitor import Capacitor
from diode import Diode
from opamp import Opamp
from wire import Wire
from units import Units
from filter import Filter
def make_mxr_distortion_filter():
    """ Build a circuit graph of the MXR Distortion+ pedal, following:
    http://www.premierguitar.com/articles/mxr-distortion-plus-mods-1

    The circuit is an op-amp gain stage (non-inverting input biased at
    4.5 V) followed by an RC output network with back-to-back clipping
    diodes and an output level potentiometer.

    Returns:
        Filter object wrapping the graph, with probes on Vin and Vout
    """
    probes = []
    graph = Graph()
    # Knobs: pot positions; presumably normalized to 0..1 -- TODO confirm
    # against Resistor usage elsewhere.
    gain_param = 0.5
    mix_param = 0.1
    # Input / output
    node_in = Node(graph, fixed=True, source=True, label="Vin")
    node_out = Node(graph, output=True, label="Vout")
    # Supply: half-rail bias (4.5 V) and ground
    node_4_5 = Node(graph, value=4.5, fixed=True, source=True, label="4.5V")
    node_gnd = Node(graph, value=0, fixed=True, source=True, label="GND")
    # Probe Vin
    probes.append(node_in)
    # Op amp plus section: input RF-bypass cap, then (bypassed) coupling
    # cap into the non-inverting input, which is biased via 1M to 4.5 V.
    edge = Edge(graph, node_in, node_gnd, label="I1")
    capacitor = Capacitor(graph, .001 * Units.u, node_in, node_gnd, edge)
    graph.add_component(capacitor)
    node = Node(graph, label="V1")
    edge = Edge(graph, node_in, node, label="I2")
    # NOTE(review): the input coupling capacitor is commented out and
    # replaced by a plain wire (DC-coupled) -- confirm this is intentional.
    #capacitor = Capacitor(graph, .01 * Units.u, node_in, node, edge)
    #graph.add_component(capacitor)
    wire = Wire(graph, node_in, node, edge)
    graph.add_component(wire)
    node_plus = Node(graph, label="V+")
    edge = Edge(graph, node, node_plus, label="I3")
    resistor = Resistor(graph, 10 * Units.K, node, node_plus, edge)
    graph.add_component(resistor)
    edge = Edge(graph, node_plus, node_4_5, label="I4")
    resistor = Resistor(graph, 1 * Units.M, node_plus, node_4_5, edge)
    graph.add_component(resistor)
    # Op amp minus section: the gain pot (scaled 1M) to ground plus the
    # series 4.7K set the feedback divider; the cap in that leg is bypassed.
    node = Node(graph, label="V2")
    edge = Edge(graph, node, node_gnd, label="I5")
    resistor = Resistor(graph, gain_param * (1 * Units.M), node, node_gnd, edge)
    graph.add_component(resistor)
    node_1 = Node(graph, label="V3")
    edge = Edge(graph, node, node_1, label="I6")
    resistor = Resistor(graph, 4.7 * Units.K, node, node_1, edge)
    graph.add_component(resistor)
    node_minus = Node(graph, label="V-")
    edge = Edge(graph, node_1, node_minus, label="I7")
    # NOTE(review): feedback-leg capacitor also bypassed with a wire --
    # confirm this is intentional.
    #capacitor = Capacitor(graph, 0.047 * Units.u, node_1, node_minus, edge)
    #graph.add_component(capacitor)
    wire = Wire(graph, node_1, node_minus, edge)
    graph.add_component(wire)
    # Op amp with 1M feedback resistor from output back to V-
    node_output = Node(graph, source=True, label="Vo")
    op_amp = Opamp(graph, node_a=node_minus, node_b=node_plus, node_out=node_output)
    graph.add_component(op_amp)
    edge = Edge(graph, node_minus, node_output, label="I8")
    resistor = Resistor(graph, 1 * Units.M, node_minus, node_output, edge)
    graph.add_component(resistor)
    # Op amp output: 1uF coupling cap, 10K series resistor, back-to-back
    # clipping diodes to ground, and a small filter cap.
    node = Node(graph, label="V4")
    edge = Edge(graph, node_output, node, label="I9")
    capacitor = Capacitor(graph, 1 * Units.u, node_output, node, edge)
    graph.add_component(capacitor)
    node_1 = Node(graph, label="V5")
    edge = Edge(graph, node, node_1, label="I10")
    resistor = Resistor(graph, 10 * Units.K, node, node_1, edge)
    graph.add_component(resistor)
    edge = Edge(graph, node_1, node_gnd, label="I11")
    diode1 = Diode(graph, node_a=node_1, node_b=node_gnd, edge_i=edge)
    graph.add_component(diode1)
    edge = Edge(graph, node_gnd, node_1, label="I12")
    diode2 = Diode(graph, node_a=node_gnd, node_b=node_1, edge_i=edge)
    graph.add_component(diode2)
    edge = Edge(graph, node_1, node_gnd, label="I13")
    capacitor = Capacitor(graph, .001 * Units.u, node_1, node_gnd, edge)
    graph.add_component(capacitor)
    # Output potentiometer: mix_param splits a 10K pot between the signal
    # path and ground to set the output level.
    edge = Edge(graph, node_1, node_out, label="I14")
    resistor = Resistor(graph, mix_param * (10 * Units.K), node_1, node_out, edge)
    graph.add_component(resistor)
    edge = Edge(graph, node_out, node_gnd, label="I15")
    resistor = Resistor(graph, (1 - mix_param) * (10 * Units.K), node_out, node_gnd, edge)
    graph.add_component(resistor)
    # Probe Vout
    probes.append(node_out)
    mxr_filter = Filter(graph, node_in, node_out, probes=probes)
    return mxr_filter
| 32.79845 | 90 | 0.676436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.157646 |
8cc1d660bbd21361e77d7c6470d18d4cac04510e | 2,493 | py | Python | main_rpi.py | adadesions/AutoWestBin | 3156292693426b627d0b24981e8da557ea58be92 | [
"BSD-3-Clause"
] | null | null | null | main_rpi.py | adadesions/AutoWestBin | 3156292693426b627d0b24981e8da557ea58be92 | [
"BSD-3-Clause"
] | 6 | 2020-09-26T01:05:13.000Z | 2022-03-12T00:29:41.000Z | main_rpi.py | adadesions/AutoWestBin | 3156292693426b627d0b24981e8da557ea58be92 | [
"BSD-3-Clause"
] | null | null | null | from imageai.Prediction import ImagePrediction
import cv2
import os
import time
import RPi.GPIO as gpio
def my_prediction(img_path, prob):
    """Classify the image at img_path with a local ResNet50 model.

    Returns a dict mapping each predicted label to its probability, keeping
    only the (up to 5) predictions whose probability is >= prob.
    """
    cwd = os.getcwd()
    classifier = ImagePrediction()
    classifier.setModelTypeAsResNet()
    # The model file was renamed to a simple name on disk.
    classifier.setModelPath(os.path.join(cwd, "./data/resnet50.h5"))
    classifier.loadModel()
    labels, scores = classifier.predictImage(os.path.join(cwd, img_path),
                                             result_count=5)
    return {label: score
            for label, score in zip(labels, scores)
            if score >= prob}
def image_process():
    """Stream frames from the first camera until the PIR sensor on pin 11
    detects movement, then save the current frame as output.png.

    Pressing 'q' in the preview window aborts without saving.
    """
    capture = cv2.VideoCapture(0)  # device index may need adjusting
    while True:
        _ret, frame = capture.read()
        cv2.imshow("RPI CAM", frame)
        # PIR sensor goes high on movement -> snapshot and stop.
        if gpio.input(11) == 1:
            cv2.imwrite('output.png', frame)
            break
        # Manual abort with the 'q' key.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
def verify_keyword(sample_word):
    """Map detected labels to a 3-character sorting-gate code.

    sample_word is a dict of prediction labels; any label containing a
    gate-1 keyword (bottle/water/wine) yields '110', a gate-2 keyword
    (can) yields '101'. With no keyword match the default '100' is
    returned. Later matches overwrite earlier ones.
    """
    gate1 = ['bottle', 'water', 'wine']
    gate2 = ['can']
    keywords = ['bottle', 'can', 'water', 'plastic', 'wine']
    code = '100'
    for sample in sample_word.keys():
        for keyword in keywords:
            if keyword not in sample:
                continue
            if keyword in gate1:
                code = '110'
            elif keyword in gate2:
                code = '101'
            print('FOUND:', keyword, 'in', sample)
    return code
if __name__ == '__main__':
    # Configure the PIR sensor input on board pin 11.
    gpio.setwarnings(False)
    gpio.setmode(gpio.BOARD)
    gpio.setup(11, gpio.IN)
    print("AI is processing...")
    # Wait for motion and capture a frame to disk.
    image_process()
    # Classify the captured frame and report the matches.
    matches = my_prediction('./output.png', 1)
    print(matches)
    # Translate detected labels into the sorting-gate code.
    gate_code = verify_keyword(matches)
    print("code to sensors:", gate_code)
8cc3cf10fda2a01f3a17c60d56fd79e90e75b690 | 1,290 | py | Python | example/mini_mnist/rename_nodes.py | ciandt-d1/tf_image_classification | 76ff4cb9ec35418eb20ea3240221bbfb88970737 | [
"MIT"
] | null | null | null | example/mini_mnist/rename_nodes.py | ciandt-d1/tf_image_classification | 76ff4cb9ec35418eb20ea3240221bbfb88970737 | [
"MIT"
] | null | null | null | example/mini_mnist/rename_nodes.py | ciandt-d1/tf_image_classification | 76ff4cb9ec35418eb20ea3240221bbfb88970737 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import sys
import os
import argparse
import logging
import tensorflow as tf
from cnn_architecture_inception_v4 import cnn_architecture
tf.logging.set_verbosity(tf.logging.INFO)
# Command-line flags: source checkpoint, destination checkpoint, and the
# square input-image size used when rebuilding the graph.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
    flag_name='checkpoint_path', default_value='',
    docstring='Checkpoint path')
tf.app.flags.DEFINE_string(
    flag_name='output_checkpoint_path', default_value='',
    docstring='Checkpoint path')
tf.app.flags.DEFINE_integer(flag_name='image_size',
                            default_value=200, docstring="Image size")
# Create placeholders
X = tf.placeholder(dtype=tf.float32, shape=(
None, FLAGS.image_size, FLAGS.image_size, 3), name='input_image')
# Load net architecture
net_final = cnn_architecture(X, is_training=False)
# Add softmax layer
prediction = tf.argmax(net_final, name="prediction")
saver = tf.train.Saver()
# Open session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, FLAGS.checkpoint_path)
saver.save(sess, FLAGS.output_checkpoint_path)
if __name__ == "__main__":
load_and_save_ckpt()
| 25.8 | 73 | 0.714729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.220155 |
8cc58816df23d3d88b38c25c7d3009e7519f3e78 | 2,458 | py | Python | pyopentsdb/put.py | mikecokina/pyopentsdb | b8d78e8f42aed4ebbd6ac3aff925071de41d6b52 | [
"MIT"
] | 2 | 2018-05-09T08:34:30.000Z | 2018-09-25T22:42:09.000Z | pyopentsdb/put.py | mikecokina/pyopentsdb | b8d78e8f42aed4ebbd6ac3aff925071de41d6b52 | [
"MIT"
] | 2 | 2018-12-24T10:51:30.000Z | 2019-01-21T13:55:11.000Z | pyopentsdb/put.py | mikecokina/pyopentsdb | b8d78e8f42aed4ebbd6ac3aff925071de41d6b52 | [
"MIT"
] | null | null | null | from pyopentsdb.utils import request_post
from pyopentsdb import errors
def validate_put_data(data):
    """Validate datapoint dict(s) destined for the OpenTSDB ``/api/put`` endpoint.

    Each datapoint must carry a non-empty ``metric`` and ``tags`` (OpenTSDB
    requires at least one tag) plus a ``timestamp`` and ``value``. Zero is a
    legal timestamp/value, so those two fields are only rejected when missing
    or None (the original truthiness check wrongly rejected 0).

    :param data: a datapoint dict or a list of datapoint dicts
    :raises errors.MissingArgumentError: if any required field is absent
    """
    if isinstance(data, dict):
        data = [data]
    for d in data:
        if not d.get('metric') or not d.get('tags') \
                or d.get('timestamp') is None or d.get('value') is None:
            raise errors.MissingArgumentError("Missing argument/s in put data")
def put(host, r_session, data, **kwargs):
    """POST datapoint(s) to the OpenTSDB ``/api/put`` endpoint.

    :param host: str base URL of the OpenTSDB server
    :param r_session: requests.Session
    :param data: a datapoint dict or a list of datapoint dicts

    :**kwargs options**:
        * **summary** * -- bool;
            Whether or not to return summary information
        * **details** * -- bool;
            Whether or not to return detailed information
        * **sync** * -- bool;
            Whether or not to wait for the data to be flushed to storage
            before returning the results.
        * **sync_timeout** * -- int;
            Timeout in milliseconds to wait for the flush before returning
            with an error; only honoured when ``sync`` is set. A value of 0
            means the write will not time out.

    :return: dict
    """
    validate_put_data(data)
    summary = kwargs.get('summary', False)
    details = kwargs.get('details', False)
    sync = kwargs.get('sync', False)
    # sync_timeout is only meaningful together with sync.
    if sync:
        sync_timeout = kwargs.get('sync_timeout', 0)
    else:
        sync_timeout = False
    url = api_url(host, summary, details, sync, sync_timeout)
    kwargs['data'] = data
    return request_post(url, r_session, **kwargs)
def api_url(host, summary, details, sync, sync_timeout):
    """Assemble the ``/api/put`` URL with the requested query flags.

    The first enabled flag is prefixed with '?', subsequent ones with '&'.
    ``sync_timeout`` is appended only when it is a real int (bools are
    excluded so a disabled ``False`` sentinel is ignored).
    """
    base = '{}/api/put/'.format(host)
    flags = []
    if summary:
        flags.append('summary')
    if details:
        flags.append('details')
    if sync:
        flags.append('sync')
    if isinstance(sync_timeout, int) and not isinstance(sync_timeout, bool):
        flags.append('sync_timeout={}'.format(sync_timeout))
    if flags:
        return base + '?' + '&'.join(flags)
    return base
| 42.37931 | 114 | 0.603743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,193 | 0.485354 |
8cc589b6aacef8f45330b91b841767184bdc1a72 | 1,848 | py | Python | preprocessing/twitterData.py | aldifahrezi/NLP_3A | 66e1e7fa84bf4af0b0760cf350c43ef75ff72542 | [
"Apache-2.0"
] | null | null | null | preprocessing/twitterData.py | aldifahrezi/NLP_3A | 66e1e7fa84bf4af0b0760cf350c43ef75ff72542 | [
"Apache-2.0"
] | null | null | null | preprocessing/twitterData.py | aldifahrezi/NLP_3A | 66e1e7fa84bf4af0b0760cf350c43ef75ff72542 | [
"Apache-2.0"
] | null | null | null | import re
import csv
import nltk
"""docstring for twitterClean"""
def __init__(self):
super(twitterClean, self).__init__()
def renameUser(corpus):
    """Strip @mentions (Twitter handles of up to 15 word characters) from
    every tweet in corpus; the character preceding the handle is removed
    too, as in the original pattern. Returns a new list."""
    mention = r'(^|[^@\w])@(\w{1,15})\b'
    return [re.sub(mention, '', tweet) for tweet in corpus]
def removeHashtag(corpus):
    """Strip #hashtags from every tweet in corpus. Returns a new list."""
    return [re.sub(r'#(\w+)', '', tweet) for tweet in corpus]
def removeURL(corpus):
    """Strip http: and https: URLs from every tweet in corpus.
    Returns a new list."""
    cleaned = []
    for tweet in corpus:
        tweet = re.sub(r'http:\S+', '', tweet, flags=re.MULTILINE)
        tweet = re.sub(r'https:\S+', '', tweet, flags=re.MULTILINE)
        cleaned.append(tweet)
    return cleaned
def removeEmoticon(corpus):
    """Strip common ASCII emoticons and all non-ASCII characters from every
    tweet in corpus. Returns a new list.

    Bug fix: the original called ``str.replace(pattern, '')`` with a regex
    pattern string -- a literal-match no-op whose return value was also
    discarded -- so emoticons were never actually removed. ``re.sub`` is
    used here instead.
    """
    emoticons_str = r"(?:[:=;B\-][oO\"\_\-]?[\-D\)\]\(\]/\\Op3]{2,3})"
    cleaned = []
    for tweet in corpus:
        tweet = re.sub(emoticons_str, '', tweet)
        # Drop anything outside the 7-bit ASCII range (unicode emoji etc.).
        cleaned.append(re.sub(r'[^\x00-\x7F]', '', tweet))
    return cleaned
def getTweetData(filename="dataset/Indonesian_Tweets.tsv"):
#Gain large corpus of tweets
toFeed = []
rawSentence = []
with open(filename, 'rU') as csvfile:
spamreader = csv.reader(csvfile, delimiter='\n', quotechar='|')
for spam in spamreader:
rawSentence.append(spam)
corpusSentence =[]
for individualSentence in rawSentence:
if individualSentence == []:
pass
else:
corpusSentence.append(individualSentence[0])
# corpusSentence = self.text.removeAll(corpusSentence)
_temp = removeURL(corpusSentence)
_temp = renameUser(_temp)
_temp = removeHashtag(_temp)
_temp = removeEmoticon(_temp)
for sentences in _temp:
token = nltk.wordpunct_tokenize(sentences.lower())
toFeed.append(token)
return toFeed | 26.782609 | 71 | 0.588745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.157468 |
8cc61c995bb4f660479b7eb7a1926b0e87dfaec1 | 894 | py | Python | wwwroot/cgi-bin/NetDict/format_conversion.py | fenshitianyue/WebDict | fc04133c3921d6c98f31c1e8608b6e1255088a10 | [
"MIT"
] | 1 | 2019-04-15T04:23:53.000Z | 2019-04-15T04:23:53.000Z | wwwroot/cgi-bin/NetDict/format_conversion.py | fenshitianyue/WebDict | fc04133c3921d6c98f31c1e8608b6e1255088a10 | [
"MIT"
] | null | null | null | wwwroot/cgi-bin/NetDict/format_conversion.py | fenshitianyue/WebDict | fc04133c3921d6c98f31c1e8608b6e1255088a10 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pymysql
import sys
# Python 2 idiom: force the default string encoding to UTF-8 so the
# Chinese dictionary entries survive implicit str/unicode conversions.
reload(sys)
sys.setdefaultencoding('utf8')
# Global in-memory dictionary {word: meaning}; filled by find(), dumped
# to disk by write_file().
base = {}
def write_file():
    """Dump the global `base` dictionary to disk, one
    word<0x03>meaning record per line."""
    out = open("/home/zanda/Desktop/PythonCode/new_formatted_data", "w+")
    for term, definition in base.items():
        out.write(term + "\3" + definition + "\n")
    out.close()
def find(cursor):
    """Load every (word, meaning) row of table `mydict` into the global
    `base` dict; prints a notice when the table is empty or the query
    fails."""
    try:
        cursor.execute("select * from mydict")
        if cursor.rowcount == 0:
            print("ๆฅ่ฏขไธบ็ฉบ")
        else:
            # Fetch the whole dictionary table and mirror it into `base`.
            rows = cursor.fetchall()
            for row in rows:
                base[row[0]] = row[1]
    except:
        print("ๆฅ่ฏขๅคฑ่ดฅ")
if __name__ == '__main__':
    # Connect to the local MySQL `Dict` database, mirror its contents into
    # the global `base`, dump them to disk, then disconnect.
    connection = pymysql.connect("localhost", "root", "nihao.", "Dict", charset="utf8")
    cur = connection.cursor()
    find(cur)
    write_file()
    connection.close()
8cc6690d4201d4fa3d78af7391584f7477660468 | 2,161 | py | Python | app/old/results_to_csv-mo.py | jpenney78/usabmx_results | 7301ba82c8c24f978d9d2196f6cb4311cb85b033 | [
"MIT"
] | null | null | null | app/old/results_to_csv-mo.py | jpenney78/usabmx_results | 7301ba82c8c24f978d9d2196f6cb4311cb85b033 | [
"MIT"
] | null | null | null | app/old/results_to_csv-mo.py | jpenney78/usabmx_results | 7301ba82c8c24f978d9d2196f6cb4311cb85b033 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from bs4 import BeautifulSoup
import urllib2
import re
import sys
import collections
#with open('grands.html') as f:
# soup = BeautifulSoup(f, 'html.parser')
# NOTE(review): this script is Python 2 (print statements, urllib2,
# dict.iteritems) -- run it with a Python 2 interpreter.
#
# Scrape a USA BMX race-results page (URL given as the last CLI argument)
# and print Missouri riders as "Name, Place, Class" rows, plus a per-race
# tally of how many MO riders took each finishing place.
url = sys.argv[-1]
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
groups = soup.findAll('h4', class_='race-result-group')
# Each race day lives in a <div id="rday_..."> container.
days = soup.findAll('div', id=re.compile('^rday_'))
print 'Name, Place, Class'
for day in days:
    title = day.find('h3', class_='race-result-title')
    # NOTE(review): the real title tag is immediately clobbered with the
    # literal string 'title' and the .text extraction is commented out --
    # looks like leftover debugging; confirm whether the real race title
    # should be printed instead.
    title = 'title'
    #title = title.text
    print '\n\n\nRACE: {}, ,'.format(title)
    groups = day.findAll('h4', class_='race-result-group')
    uls = day.findAll('ul', class_='race-result-list')
    count = 0
    # places[n] counts MO riders finishing n-th; pre-seed 1..7 so common
    # places appear in the summary even with zero counts.
    places = collections.OrderedDict()
    p = 1
    while p < 8:
        places[p] = 0
        p += 1
    for ul in uls:
        # groups[count] is the class heading matching this result list.
        group = groups[count].text
        class_name = group.split('Total Riders')[0].rstrip()
        for li in ul.findAll('li'):
            # Each <li> carries two <span>s: finishing place and rider info.
            (place, rider) = li.findAll('span')
            # Rider text is comma separated: "Name[, Sponsor], City, State".
            rider_info = rider.text.rstrip().split(',')
            rider_info_len = len(rider_info)
            if rider_info_len >= 3:
                if rider_info_len == 4:
                    rider_sponsor = rider_info[1]
                else:
                    rider_sponsor = 'Privateer'
                rider_name = rider_info[0]
                rider_city = rider_info[-2]
                rider_state = rider_info[-1]
                # The comma split leaves a leading space on the state field.
                if rider_state == ' MO':
                    try:
                        places[int(place.text)] += 1
                    except:
                        # NOTE(review): if place.text is not numeric, the
                        # int() here raises again inside the handler --
                        # confirm the intended fallback.
                        places[int(place.text)] = 1
                    print '{}, {}, {}'.format(rider_name, place.text, class_name)
                else:
                    pass
        count += 1
    print '\n\nPlace Count:'
    for k,v in places.iteritems():
        print '{} - {}'.format(k,v)
8cc70168627807f49cf2b5d78f4a3cbbdad4ed64 | 46 | py | Python | bmcs_beam/mxn/scripts/__init__.py | bmcs-group/bmcs_beam | b53967d0d0461657ec914a3256ec40f9dcff80d5 | [
"MIT"
] | 1 | 2021-05-07T11:10:27.000Z | 2021-05-07T11:10:27.000Z | bmcs_beam/mxn/scripts/__init__.py | bmcs-group/bmcs_beam | b53967d0d0461657ec914a3256ec40f9dcff80d5 | [
"MIT"
] | null | null | null | bmcs_beam/mxn/scripts/__init__.py | bmcs-group/bmcs_beam | b53967d0d0461657ec914a3256ec40f9dcff80d5 | [
"MIT"
] | null | null | null | '''
Created on Dec 18, 2016
@author: rch
'''
| 7.666667 | 23 | 0.586957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.978261 |
8cc8bbe3155b2a365f5d5828b375c3db252991ed | 28,180 | py | Python | upvote_post_comments_timebased.py | YuurinBee/steemrewarding | 3bec63381e3ebe6a5131b4694237e27258251551 | [
"MIT"
] | 13 | 2019-01-27T05:53:46.000Z | 2021-07-25T23:24:38.000Z | upvote_post_comments_timebased.py | YuurinBee/steemrewarding | 3bec63381e3ebe6a5131b4694237e27258251551 | [
"MIT"
] | 1 | 2019-08-22T09:29:34.000Z | 2019-08-22T09:29:34.000Z | upvote_post_comments_timebased.py | YuurinBee/steemrewarding | 3bec63381e3ebe6a5131b4694237e27258251551 | [
"MIT"
] | 13 | 2019-04-04T04:17:25.000Z | 2021-08-02T12:55:24.000Z | from beem.utils import formatTimeString, resolve_authorperm, construct_authorperm, addTzInfo
from beem.nodelist import NodeList
from beem.comment import Comment
from beem import Steem
from beem.account import Account
from beem.instance import set_shared_steem_instance
from beem.blockchain import Blockchain
import time
import json
import os
import math
import dataset
import random
from datetime import date, datetime, timedelta
from dateutil.parser import parse
from beem.constants import STEEM_100_PERCENT
from steemrewarding.post_storage import PostsTrx
from steemrewarding.command_storage import CommandsTrx
from steemrewarding.vote_rule_storage import VoteRulesTrx
from steemrewarding.pending_vote_storage import PendingVotesTrx
from steemrewarding.config_storage import ConfigurationDB
from steemrewarding.vote_storage import VotesTrx
from steemrewarding.vote_log_storage import VoteLogTrx
from steemrewarding.failed_vote_log_storage import FailedVoteLogTrx
from steemrewarding.broadcast_vote_storage import BroadcastVoteTrx
from steemrewarding.utils import isfloat, upvote_comment, valid_age, upvote_comment_without_check
from steemrewarding.version import version as rewardingversion
from steemrewarding.account_storage import AccountsDB
from steemrewarding.version import version as rewarding_version
import dataset
if __name__ == "__main__":
config_file = 'config.json'
if not os.path.isfile(config_file):
raise Exception("config.json is missing!")
else:
with open(config_file) as json_data_file:
config_data = json.load(json_data_file)
# print(config_data)
databaseConnector = config_data["databaseConnector"]
wallet_password = config_data["wallet_password"]
posting_auth_acc = config_data["posting_auth_acc"]
voting_round_sec = config_data["voting_round_sec"]
start_prep_time = time.time()
db = dataset.connect(databaseConnector)
# Create keyStorage
print("Start upvote_post_comments_timebased.py")
nobroadcast = False
# nobroadcast = True
postTrx = PostsTrx(db)
votesTrx = VotesTrx(db)
voteRulesTrx = VoteRulesTrx(db)
confStorage = ConfigurationDB(db)
pendingVotesTrx = PendingVotesTrx(db)
voteLogTrx = VoteLogTrx(db)
failedVoteLogTrx = FailedVoteLogTrx(db)
accountsTrx = AccountsDB(db)
broadcastVoteTrx = BroadcastVoteTrx(db)
conf_setup = confStorage.get()
# last_post_block = conf_setup["last_post_block"]
nodes = NodeList()
# nodes.update_nodes(weights={"block": 1})
try:
nodes.update_nodes()
except:
print("could not update nodes")
node_list = nodes.get_nodes(exclude_limited=False)
stm = Steem(node=node_list, num_retries=5, call_num_retries=3, timeout=15, nobroadcast=nobroadcast)
stm.wallet.unlock(wallet_password)
last_voter = None
print("Start apply new timebased votes")
voter_counter = 0
delete_pending_votes = []
rc_sp_to_low_account_list = []
vote_counter = 0
vote_count = 0
for pending_vote in pendingVotesTrx.get_command_list_timed():
settings = None
voter_acc = None
author, permlink = resolve_authorperm(pending_vote["authorperm"])
if pending_vote["voter"] in rc_sp_to_low_account_list:
continue
age_min = (datetime.utcnow() - pending_vote["comment_timestamp"]).total_seconds() / 60
maximum_vote_delay_min = pending_vote["maximum_vote_delay_min"]
if age_min < pending_vote["vote_delay_min"] - voting_round_sec / 2.0 / 60 - 3:
# print("%s is not ready yet - %.2f min should be %.2f" % (pending_vote["authorperm"], age_min, pending_vote["vote_delay_min"]))
continue
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is None:
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
print("update %s - did not exists" % pending_vote["voter"])
posting_auth = False
for a in voter_acc["posting"]["account_auths"]:
if a[0] == posting_auth_acc:
posting_auth = True
if pending_vote["voter"] == posting_auth_acc:
posting_auth = True
accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(),
"sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(),
"posting_auth_acc": posting_auth})
pause_votes_below_vp = 0
settings = accountsTrx.get(pending_vote["voter"])
elif settings["sp"] is None or settings["vp"] is None or settings["last_update"] is None or settings["rc"] is None or settings["posting_auth_acc"] is None:
print("update %s - None" % pending_vote["voter"])
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
posting_auth = False
for a in voter_acc["posting"]["account_auths"]:
if a[0] == posting_auth_acc:
posting_auth = True
if pending_vote["voter"] == posting_auth_acc:
posting_auth = True
accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(),
"sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(),
"posting_auth_acc": posting_auth})
settings = accountsTrx.get(pending_vote["voter"])
elif (datetime.utcnow() - settings["last_update"]).total_seconds() / 60 > 1:
print("update %s - last update was before %f s" % (pending_vote["voter"], (datetime.utcnow() - settings["last_update"]).total_seconds()))
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
posting_auth = False
for a in voter_acc["posting"]["account_auths"]:
if a[0] == posting_auth_acc:
posting_auth = True
if pending_vote["voter"] == posting_auth_acc:
posting_auth = True
accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(),
"sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(),
"posting_auth_acc": posting_auth})
settings = accountsTrx.get(pending_vote["voter"])
if pending_vote["vote_weight"] > 0:
pause_votes_below_vp = settings["pause_votes_below_vp"]
vp = settings["vp"]
else:
pause_votes_below_vp = settings["pause_down_votes_below_down_vp"]
vp = settings["down_vp"]
vp_update = settings["last_update"]
if vp_update is not None:
diff_in_seconds = ((datetime.utcnow()) - (vp_update)).total_seconds()
regenerated_vp = diff_in_seconds * 10000 / 432000 / 100
vp = vp + regenerated_vp
#down_vp = down_vp + regenerated_vp
if vp > 100:
vp = 100
#if down_vp > 100:
# down_vp = 100
if vp < pause_votes_below_vp:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Voting is paused (VP = %.2f %%, which below pause_votes_below_vp of %.2f %%)" % (vp, pause_votes_below_vp),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
# print("time vote %.2f s - %d votes" % (time.time() - start_prep_time, vote_count))
if (pending_vote["vote_weight"] is None or pending_vote["vote_weight"] == 0) and (pending_vote["vote_sbd"] is None or float(pending_vote["vote_sbd"]) <= 0):
# voter_acc = Account(pending_vote["voter"], steem_instance=stm)
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "vote_weight was set to zero. (%s %% and %s $)" % (pending_vote["vote_weight"], pending_vote["vote_sbd"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if maximum_vote_delay_min < 0:
maximum_vote_delay_min = 9360
if age_min > maximum_vote_delay_min + voting_round_sec / 60:
# voter_acc = Account(pending_vote["voter"], steem_instance=stm)
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "post is older than %.2f min." % (maximum_vote_delay_min),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
voter_counter += 1
# voter_acc = Account(pending_vote["voter"], steem_instance=stm)
if settings["sp"] < 0.1:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not vot %s, as Steem Power is almost zero." % (pending_vote["authorperm"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
print("Could not process %s - sp < 0.1" % pending_vote["authorperm"])
rc_sp_to_low_account_list.append(pending_vote["voter"])
continue
if settings["rc"] < 0.5:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not vot %s, as RC is almost zero." % (pending_vote["authorperm"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
print("Could not process %s - rc to low" % pending_vote["authorperm"])
rc_sp_to_low_account_list.append(pending_vote["voter"])
continue
vote_weight = pending_vote["vote_weight"]
if vote_weight is None or vote_weight == 0:
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
vote_weight = voter_acc.get_vote_pct_for_SBD(float(pending_vote["vote_sbd"])) / 100.
if vote_weight > 100:
vote_weight = 100
elif vote_weight < 0.01:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "vote_weight was set to zero.",
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": voter_acc.vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
age_hour = ((datetime.utcnow()) - pending_vote["created"]).total_seconds() / 60 / 60
if age_hour > 156:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "post is older than 6.5 days.",
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if vp < pending_vote["min_vp"]:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Voting power is %.2f %%, which is to low. (min_vp is %.2f %%)" % (vp, pending_vote["min_vp"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if pending_vote["max_votes_per_day"] > -1:
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is not None:
sliding_time_window = settings["sliding_time_window"]
else:
sliding_time_window = True
votes_24h_before = voteLogTrx.get_votes_per_day(pending_vote["voter"], author, sliding_time_window)
if votes_24h_before >= pending_vote["max_votes_per_day"]:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The author was already upvoted %d in the last 24h (max_votes_per_day is %d)." % (votes_24h_before, pending_vote["max_votes_per_day"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if pending_vote["max_votes_per_week"] > -1:
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is not None:
sliding_time_window = settings["sliding_time_window"]
else:
sliding_time_window = True
votes_168h_before = voteLogTrx.get_votes_per_week(pending_vote["voter"], author, sliding_time_window)
if votes_168h_before >= pending_vote["max_votes_per_week"]:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The author was already upvoted %d in the last 7 days (max_votes_per_week is %d)." % (votes_168h_before, pending_vote["max_votes_per_week"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"],"vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if pending_vote["vp_scaler"] > 0:
vote_weight *= 1 - ((100 - vp) / 100 * pending_vote["vp_scaler"])
if abs(vote_weight) < 0.02:
error_msg = "Vote weight is zero or below zero (%.2f %%)" % vote_weight
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": error_msg,
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"],"vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
cnt = 0
c = None
while c is None and cnt < 5:
cnt += 1
try:
c = Comment(pending_vote["authorperm"], use_tags_api=True, steem_instance=stm)
c.refresh()
except:
nodelist = NodeList()
nodelist.update_nodes()
stm = Steem(node=nodelist.get_nodes(), num_retries=5, call_num_retries=3, timeout=15, nobroadcast=nobroadcast)
time.sleep(1)
if cnt == 5:
print("Could not read %s" % (pending_vote["authorperm"]))
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not process %s" % (pending_vote["authorperm"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
print("Could not process %s" % pending_vote["authorperm"])
continue
votes_list = votesTrx.get_authorperm_votes(pending_vote["authorperm"])
try:
if pending_vote["max_net_votes"] >= 0 and pending_vote["max_net_votes"] < len(votes_list):
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The number of post/comment votes (%d) is higher than max_net_votes (%d)." % (len(votes_list), pending_vote["max_net_votes"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
except:
continue
if False and pending_vote["max_pending_payout"] >= 0 and pending_vote["max_pending_payout"] < float(c["pending_payout_value"]):
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The pending payout of post/comment votes (%.2f) is higher than max_pending_payout (%.2f)." % (float(c["pending_payout_value"]), pending_vote["max_pending_payout"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
# check for max votes per day/week
already_voted = False
for v in votes_list:
if pending_vote["voter"] == v["voter"]:
already_voted = True
if not settings["posting_auth_acc"] or already_voted:
if already_voted:
error_msg = "already voted."
else:
error_msg = "posting authority is missing"
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": error_msg,
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
# sucess = upvote_comment(c, pending_vote["voter"], vote_weight)
if False:
reply_message = upvote_comment_without_check(c, pending_vote["voter"], vote_weight)
if reply_message is not None:
vote_count += 1
if pending_vote["leave_comment"]:
try:
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is not None and "upvote_comment" in settings and settings["upvote_comment"] is not None:
json_metadata = {'app': 'rewarding/%s' % (rewarding_version)}
reply_body = settings["upvote_comment"]
reply_body = reply_body.replace("{{name}}", "@%s" % c["author"] ).replace("{{voter}}", "@%s" % pending_vote["voter"])
c.reply(reply_body, author=pending_vote["voter"], meta=json_metadata)
except:
print("Could not leave comment!")
voteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "author": c["author"],
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"voted_after_min": age_min, "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"trail_vote": pending_vote["trail_vote"], "main_post": pending_vote["main_post"],
"voter_to_follow": pending_vote["voter_to_follow"]})
expiration = formatTimeString(reply_message["expiration"]).replace(tzinfo=None)
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
else:
expiration = datetime.utcnow()
broadcastVoteTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"],
"weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"],
"vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"],
"author": c["author"], "voted_after_min": 0, "created": datetime.utcnow(), "vp": settings["vp"], "down_vp": settings["down_vp"],
"maximum_vote_delay_min": pending_vote["maximum_vote_delay_min"], "comment_timestamp": pending_vote["comment_timestamp"],
"trail_vote": pending_vote["trail_vote"], "voter_to_follow": pending_vote["voter_to_follow"], "leave_comment": pending_vote["leave_comment"],
"vote_timestamp": pending_vote["comment_timestamp"] + timedelta(seconds=pending_vote["vote_delay_min"]/60),
"max_votes_per_day": pending_vote["max_votes_per_day"], "max_votes_per_week": pending_vote["max_votes_per_week"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
for pending_vote in delete_pending_votes:
pendingVotesTrx.delete(pending_vote["authorperm"], pending_vote["voter"], pending_vote["vote_when_vp_reached"])
delete_pending_votes = []
print("%d voter have been checked!" % voter_counter)
print("time vote %.2f s - %d votes" % (time.time() - start_prep_time, vote_count))
| 67.740385 | 282 | 0.610752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,833 | 0.313449 |
8cca8d879ebb48981faec1a262d979d1e0c2e298 | 13,639 | py | Python | py_src/mlpiot/proto/scene_description_pb2.py | machine2learn/mlpiot.base | da0b77fccbb0e42d1ddbb6dbc490313433dc7575 | [
"Apache-2.0"
] | 1 | 2021-03-30T20:49:54.000Z | 2021-03-30T20:49:54.000Z | py_src/mlpiot/proto/scene_description_pb2.py | machine2learn/mlpiot.base | da0b77fccbb0e42d1ddbb6dbc490313433dc7575 | [
"Apache-2.0"
] | null | null | null | py_src/mlpiot/proto/scene_description_pb2.py | machine2learn/mlpiot.base | da0b77fccbb0e42d1ddbb6dbc490313433dc7575 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlpiot/proto/scene_description.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mlpiot.proto import google_color_pb2 as mlpiot_dot_proto_dot_google__color__pb2
from mlpiot.proto import google_geometry_pb2 as mlpiot_dot_proto_dot_google__geometry__pb2
from mlpiot.proto import google_timestamp_pb2 as mlpiot_dot_proto_dot_google__timestamp__pb2
from mlpiot.proto import image_pb2 as mlpiot_dot_proto_dot_image__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlpiot/proto/scene_description.proto',
package='mlpiot.proto',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n$mlpiot/proto/scene_description.proto\x12\x0cmlpiot.proto\x1a\x1fmlpiot/proto/google_color.proto\x1a\"mlpiot/proto/google_geometry.proto\x1a#mlpiot/proto/google_timestamp.proto\x1a\x18mlpiot/proto/image.proto\"I\n\x17SceneDescriptorMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x05\x12\x0f\n\x07payload\x18\x03 \x01(\t\"\xc5\x01\n\rObjectInScene\x12\x12\n\nclass_name\x18\x01 \x01(\t\x12\x16\n\x0e\x63lass_icon_url\x18\x02 \x01(\t\x12\x30\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32\x1a.mlpiot.proto.BoundingPoly\x12\x12\n\nconfidence\x18\x04 \x01(\x02\x12\r\n\x05score\x18\x05 \x01(\x02\x12\"\n\x05\x63olor\x18\x06 \x01(\x0b\x32\x13.mlpiot.proto.Color\x12\x0f\n\x07payload\x18\x07 \x01(\t\"\xeb\x01\n\x10SceneDescription\x12*\n\ttimestamp\x18\x01 \x01(\x0b\x32\x17.mlpiot.proto.Timestamp\x12\x37\n\x08metadata\x18\x02 \x01(\x0b\x32%.mlpiot.proto.SceneDescriptorMetadata\x12,\n\x07objects\x18\x03 \x03(\x0b\x32\x1b.mlpiot.proto.ObjectInScene\x12,\n\x0f\x61nnotated_image\x18\x04 \x01(\x0b\x32\x13.mlpiot.proto.Image\x12\x16\n\x0einput_image_id\x18\x05 \x01(\x03\"\xad\x01\n\x15SceneDescriptionArray\x12*\n\ttimestamp\x18\x01 \x01(\x0b\x32\x17.mlpiot.proto.Timestamp\x12\x37\n\x08metadata\x18\x02 \x01(\x0b\x32%.mlpiot.proto.SceneDescriptorMetadata\x12/\n\x07objects\x18\x03 \x03(\x0b\x32\x1e.mlpiot.proto.SceneDescriptionb\x06proto3'
,
dependencies=[mlpiot_dot_proto_dot_google__color__pb2.DESCRIPTOR,mlpiot_dot_proto_dot_google__geometry__pb2.DESCRIPTOR,mlpiot_dot_proto_dot_google__timestamp__pb2.DESCRIPTOR,mlpiot_dot_proto_dot_image__pb2.DESCRIPTOR,])
_SCENEDESCRIPTORMETADATA = _descriptor.Descriptor(
name='SceneDescriptorMetadata',
full_name='mlpiot.proto.SceneDescriptorMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='mlpiot.proto.SceneDescriptorMetadata.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='mlpiot.proto.SceneDescriptorMetadata.version', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='mlpiot.proto.SceneDescriptorMetadata.payload', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=186,
serialized_end=259,
)
_OBJECTINSCENE = _descriptor.Descriptor(
name='ObjectInScene',
full_name='mlpiot.proto.ObjectInScene',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_name', full_name='mlpiot.proto.ObjectInScene.class_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_icon_url', full_name='mlpiot.proto.ObjectInScene.class_icon_url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bounding_box', full_name='mlpiot.proto.ObjectInScene.bounding_box', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence', full_name='mlpiot.proto.ObjectInScene.confidence', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='score', full_name='mlpiot.proto.ObjectInScene.score', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='color', full_name='mlpiot.proto.ObjectInScene.color', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='mlpiot.proto.ObjectInScene.payload', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=262,
serialized_end=459,
)
_SCENEDESCRIPTION = _descriptor.Descriptor(
name='SceneDescription',
full_name='mlpiot.proto.SceneDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='mlpiot.proto.SceneDescription.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='mlpiot.proto.SceneDescription.metadata', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objects', full_name='mlpiot.proto.SceneDescription.objects', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotated_image', full_name='mlpiot.proto.SceneDescription.annotated_image', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_image_id', full_name='mlpiot.proto.SceneDescription.input_image_id', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=462,
serialized_end=697,
)
_SCENEDESCRIPTIONARRAY = _descriptor.Descriptor(
name='SceneDescriptionArray',
full_name='mlpiot.proto.SceneDescriptionArray',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='mlpiot.proto.SceneDescriptionArray.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='mlpiot.proto.SceneDescriptionArray.metadata', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objects', full_name='mlpiot.proto.SceneDescriptionArray.objects', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=700,
serialized_end=873,
)
_OBJECTINSCENE.fields_by_name['bounding_box'].message_type = mlpiot_dot_proto_dot_google__geometry__pb2._BOUNDINGPOLY
_OBJECTINSCENE.fields_by_name['color'].message_type = mlpiot_dot_proto_dot_google__color__pb2._COLOR
_SCENEDESCRIPTION.fields_by_name['timestamp'].message_type = mlpiot_dot_proto_dot_google__timestamp__pb2._TIMESTAMP
_SCENEDESCRIPTION.fields_by_name['metadata'].message_type = _SCENEDESCRIPTORMETADATA
_SCENEDESCRIPTION.fields_by_name['objects'].message_type = _OBJECTINSCENE
_SCENEDESCRIPTION.fields_by_name['annotated_image'].message_type = mlpiot_dot_proto_dot_image__pb2._IMAGE
_SCENEDESCRIPTIONARRAY.fields_by_name['timestamp'].message_type = mlpiot_dot_proto_dot_google__timestamp__pb2._TIMESTAMP
_SCENEDESCRIPTIONARRAY.fields_by_name['metadata'].message_type = _SCENEDESCRIPTORMETADATA
_SCENEDESCRIPTIONARRAY.fields_by_name['objects'].message_type = _SCENEDESCRIPTION
DESCRIPTOR.message_types_by_name['SceneDescriptorMetadata'] = _SCENEDESCRIPTORMETADATA
DESCRIPTOR.message_types_by_name['ObjectInScene'] = _OBJECTINSCENE
DESCRIPTOR.message_types_by_name['SceneDescription'] = _SCENEDESCRIPTION
DESCRIPTOR.message_types_by_name['SceneDescriptionArray'] = _SCENEDESCRIPTIONARRAY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SceneDescriptorMetadata = _reflection.GeneratedProtocolMessageType('SceneDescriptorMetadata', (_message.Message,), {
'DESCRIPTOR' : _SCENEDESCRIPTORMETADATA,
'__module__' : 'mlpiot.proto.scene_description_pb2'
# @@protoc_insertion_point(class_scope:mlpiot.proto.SceneDescriptorMetadata)
})
_sym_db.RegisterMessage(SceneDescriptorMetadata)
ObjectInScene = _reflection.GeneratedProtocolMessageType('ObjectInScene', (_message.Message,), {
'DESCRIPTOR' : _OBJECTINSCENE,
'__module__' : 'mlpiot.proto.scene_description_pb2'
# @@protoc_insertion_point(class_scope:mlpiot.proto.ObjectInScene)
})
_sym_db.RegisterMessage(ObjectInScene)
SceneDescription = _reflection.GeneratedProtocolMessageType('SceneDescription', (_message.Message,), {
'DESCRIPTOR' : _SCENEDESCRIPTION,
'__module__' : 'mlpiot.proto.scene_description_pb2'
# @@protoc_insertion_point(class_scope:mlpiot.proto.SceneDescription)
})
_sym_db.RegisterMessage(SceneDescription)
SceneDescriptionArray = _reflection.GeneratedProtocolMessageType('SceneDescriptionArray', (_message.Message,), {
'DESCRIPTOR' : _SCENEDESCRIPTIONARRAY,
'__module__' : 'mlpiot.proto.scene_description_pb2'
# @@protoc_insertion_point(class_scope:mlpiot.proto.SceneDescriptionArray)
})
_sym_db.RegisterMessage(SceneDescriptionArray)
# @@protoc_insertion_point(module_scope)
| 45.768456 | 1,384 | 0.768311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,654 | 0.267908 |
8ccaa31f1cc37e58d280f157b7d7178e0c6240da | 6,385 | py | Python | main.py | Gaurav3009/SoftCoputingpROJECT | 9ece2cbe9d191308cad4166d9c68bc5bc748532c | [
"CC0-1.0"
] | null | null | null | main.py | Gaurav3009/SoftCoputingpROJECT | 9ece2cbe9d191308cad4166d9c68bc5bc748532c | [
"CC0-1.0"
] | null | null | null | main.py | Gaurav3009/SoftCoputingpROJECT | 9ece2cbe9d191308cad4166d9c68bc5bc748532c | [
"CC0-1.0"
] | null | null | null | import pygame
import random
import math
import numpy as np
from pygame import mixer
# Training data: each row of x is an (enemy_x, enemy_y) screen coordinate,
# paired with a target score in y on a 0-100 scale.  Presumably hand-collected
# from past play sessions -- TODO confirm where these samples came from.
x = np.array(([723, 123.4000000000003], [121, 133.40000000000038], [586, 125.40000000000032]), dtype=float )
y = np.array(([99], [86], [89]), dtype=float )
# Scale inputs feature-wise into [0, 1] by dividing each column by its maximum.
x = x / np.amax ( x, axis=0 )
# Targets are percentages, so dividing by 100 maps them into [0, 1] as well.
y = y / 100
class NeuralNetwork:
    """Minimal fully-connected network with one hidden layer.

    Architecture: ``input_size`` -> ``hidden_size`` (sigmoid) ->
    ``output_size`` (sigmoid), trained by full-batch gradient descent on the
    squared error.  The defaults reproduce the original hard-coded 2-3-1
    network, so existing ``NeuralNetwork()`` callers are unaffected.
    """

    def __init__(self, input_size=2, hidden_size=3, output_size=1,
                 learning_rate=1.0):
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.learning_rate = learning_rate
        # Weights, randomly initialised from a standard normal distribution.
        # (The original comment said "Heights" -- a typo.)
        self.w1 = np.random.randn(self.input_size, self.hidden_size)
        self.w2 = np.random.randn(self.hidden_size, self.output_size)

    def feed_forward(self, x):
        """Forward-propagate ``x`` (shape ``(n, input_size)``) through the net.

        Intermediate activations are cached on ``self`` because ``backward``
        reuses them.  Returns the ``(n, output_size)`` sigmoid output.
        """
        self.z = np.dot(x, self.w1)          # hidden pre-activation
        self.z2 = self.sigmoid(self.z)       # hidden activation
        self.z3 = np.dot(self.z2, self.w2)   # output pre-activation
        output = self.sigmoid(self.z3)
        return output

    def sigmoid(self, s, deriv=False):
        """Sigmoid activation; with ``deriv=True``, its derivative.

        NOTE: for the derivative, ``s`` must already be a sigmoid *output*
        (i.e. sigma(z)), since sigma'(z) = sigma(z) * (1 - sigma(z)).
        """
        if deriv:
            return s * (1 - s)
        return 1 / (1 + np.exp(-s))

    def backward(self, x, y, output):
        """Back-propagate the error for batch ``(x, y)`` and update weights."""
        self.output_error = y - output
        self.output_delta = self.output_error * self.sigmoid(output, deriv=True)
        self.z2_error = self.output_delta.dot(self.w2.T)
        self.z2_delta = self.z2_error * self.sigmoid(self.z2, deriv=True)
        # Gradient-descent step; learning_rate defaults to 1.0, which matches
        # the original (unscaled) update rule exactly.
        self.w1 += self.learning_rate * x.T.dot(self.z2_delta)
        self.w2 += self.learning_rate * self.z2.T.dot(self.output_delta)

    def train(self, x, y):
        """Run one forward/backward pass over the batch ``(x, y)``."""
        output = self.feed_forward(x)
        self.backward(x, y, output)
# Fit the network on the three hand-collected samples above.
# NOTE(review): 500000 full-batch iterations run at module import time and
# block the game from starting for a noticeable while -- consider lowering the
# count or moving this behind a __main__ guard.
NN = NeuralNetwork()
for i in range(500000):
    NN.train(x, y)
print("Predicted Output : " + str(NN.feed_forward(x)))
# Initialize all imported pygame modules (must run before any other pygame call)
pygame.init()
# create the 800x600 game window
screen = pygame.display.set_mode((800, 600))
# Background image, blitted every frame in the game loop
background = pygame.image.load("bg.png")
# Background music, looped forever (-1)
mixer.music.load("DeathMatch.ogg")
mixer.music.play(-1)
# Title and the Icon of the game window
pygame.display.set_caption("Space Shooter")
icon = pygame.image.load("ufoBlue.png")
pygame.display.set_icon(icon)
# Player sprite and its starting top-left position
playerImg = pygame.image.load("player.png")
X = 370
Y = 480
player_speed = 2
# NOTE(review): clock is created but never ticked below -- the frame rate is
# uncapped; confirm whether a clock.tick(...) call was intended in the loop.
clock = pygame.time.Clock()
# Enemies: parallel lists of sprite / x / y, one entry per enemy
enemyImg = []
enemyX = []
enemyY = []
num_of_enemies = 3
enemy_speed = 1
for i in range(num_of_enemies):
    enemyImg.append(pygame.image.load("alien.png"))
    enemyX.append(random.randint(0, 736))
    enemyY.append(random.randint(0, 20))
# Bullet sprite and state machine: "ready" = off screen (may fire),
# "fire" = currently travelling up the screen.
bullets = pygame.image.load("bullet_pl.png")
bulletX = 0
bulletY = 480
bullet_state = "ready"
bullet_x_change = 0
bullet_y_change = 8
# Score counter plus the font and position used to draw it
score_value = 0
font = pygame.font.Font('freesansbold.ttf', 32)
textX = 10
textY = 10
def show_score(x, y):
    """Render the current score in red at screen position (x, y)."""
    surface = font.render("Score : " + str(score_value), True, (255, 0, 0))
    screen.blit(surface, (x, y))
def player(x, y):
    # Draw the player sprite at (x, y).
    screen.blit(playerImg, (x, y))
def enemy(x, y, i):
    # Draw enemy *i*'s sprite at (x, y).
    screen.blit(enemyImg[i], (x, y))
def bullet(x, y):
    # Mark the bullet as in flight and draw it slightly above (x, y).
    global bullet_state
    bullet_state = "fire"
    # x+18.8 offsets the bullet horizontally — presumably to centre it on
    # the player sprite; confirm against the sprite widths.
    screen.blit(bullets, (x+18.8, y-5))
def is_collision(enemy_x, enemy_y, bullet_x, bullet_y):
    """Return True when the bullet is within 27 px (hit radius) of the enemy.

    Uses math.hypot for the Euclidean distance and returns the comparison
    directly instead of the original if/else returning True/False.
    """
    return math.hypot(enemy_x - bullet_x, enemy_y - bullet_y) < 27
# Game Loop
def run_game():
    """Run the main game loop until the window is closed.

    Reads and mutates module-level state (player position X/Y, enemy
    lists, bullet state, score). Each frame: draw background, process
    events, move/respawn enemies, check bullet collisions, let the
    network pick a target enemy, snap the player to it and fire.

    Fixes over the original:
    * ``bullet_state is "fire"`` used identity comparison on a string;
      replaced with ``==``.
    * The snap-to-target ``while`` loops stepped by ``player_speed``,
      which is set to 0 on any KEYUP event — making them spin forever;
      a positive fallback step is now used.
    """
    global playerImg
    global player_speed
    global bulletX
    global bulletY
    global score_value
    global bullet_state
    global X, Y
    global enemyX, enemyY
    left_move = False
    right_move = False
    blast = False
    action = True
    running = True
    while running:
        # Clear to black, then draw the background image.
        screen.fill((0, 0, 0))
        screen.blit(background, (0, 0))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            # Releasing any key stops manual movement.
            if event.type == pygame.KEYUP:
                player_speed = 0
        # Apply the current speed and clamp the player inside the window.
        X = X + player_speed
        if X <= 0:
            X = 0
        elif X >= 736:
            X = 736
        player(X, Y)
        # Enemy movement, respawn and bullet-collision handling.
        for i in range(num_of_enemies):
            enemyY[i] = enemyY[i] + enemy_speed
            if enemyY[i] >= 600:
                # Enemy escaped off the bottom: respawn near the top, lose a point.
                enemyX[i] = random.randint(0, 736)
                enemyY[i] = random.randint(0, 50)
                score_value -= 1
            enemy(enemyX[i], enemyY[i], i)
            collision = is_collision(enemyX[i], enemyY[i], bulletX, bulletY)
            if collision:
                # Hit: reset the bullet, gain a point, respawn the enemy.
                bulletY = 480
                bullet_state = "ready"
                score_value += 1
                print(score_value)
                enemyX[i] = random.randint(0, 736)
                enemyY[i] = random.randint(0, 50)
        # Feed the enemy positions to the network to pick a target.
        # NOTE(review): the argmax below assumes feed_forward yields one
        # comparable score per enemy row — confirm output_size == 1.
        x1 = ([enemyX[0], enemyY[0]], [enemyX[1], enemyY[1]], [enemyX[2], enemyY[2]])
        y1 = NN.feed_forward(x1)
        max_value_index = 0
        max_value = -1
        a = 0
        for i in y1:
            if i > max_value:
                max_value = i
                max_value_index = a
            a += 1
        # Snap the player towards the chosen enemy within this frame.
        # BUGFIX: fall back to a positive step so player_speed == 0
        # (set by any KEYUP) can no longer hang these loops.
        step = abs(player_speed) or 2
        if X < enemyX[max_value_index]:
            while X < enemyX[max_value_index]:
                X = X + step
        elif X > enemyX[max_value_index]:
            while X > enemyX[max_value_index]:
                X = X - step
        blast = True
        if action:
            if left_move:
                player_speed = -4
            if right_move:
                player_speed = 4
            if blast:
                # Fire from the player's current position.
                bulletX = X
                bullet_sound = mixer.Sound("laser5.wav")
                bullet_sound.play()
                bullet(X, bulletY)
                blast = False
        # Bullet movement: reset at the top of the screen, otherwise fly up.
        if bulletY <= 0:
            bullet_state = "ready"
            bulletX = 0
            bulletY = 480
        if bullet_state == "fire":  # BUGFIX: was 'is "fire"' (identity compare)
            bullet(bulletX, bulletY)
            bulletY -= bullet_y_change
        show_score(textX, textY)
        pygame.display.update()
        clock.tick(90)
# Start the game (blocks until the window is closed).
run_game()
| 26.168033 | 109 | 0.549883 | 1,273 | 0.199374 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.10462 |
8ccc3f085556e9b031c98cfec64f5d267b519580 | 3,771 | py | Python | include/fetchfile.py | dongniu/cadnano2 | 6805fe2af856c59b06373c0ee0142ad6bc286262 | [
"Unlicense"
] | 17 | 2015-02-07T03:46:49.000Z | 2021-09-25T09:23:41.000Z | include/fetchfile.py | scholer/cadnano2 | 0b8bba1ab3277ac9859ef78615890d351561784c | [
"Unlicense"
] | 2 | 2017-08-22T03:17:16.000Z | 2021-07-03T14:42:41.000Z | include/fetchfile.py | scholer/cadnano2 | 0b8bba1ab3277ac9859ef78615890d351561784c | [
"Unlicense"
] | 9 | 2015-09-06T22:41:38.000Z | 2022-03-27T13:57:37.000Z | #!/usr/bin/env python
# encoding: utf-8
# The MIT License
#
# Copyright (c) 2011 Wyss Institute at Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
from urllib2 import Request, urlopen, URLError, HTTPError
import sys
from os.path import basename, dirname, splitext, exists
import os
import shutil
import tarfile
def fetchFile(filename, baseurl, filemode='b', filetype='gz', filepath=None):
    """Download baseurl/filename into the working directory and, when
    filetype is 'gz', extract the tar archive and delete the download.

    filename -- name of the remote file (also used as the local name)
    baseurl  -- URL of the directory containing the file
    filemode -- extra open() mode flag for the local copy ('b' = binary)
    filetype -- 'gz' triggers tar extraction; anything else skips it
    filepath -- optional directory to extract into (defaults to cwd)

    Returns the extracted directory name (or the file name when no
    extraction took place).
    """
    # create the url and the request
    url = baseurl + '/' + filename
    request = Request(url)
    # Open the url
    try:
        f_url = urlopen(request)
        print "downloading " + url
        # Open our local file for writing
        f_dest = open(filename, "w" + filemode)
        # Write to our local file
        f_dest.write(f_url.read())
        f_dest.close()
    # handle errors
    except HTTPError, e:
        print "HTTP Error:", e.code , url
    except URLError, e:
        print "URL Error:", e.reason , url
    filename_out = filename
    # unzip if possible
    if filetype == 'gz':
        # get the extracted folder name: strip ".gz", then ".tar" if present
        filename_out = splitext(filename)[0]
        temp = splitext(filename_out)
        if temp[1] == '.tar':
            filename_out = temp[0]
        # open the archive
        # NOTE(review): tarfile errors carry no .code attribute, so these
        # handlers would themselves raise if triggered — confirm.
        try:
            f_zip= tarfile.open(filename, mode='r')
        except tarfile.ReadError, e:
            print "unable to read archive", e.code
        print "extracting " + filename_out
        try:
            if filepath:
                # remove existing folder at the target path before extracting
                if os.path.exists(filepath + '/' + filename_out):
                    print "file exists"
                    shutil.rmtree(filepath + '/' + filename_out)
                else:
                    print "file does not exist", filename_out
                f_zip.extractall(path=filepath)
            else:
                # remove existing folder in the cwd before extracting
                if os.path.exists(filename_out):
                    print "file exists"
                    shutil.rmtree(filename_out)
                else:
                    print "file does not exist", filename_out
                f_zip.extractall()
        except tarfile.ExtractError, e:
            print "unable to extract archive", e.code
        f_zip.close()
        # remove the archive
        print "removing the downloaded archive", filename
        os.remove(filename)
    print "done"
    return filename_out
# end def
if __name__ == '__main__':
    # CLI usage: fetchfile.py <url> — split the URL into file name and base.
    argv = sys.argv
    url = argv[1]
    filename = basename(url)
    base_url = dirname(url)
    fetchFile(filename, base_url)
| 33.669643 | 79 | 0.629011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,809 | 0.479714 |
8ccc7527ffc47c5ff344fbc2004901e2de010bf4 | 242 | py | Python | core/exception.py | simyy/flask_app | ec34a4e939620b2d261364b40e85f8211f8f9e9d | [
"MIT"
] | null | null | null | core/exception.py | simyy/flask_app | ec34a4e939620b2d261364b40e85f8211f8f9e9d | [
"MIT"
] | 2 | 2021-03-25T22:14:36.000Z | 2021-06-01T22:51:34.000Z | core/exception.py | simyy/flask_app | ec34a4e939620b2d261364b40e85f8211f8f9e9d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
class BaseException(Exception):
    """Application error carrying a numeric code and a message.

    NOTE(review): the name shadows the builtin ``BaseException``; renaming
    would break existing imports, so it is kept but flagged here.
    """

    def __init__(self, code, msg):
        # Populate Exception.args so repr()/pickling carry the payload
        # (the original skipped super().__init__, leaving args empty).
        super().__init__(code, msg)
        self.code = code
        self.msg = msg

    def __str__(self):
        return '<%s %s>' % (self.__class__.__name__, self.code)
| 20.166667 | 63 | 0.607438 | 203 | 0.838843 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.181818 |
8cccebd0d964c1db537e29204485613a1e3bfc4a | 4,410 | py | Python | pyjs/tests/test-report.py | allbuttonspressed/pyjs | c726fdead530eb63ee4763ae15daaa58d84cd58f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-09-19T09:14:16.000Z | 2018-09-19T09:14:16.000Z | pyjs/tests/test-report.py | andreyvit/pyjamas | 1154abe3340a84dba7530b8174aaddecfc1a0944 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | pyjs/tests/test-report.py | andreyvit/pyjamas | 1154abe3340a84dba7530b8174aaddecfc1a0944 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-11-18T14:17:59.000Z | 2019-11-18T14:17:59.000Z | #!/usr/bin/env python
import sys
import difflib
# Shared HTML differ used by test() below to render side-by-side diff tables.
differ = difflib.HtmlDiff()
class Coverage:
    """Minimal line-coverage tracer built on sys.settrace; writes an HTML
    report (one table per source file) to stdout."""

    def __init__(self, testset_name):
        self.testset_name = testset_name
        # filename -> {lineno: hit count}
        self.lines = {}

    def tracer(self, frame, event, arg):
        # sys.settrace callback: count every executed line per source file.
        lineno = frame.f_lineno
        filename = frame.f_globals["__file__"]
        # Normalise compiled extensions (.pyc/.pyo) back to the .py source.
        if filename[-4:] in [".pyc", ".pyo"]:
            filename = filename[:-1]
        self.lines[filename][lineno] = self.lines.setdefault(filename, {}).get(lineno, 0) + 1
        return self.tracer

    def start(self):
        # Install this tracer for all subsequent Python calls.
        sys.settrace(self.tracer)

    def stop(self):
        sys.settrace(None)

    def output(self, *files):
        # Emit the HTML report header with the per-class row styles.
        print """
<html>
<head>
<title>Coverage for %s</title>
<style>
body {
color: #000;
background-color: #FFF;
}
h1, h2 {
font-family: sans-serif;
font-weight: normal;
}
td {
white-space: pre;
padding: 1px 5px;
font-family: monospace;
font-size: 10pt;
}
td.hit {
}
td.hit-line {
}
td.miss {
background-color: #C33;
}
td.miss-line {
background-color: #FCC;
}
td.ignore {
color: #999;
}
td.ignore-line {
color: #999;
}
td.lineno {
color: #999;
background-color: #EEE;
}
</style>
</head>
<body>
""" % self.testset_name
        print """
<h1>Coverage for %s</h1>
""" % self.testset_name
        # One table per requested file: line number, hit count, source line.
        for filename in files:
            print """
<h2>%s</h2>
<table>
""" % filename
            code = open(filename).readlines()
            for lineno, line in enumerate(code):
                count = self.lines[filename].get(lineno + 1, 0)
                if count == 0:
                    # Blank lines, bare "else:" and comments are not
                    # executable — mark them ignored rather than missed.
                    if line.strip() in ["", "else:"] or line.strip().startswith("#"):
                        klass = "ignore"
                    else:
                        klass = "miss"
                else:
                    klass = "hit"
                klass2 = klass + "-line"
                print """<tr><td class="lineno">%s</td><td class="%s">%s</td><td class="%s">%s</td></tr>""" % (lineno + 1, klass, count, klass2, line.strip("\n"))
            print """
</table>
"""
        print """
</body>
</html>
"""
print """
<html>
<head>
<style>
.diff_add { background: #9F9; }
.diff_sub { background: #F99; }
.diff_chg { background: #FF9; }
.diff_header { background: #DDD; padding: 0px 3px; }
.diff_next { padding: 0px 3px; }
table.diff {
font-family: monospace;
}
</style>
</head>
<body>
"""
def test(filename, module):
    # Golden-file check: translate <filename>.py with pyjs and compare
    # against the checked-in <filename>.js; print an HTML diff on mismatch.
    print "<h1>" + filename + "</h1>"
    try:
        output = pyjs.translate(filename + ".py", module)
        desired_output = open(filename + ".js").read()
        if output == desired_output:
            print "<p>pass</p>"
        else:
            print differ.make_table(output.split("\n"), desired_output.split("\n"), context=True)
    except Exception, e:
        print "\texception", e
# Make the parent directory importable so the pyjs package under test is found.
import sys
sys.path.append("..")
import pyjs
# Regression suite: each call compares pyjs's output for testNNN.py
# against the checked-in testNNN.js golden file.
test("test001", "ui")
test("test002", "ui")
test("test003", "ui")
test("test004", "ui")
test("test005", "ui")
test("test006", "ui")
test("test007", "ui")
test("test008", "ui")
test("test009", "ui")
test("test010", None)
test("test011", None)
test("test012", None)
test("test013", "ui")
test("test014", None)
test("test015", None)
test("test016", None)
test("test017", None)
test("test018", None)
test("test019", None)
test("test020", None)
test("test021", None)
test("test022", None)
test("test023", None)
test("test024", None)
test("test025", None)
test("test026", None)
test("test027", None)
test("test028", None)
test("test029", None)
test("test030", None)
test("test031", None)
test("test032", None)
test("test033", None)
test("test034", None)
test("test035", None)
test("test036", None)
test("test037", None)
test("test038", None)
test("test039", None)
test("test040", None)
test("test041", None)
test("test042", None)
test("test043", None)
test("test044", None)
test("test045", None)
test("test046", None)
# Close the HTML report opened above.
print """
</body>
</html>
"""
8ccdb14c011a11d11901d7ef7caff934dda6ac3a | 763 | py | Python | datasets/pytorch_provider.py | ikhlestov/XNOR-Net | b95a57911858b9d750366c0d9d5e46ab3c7bf220 | [
"MIT"
] | 13 | 2017-08-25T07:45:48.000Z | 2022-02-06T14:46:37.000Z | datasets/pytorch_provider.py | ikhlestov/XNOR-Net | b95a57911858b9d750366c0d9d5e46ab3c7bf220 | [
"MIT"
] | null | null | null | datasets/pytorch_provider.py | ikhlestov/XNOR-Net | b95a57911858b9d750366c0d9d5e46ab3c7bf220 | [
"MIT"
] | 6 | 2017-08-10T19:53:09.000Z | 2018-04-21T03:31:33.000Z | import torch
import torchvision
import torchvision.transforms as transforms
# Convert PIL images to tensors and map each RGB channel from [0, 1]
# to [-1, 1] via (x - 0.5) / 0.5.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_loaders(batch_size, num_workers=2):
    """Return ``(train_loader, test_loader)`` for CIFAR-10.

    Downloads the dataset to /tmp/cifar10 on first use and applies the
    module-level ``transform``. The training loader shuffles; the test
    loader does not.

    BUGFIX: the ``batch_size`` argument was previously ignored — both
    loaders were hard-coded to batch_size=4. ``num_workers`` is newly
    parameterized with the original default of 2.
    """
    trainset = torchvision.datasets.CIFAR10(
        root='/tmp/cifar10', train=True,
        download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size,
        shuffle=True, num_workers=num_workers)

    testset = torchvision.datasets.CIFAR10(
        root='/tmp/cifar10', train=False,
        download=True, transform=transform)
    test_loader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size,
        shuffle=False, num_workers=num_workers)
    return train_loader, test_loader
| 28.259259 | 61 | 0.686763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.036697 |
8ccf7d8c58166e297f3266ddf7b93c9e740f2087 | 1,582 | py | Python | pymilvus_orm/__init__.py | PahudPlus/pymilvus-orm | 78e2e38e71cff92ed6d243dcac85314230ce0fdc | [
"Apache-2.0"
] | null | null | null | pymilvus_orm/__init__.py | PahudPlus/pymilvus-orm | 78e2e38e71cff92ed6d243dcac85314230ce0fdc | [
"Apache-2.0"
] | null | null | null | pymilvus_orm/__init__.py | PahudPlus/pymilvus-orm | 78e2e38e71cff92ed6d243dcac85314230ce0fdc | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""client module"""
from pkg_resources import get_distribution, DistributionNotFound
from .collection import Collection
from .connections import (
Connections,
connections,
add_connection,
list_connections,
get_connection_addr,
remove_connection,
connect,
get_connection,
disconnect
)
from .index import Index
from .partition import Partition
from .utility import (
loading_progress,
index_building_progress,
wait_for_loading_complete,
wait_for_index_building_complete,
has_collection,
has_partition,
list_collections,
)
from .search import SearchResult, Hits, Hit
from .types import DataType
from .schema import FieldSchema, CollectionSchema
from .future import SearchResultFuture, InsertFuture
# Package version: resolved from the installed distribution's metadata when
# available, otherwise left at the development placeholder.
__version__ = '0.0.0.dev'
try:
    __version__ = get_distribution('pymilvus-orm').version
except DistributionNotFound:
    # package is not installed
    pass
| 29.849057 | 99 | 0.73641 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 652 | 0.412137 |
8ccfac24e337defd5868156614ec3178f582fb2f | 1,550 | py | Python | test/python/test_preemptive.py | plandes/util | 20290b88406c7d4787857a80503af2de92b04cab | [
"MIT"
] | null | null | null | test/python/test_preemptive.py | plandes/util | 20290b88406c7d4787857a80503af2de92b04cab | [
"MIT"
] | null | null | null | test/python/test_preemptive.py | plandes/util | 20290b88406c7d4787857a80503af2de92b04cab | [
"MIT"
] | null | null | null | from typing import Iterable, Any
import unittest
from zensols.persist import ReadOnlyStash, PreemptiveStash
class RangeStash(ReadOnlyStash):
    """A read-only stash whose keys are the integers of ``range(n)`` (or
    ``range(n, end)`` when *end* is given) and whose values equal the keys.

    Records whether :meth:`keys` and :meth:`load` were called via the
    ``keyed`` and ``loaded`` flags so tests can observe access patterns.
    """

    def __init__(self, n: int, end: int = None):
        super().__init__()
        self.n = n
        self.end = end
        self.keyed = False
        self.loaded = False

    def load(self, name: str) -> Any:
        self.loaded = True
        return name if self.exists(name) else None

    def keys(self) -> Iterable[str]:
        self.keyed = True
        if self.end is None:
            return range(self.n)
        return range(self.n, self.end)

    def exists(self, name: str) -> bool:
        value = int(name)
        if self.end is None:
            return value < self.n
        return self.n <= value < self.end
class TestPreemptiveStash(unittest.TestCase):
    """Exercise PreemptiveStash's lazy delegation to a backing stash."""

    def setUp(self):
        # Delegate stash with keys 0..2 wrapped by the stash under test.
        self.rs = RangeStash(3)
        self.pe = PreemptiveStash(self.rs)

    def test_data_first(self):
        # Iterating first must hit both keys() and load() on the delegate
        # and yield the expected (key, value) pairs.
        self.assertFalse(self.rs.keyed)
        self.assertFalse(self.rs.loaded)
        self.assertEqual(((0, 0), (1, 1), (2, 2)), tuple(self.pe))
        self.assertTrue(self.pe.has_data)
        self.assertTrue(self.rs.keyed)
        self.assertTrue(self.rs.loaded)

    def test_has_data_first(self):
        # Checking has_data alone must only need keys(), never load().
        self.assertFalse(self.rs.keyed)
        self.assertFalse(self.rs.loaded)
        self.assertTrue(self.pe.has_data)
        self.assertTrue(self.rs.keyed)
        self.assertFalse(self.rs.loaded)
| 28.181818 | 66 | 0.589032 | 1,436 | 0.926452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8cd077908bea63bee9a58db575f3f5c0a02eb319 | 2,937 | py | Python | scripts/tokenize_corpora.py | Mrpatekful/Pytorch-MT | 65c53245a9ab0bf4f5d933a239de1bfd7be79c3f | [
"MIT"
] | 7 | 2018-02-15T10:54:57.000Z | 2018-03-07T16:53:35.000Z | scripts/tokenize_corpora.py | Mrpatekful/nmt-BMEVIAUAL01 | 65c53245a9ab0bf4f5d933a239de1bfd7be79c3f | [
"MIT"
] | null | null | null | scripts/tokenize_corpora.py | Mrpatekful/nmt-BMEVIAUAL01 | 65c53245a9ab0bf4f5d933a239de1bfd7be79c3f | [
"MIT"
] | null | null | null | """
"""
import tqdm
import argparse
# Default corpus paths and token-count bounds used when no CLI overrides
# are given (see main() below).
DEFAULT_INPUT = '/media/patrik/1EDB65B8599DD93E/data/eng/test'
DEFAULT_OUTPUT = '/media/patrik/1EDB65B8599DD93E/data/eng/test_tok'
DEFAULT_MIN = 3
DEFAULT_MAX = 60
def tokenize(word):
    """Split *word* into sub-tokens.

    Runs of alphanumeric characters (apostrophes included, so
    contractions stay whole) become single tokens; every other character
    becomes a token of its own. Order is preserved.
    """
    pieces = []
    current = ''
    for ch in word:
        if ch.isalnum() or ch == "'":
            current += ch
        else:
            if current:
                pieces.append(current)
                current = ''
            pieces.append(ch)
    if current:
        pieces.append(current)
    return pieces
def main():
    """Tokenize every line of the input corpus and write lower-cased lines
    whose token count lies strictly between min and max to the output;
    all other lines are dropped (counted and reported at the end)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--min', action='store', dest='min_length', type=int, default=DEFAULT_MIN,
                        help='minimum length of a line')
    parser.add_argument('--max', action='store', dest='max_length', type=int, default=DEFAULT_MAX,
                        help='maximum length of a line')
    parser.add_argument('-i', '--input', action='store', dest='input_file', type=str, default=DEFAULT_INPUT,
                        help='path of the input file')
    parser.add_argument('-o', '--output', action='store', dest='output_file', type=str, default=DEFAULT_OUTPUT,
                        help='path of the output file')

    arguments = parser.parse_args()

    _min_length = arguments.min_length
    _max_length = arguments.max_length
    _file_input = arguments.input_file
    _file_output = arguments.output_file

    removed_line_count = 0
    line_count = 0
    with open(_file_input, 'r', encoding='utf-8') as file_raw:
        with open(_file_output, 'w', encoding='utf-8') as file_tok:
            with tqdm.tqdm() as p_bar:
                p_bar.set_description('Tokenizing corpora')
                for line in file_raw:
                    p_bar.update()
                    line_count += 1
                    line_as_list = line.strip().split()
                    # Walk backwards so splicing sub-tokens in place does
                    # not disturb the indices still to be visited.
                    for word_index in range(len(line_as_list)-1, -1, -1):
                        if len(line_as_list[word_index]) > 1 and not line_as_list[word_index].isalnum():
                            sub_words = tokenize(line_as_list[word_index])
                            del line_as_list[word_index]
                            for sub_word_index in range(len(sub_words)):
                                line_as_list.insert(word_index + sub_word_index, sub_words[sub_word_index])
                    # Drop empty/whitespace tokens left over after splicing.
                    line_as_list = list(filter(lambda x: x != '' and x != ' ', line_as_list))
                    if _max_length > len(line_as_list) > _min_length:
                        file_tok.write('%s\n' % ' '.join(list(map(lambda x: x.lower(), line_as_list))))
                    else:
                        removed_line_count += 1
    print(f'Number of removed lines: {removed_line_count} ({float(removed_line_count/line_count)*100:.4}%)')
# Script entry point.
if __name__ == '__main__':
    main()
| 35.817073 | 111 | 0.578141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 496 | 0.16888 |
8cd09fbcc54d1551c8b23bec979629192ae13b6e | 4,192 | py | Python | linux/lib/python2.7/dist-packages/samba/tests/ntacls.py | nmercier/linux-cross-gcc | a5b0028fd2b72ec036a4725e93ba29d73cb753a6 | [
"BSD-3-Clause"
] | 3 | 2015-10-31T10:39:25.000Z | 2019-04-27T20:19:33.000Z | linux/lib/python2.7/dist-packages/samba/tests/ntacls.py | nmercier/linux-cross-gcc | a5b0028fd2b72ec036a4725e93ba29d73cb753a6 | [
"BSD-3-Clause"
] | null | null | null | linux/lib/python2.7/dist-packages/samba/tests/ntacls.py | nmercier/linux-cross-gcc | a5b0028fd2b72ec036a4725e93ba29d73cb753a6 | [
"BSD-3-Clause"
] | null | null | null | # Unix SMB/CIFS implementation. Tests for ntacls manipulation
# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2010
# Copyright (C) Andrew Bartlett 2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.ntacls."""
from samba.ntacls import setntacl, getntacl, XattrBackendError
from samba.param import LoadParm
from samba.dcerpc import security
from samba.tests import TestCaseInTempDir, SkipTest
import os
class NtaclsTests(TestCaseInTempDir):
def test_setntacl(self):
lp = LoadParm()
acl = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
open(self.tempf, 'w').write("empty")
lp.set("posix:eadb",os.path.join(self.tempdir,"eadbtest.tdb"))
setntacl(lp, self.tempf, acl, "S-1-5-21-2212615479-2695158682-2101375467")
os.unlink(os.path.join(self.tempdir,"eadbtest.tdb"))
def test_setntacl_getntacl(self):
lp = LoadParm()
acl = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
open(self.tempf, 'w').write("empty")
lp.set("posix:eadb",os.path.join(self.tempdir,"eadbtest.tdb"))
setntacl(lp,self.tempf,acl,"S-1-5-21-2212615479-2695158682-2101375467")
facl = getntacl(lp,self.tempf)
anysid = security.dom_sid(security.SID_NT_SELF)
self.assertEquals(facl.as_sddl(anysid),acl)
os.unlink(os.path.join(self.tempdir,"eadbtest.tdb"))
def test_setntacl_getntacl_param(self):
lp = LoadParm()
acl = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
open(self.tempf, 'w').write("empty")
setntacl(lp,self.tempf,acl,"S-1-5-21-2212615479-2695158682-2101375467","tdb",os.path.join(self.tempdir,"eadbtest.tdb"))
facl=getntacl(lp,self.tempf,"tdb",os.path.join(self.tempdir,"eadbtest.tdb"))
domsid=security.dom_sid(security.SID_NT_SELF)
self.assertEquals(facl.as_sddl(domsid),acl)
os.unlink(os.path.join(self.tempdir,"eadbtest.tdb"))
def test_setntacl_invalidbackend(self):
lp = LoadParm()
acl = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
open(self.tempf, 'w').write("empty")
self.assertRaises(XattrBackendError, setntacl, lp, self.tempf, acl, "S-1-5-21-2212615479-2695158682-2101375467","ttdb", os.path.join(self.tempdir,"eadbtest.tdb"))
def test_setntacl_forcenative(self):
if os.getuid() == 0:
raise SkipTest("Running test as root, test skipped")
lp = LoadParm()
acl = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
open(self.tempf, 'w').write("empty")
lp.set("posix:eadb", os.path.join(self.tempdir,"eadbtest.tdb"))
self.assertRaises(Exception, setntacl, lp, self.tempf ,acl,
"S-1-5-21-2212615479-2695158682-2101375467","native")
def setUp(self):
super(NtaclsTests, self).setUp()
self.tempf = os.path.join(self.tempdir, "test")
open(self.tempf, 'w').write("empty")
def tearDown(self):
os.unlink(self.tempf)
super(NtaclsTests, self).tearDown()
| 50.506024 | 179 | 0.70062 | 3,172 | 0.756679 | 0 | 0 | 0 | 0 | 0 | 0 | 2,134 | 0.509065 |
8cd0f31842c087bd945dcd13f2623103459a5e00 | 4,930 | py | Python | data_filter_azure/data_filter_azure/documentdb_server.py | abhushansahu/contrib | c95f68c9dbb48507a06dab4a070efd892765a3a4 | [
"Apache-2.0"
] | 238 | 2017-04-27T20:53:16.000Z | 2022-03-29T15:27:58.000Z | data_filter_azure/data_filter_azure/documentdb_server.py | abhushansahu/contrib | c95f68c9dbb48507a06dab4a070efd892765a3a4 | [
"Apache-2.0"
] | 88 | 2017-04-26T02:58:57.000Z | 2022-03-30T20:05:25.000Z | data_filter_azure/data_filter_azure/documentdb_server.py | abhushansahu/contrib | c95f68c9dbb48507a06dab4a070efd892765a3a4 | [
"Apache-2.0"
] | 152 | 2017-04-27T18:28:25.000Z | 2022-03-30T21:44:24.000Z | #!/usr/bin/env python
import requests
import base64
import json
from flask import Flask,redirect, jsonify, abort, make_response, g
import config
from flask_bootstrap import Bootstrap
import azure.common
from data_filter_azure import opa
import azure.cosmos.cosmos_client as cosmos_client
app = Flask(__name__)
Bootstrap(app)
def check_access_opa(registry_id, user_id, type, resource_name, action):
decision = query_opa(registry_id, user_id, type, resource_name, action)
if not decision.defined:
raise abort(403)
sql = opa.splice(SELECT='permissions.id', FROM='permissions JOIN map in permissions.map', WHERE=None, decision=decision)
print(sql)
result = query_cosmosdb(sql, args=None, one=True)
if len(result) == 0:
return False
return True
@app.route('/api/registries/<registry_id>/users/<user_id>/<type>/<resource_name>/<action>', methods=["GET"])
def api_check_access(registry_id, user_id, type, resource_name, action):
return jsonify(check_access_opa(registry_id, user_id, type, resource_name, action))
@app.route('/')
def index():
return redirect('https://docs.microsoft.com/en-us/azure/cosmos-db/introduction', code = 302)
def query_cosmosdb(query, args=[], one=False):
dbinfo = get_cosmosdb()
cosmosdbquery = {
"query": query
}
options = {}
options['enableCrossPartitionQuery'] = True
options['maxItemCount'] = 2
client = dbinfo['client']
container = dbinfo['container']
result_iterable = client.QueryItems(container['_self'], cosmosdbquery, options)
values = []
for item in iter(result_iterable):
return item
values.append(item)
return values
def query_opa(registry_id, user_id, type, resourceName, action):
input = {
'registry': registry_id,
'user': user_id,
'type': type,
'resourceName': resourceName,
'action': action
}
return opa.compile(q='data.documentdb.example.allow==true',
input=input,
unknowns=['permissions'])
def get_cosmosdb():
dbinfo = dict();
client = cosmos_client.CosmosClient(url_connection=config.COSMOSDB_ENDPOINT, auth={
'masterKey': config.COSMOSDB_PRIMARYKEY})
dbinfo['client'] = client
id = config.COSMOSDB_DATABASE
databases = list(client.QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{ "name":"@id", "value": id }
]
}))
if len(databases) > 0:
db = databases[0]
else:
db = client.CreateDatabase({'id': id})
dbinfo['db'] = db
containerid = 'permissions'
database_link = 'dbs/' + id
collections = list(client.QueryContainers(
database_link,
{
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{ "name":"@id", "value": containerid }
]
}
))
if len(collections) > 0:
container = collections[0]
else:
options = {
'offerThroughput': 400
}
container_definition = {
'id': containerid,
'partitionKey': {'paths': ['/registry']}
}
container = client.CreateContainer(db['_self'], container_definition, options)
dbinfo['container'] = container
return dbinfo
def add_documents():
dbinfo = get_cosmosdb()
client = dbinfo['client']
container = dbinfo['container']
for document in DOCUMENTS:
client.UpsertItem(container['_self'], document)
def init_db():
with app.app_context():
add_documents()
DOCUMENTS = [
{
'registry' : 'registry1',
'user': 'bob',
'id': 'blob',
'map': [
{
"type": "repositories",
"name": "repo1",
"actions": ["read", "write"]
},
{
"type": "repositories",
"name": "repo2",
"actions": ["*"]
},
{
"type": "charts",
"name": "chart1",
"actions": ["read", "write"]
},
{
"type": "pipelines",
"name": "*",
"actions": ["read"]
}
]
},
{
'registry' : 'registry1',
'user': 'alice',
'id': 'alice',
'map': [
{
"type": "repositories",
"name": "*",
"actions": ["*"]
},
{
"type": "charts",
"name": "chart1",
"actions": ["read"]
}
]
}
]
if __name__ == '__main__':
init_db()
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(debug=True)
| 28.333333 | 124 | 0.539351 | 0 | 0 | 0 | 0 | 394 | 0.079919 | 0 | 0 | 1,082 | 0.219473 |
8cd2916e88df0cf37c009472aa1b72d1e430bae2 | 4,310 | py | Python | Database_Development/RunningSQLBackEnd/training.py | data-intelligence-analysis/dataworks_scripts | 5148c33c42bef6d092fb1466b8970cb6961b97f0 | [
"Apache-2.0"
] | 3 | 2020-03-24T17:19:02.000Z | 2020-04-28T05:18:50.000Z | Database_Development/RunningSQLBackEnd/training.py | data-intelligence-analysis/dataworks_scripts | 5148c33c42bef6d092fb1466b8970cb6961b97f0 | [
"Apache-2.0"
] | 1 | 2021-06-29T17:19:03.000Z | 2021-06-29T17:19:03.000Z | Database_Development/RunningSQLBackEnd/training.py | data-intelligence-analysis/dataworks_scripts | 5148c33c42bef6d092fb1466b8970cb6961b97f0 | [
"Apache-2.0"
] | null | null | null | from sqlite3 import connect
##Dennis Osafo
def show_menu():
    """Print the interactive menu of supported operations."""
    for line in (
        "\n------",
        "MENU:",
        "_____",
        "1. Add a student",
        "2. Find a student",
        "3. Add a course",
        "4. Find a course",
        "5. Enroll a student",
        "6. Find Course(s) of a Student",
        "7. Find Student(s) of a Course",
        "8. Quit\n",
    ):
        print(line)
def add_student():
    """Prompt for a student id and name and insert them into *student*.

    SECURITY FIX: uses a parameterized query; the original concatenated
    the user-supplied values into the SQL string (SQL injection, and it
    also broke on names containing quotes).
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter student id...")
    y = input("Enter student name...")
    curs.execute("insert into student (studentid, studentname) values (?, ?);", (x, y))
    conn.commit()
    print("\nStudent " + x + "/" + y + " added successfully!")
    conn.close()
def find_student():
    """Prompt for a (partial) student name and print every matching row.

    SECURITY FIX: parameterized LIKE pattern instead of concatenating the
    user input into the SQL string. The pointless commit after a SELECT
    was removed.
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter student name...")
    curs.execute("select * from student where studentname like ?;", ("%" + x + "%",))
    print("\nHere is the list...")
    for row in curs.fetchall():
        print(row)
    conn.close()
def add_course():
    """Prompt for a course id and name and insert them into *course*.

    SECURITY FIX: parameterized query instead of string concatenation
    (SQL injection via the user-supplied values).
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter course id...")
    y = input("Enter course name...")
    curs.execute("insert into course (courseid, coursename) values (?, ?);", (x, y))
    conn.commit()
    print("\nCourse " + x + "/" + y + " added successfully!")
    conn.close()
def find_course():
    """Prompt for a (partial) course name and print every matching row.

    SECURITY FIX: parameterized LIKE pattern instead of concatenating the
    user input into the SQL string. The pointless commit after a SELECT
    was removed.
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter course name...")
    curs.execute("select * from course where coursename like ?;", ("%" + x + "%",))
    print("\nHere is the list...")
    for row in curs.fetchall():
        print(row)
    conn.close()
def enroll_student():
    """Prompt for a student id and course id and insert an enrollment row.

    Fixes: parameterized query (SQL injection via string concatenation),
    and the confirmation message was missing spaces around the ids
    ("...student 5in course7is successful").
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter student id...")
    y = input("\nEnter course id...")
    curs.execute("insert into enrollment(studentid, courseid) values (?, ?);", (x, y))
    conn.commit()
    print("\nEnrollment of student " + x + " in course " + y + " is successful")
    conn.close()
def find_a_course_for_student():
    """Prompt for a student id and print (student name, course name) for
    every course that student is enrolled in.

    SECURITY FIX: the student id is now bound as a query parameter rather
    than concatenated into the SQL string.
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter student id...")
    curs.execute(
        "select student.studentname, course.coursename "
        "from student, course, enrollment "
        "where student.studentid = enrollment.studentid "
        "and course.courseid = enrollment.courseid "
        "and enrollment.studentid = ?;",
        (x,))
    print("\nHere is the list...")
    for row in curs.fetchall():
        print(row)
    conn.close()
def find_student_for_course():
    """Prompt for a course id and print (student name, course name) for
    every student enrolled in that course.

    SECURITY FIX: the course id is now bound as a query parameter rather
    than concatenated into the SQL string.
    """
    conn = connect('training.db')
    curs = conn.cursor()
    x = input("\nEnter course id...")
    curs.execute(
        "select student.studentname, course.coursename "
        "from student, course, enrollment "
        "where student.studentid = enrollment.studentid "
        "and course.courseid = enrollment.courseid "
        "and enrollment.courseid = ?;",
        (x,))
    print("\nHere is the list...")
    for row in curs.fetchall():
        print(row)
    conn.close()
print("\nWelcome To Training Application!!!")
indicator = True
while indicator ==True:
show_menu()
option = input("Select an option....")
if option =="1":
try:
add_student()
except:
print("Error: function is unsuccessful")
elif option == "2":
try:
find_student()
except:
print("Error: function is unsuccessful")
elif option == "3":
try:
add_course()
except:
print("Error: function is unsuccessful")
elif option == "4":
try:
find_course()
except:
print("Error: function is unsuccessful")
elif option == "5":
try:
enroll_student()
except:
print("Error: function is unsuccessful")
elif option == "6":
try:
find_a_course_for_student()
except:
print("Error: function is unsuccessful")
elif option == "7":
try:
find_student_for_course()
except:
print("Error: function is unsuccessful")
elif option == "8":
indicator = False
##raise SystemExit (alternative to indicator = False)
| 32.651515 | 224 | 0.578422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,830 | 0.424594 |
8cd34bcdcf56fd743fbd7490bad25836dd4f32f3 | 1,723 | py | Python | res_mods/mods/packages/xvm_battle/python/battleloading.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | null | null | null | res_mods/mods/packages/xvm_battle/python/battleloading.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | 1 | 2016-04-03T13:31:39.000Z | 2016-04-03T16:48:26.000Z | res_mods/mods/packages/xvm_battle/python/battleloading.py | peterbartha/ImmunoMod | cbf8cd49893d7082a347c1f72c0e39480869318a | [
"MIT"
] | null | null | null | """ XVM (c) www.modxvm.com 2013-2017 """
#####################################################################
# imports
import cgi
import re
import traceback
import BigWorld
from gui.Scaleform.daapi.view.battle.shared.battle_loading import BattleLoading
from xfw import *
from xvm_main.python.logger import *
import xvm_main.python.config as config
#####################################################################
# handlers
@overrideMethod(BattleLoading, 'as_setTipTitleS')
def BattleLoading_as_setTipTitleS(base, self, title):
    # Override installed on BattleLoading.as_setTipTitleS via overrideMethod:
    # the incoming `title` argument is discarded and replaced with an XVM
    # version banner, colour-coded by the mod's config state, before the
    # original method (`base`) is invoked.
    # NOTE(review): assumes config.get('__stateInfo') returns a dict-like
    # object supporting `in` -- confirm against xvm_main.python.config.
    title = cgi.escape('XVM v{} {}'.format(config.get('__xvmVersion'), config.get('__xvmIntro')))
    stateInfo = config.get('__stateInfo')
    if 'error' in stateInfo:
        # Red title when the config reported an error.
        title = '<font color="#FF4040">{}</font>'.format(title)
    elif 'warning' in stateInfo:
        # Yellow title when the config reported a warning.
        title = '<font color="#FFD040">{}</font>'.format(title)
    title = '<p align="left"><font size="16">{}</font></p>'.format(title)
    return base(self, title)
@overrideMethod(BattleLoading, 'as_setTipS')
def BattleLoading_as_setTipS(base, self, val):
    # Override installed on BattleLoading.as_setTipS: when the config state
    # carries a non-empty error or warning message, the stock loading-screen
    # tip is replaced with that message (errors get extra highlighting);
    # otherwise the original tip `val` passes through unchanged.
    stateInfo = config.get('__stateInfo')
    if 'error' in stateInfo and stateInfo['error']:
        val = getTipText(stateInfo['error'], True)
    elif 'warning' in stateInfo and stateInfo['warning']:
        val = getTipText(stateInfo['warning'])
    return base(self, val)
def getTipText(text, isError=False):
    """Escape a tip message and wrap it in the loading-screen markup.

    When isError is true, line-number references and *.xc config file
    names inside the message are additionally highlighted in red.
    """
    escaped = cgi.escape(text)
    if isError:
        for pattern in (r'(line #\d+)', r'([^/\\]+\.xc)'):
            escaped = re.sub(pattern, r'<font color="#FF4040">\1</font>', escaped)
    return '<textformat leading="0"><p align="left"><font size="12">{}</font></p></textformat>'.format(escaped)
| 35.163265 | 112 | 0.608241 | 0 | 0 | 0 | 0 | 908 | 0.526988 | 0 | 0 | 653 | 0.37899 |
8cd36528540894945ddf92821a73804199e18f8f | 3,021 | py | Python | CV1_assignment3/problem1_Loesung.py | cjy513203427/CV_Assignment | ac837dcd67f0d237017ef0124210bf9da0151487 | [
"MIT"
] | null | null | null | CV1_assignment3/problem1_Loesung.py | cjy513203427/CV_Assignment | ac837dcd67f0d237017ef0124210bf9da0151487 | [
"MIT"
] | null | null | null | CV1_assignment3/problem1_Loesung.py | cjy513203427/CV_Assignment | ac837dcd67f0d237017ef0124210bf9da0151487 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.ndimage import convolve, maximum_filter
def gauss2d(sigma, fsize):
    """Create a normalized 2D Gaussian filter.

    Args:
        sigma: width (standard deviation) of the Gaussian
        fsize: (w, h) dimensions of the filter

    Returns:
        (h, w) np.array whose entries sum to 1
    """
    width, height = fsize
    # Sample coordinates centered on 0 (half-pixel offset for even sizes).
    xs = np.arange(-width / 2 + 0.5, width / 2)
    ys = np.arange(-height / 2 + 0.5, height / 2)
    grid_x, grid_y = np.meshgrid(xs, ys, sparse=True)
    kernel = np.exp(-(grid_x ** 2 + grid_y ** 2) / (2 * sigma ** 2))
    # Normalize so the filter does not change the mean image intensity.
    return kernel / np.sum(kernel)
def derivative_filters():
    """Central-difference derivative kernels for the x and y directions.

    Returns:
        fx: (1, 3) row kernel for derivatives in x
        fy: (3, 1) column kernel for derivatives in y
    """
    fx = np.array([0.5, 0.0, -0.5]).reshape(1, 3)
    # The y kernel is simply the x kernel turned into a column vector.
    fy = fx.T
    return fx, fy
def compute_hessian(img, gauss, fx, fy):
    """Compute the elements of the Hessian matrix of a smoothed image.

    Args:
        img: (h, w) input image
        gauss: Gaussian smoothing filter
        fx: derivative filter in x direction
        fy: derivative filter in y direction

    Returns:
        I_xx: (h, w) np.array of 2nd derivatives in x direction
        I_yy: (h, w) np.array of 2nd derivatives in y direction
        I_xy: (h, w) np.array of mixed 2nd derivatives
    """
    def filt(image, kernel):
        # All filtering uses mirrored boundary handling (cval is unused
        # with mode="mirror" but kept for parity with the call signature).
        return convolve(image, kernel, mode="mirror", cval=0)

    # Smooth first, then differentiate twice by repeated first-derivative
    # filtering.
    blurred = filt(img, gauss)
    grad_x = filt(blurred, fx)
    grad_y = filt(blurred, fy)
    I_xx = filt(grad_x, fx)
    I_yy = filt(grad_y, fy)
    I_xy = filt(grad_x, fy)
    return I_xx, I_yy, I_xy
def compute_criterion(I_xx, I_yy, I_xy, sigma):
    """Criterion function: scaled determinant of the Hessian matrix.

    Args:
        I_xx: (h, w) np.array of 2nd derivatives in x direction
        I_yy: (h, w) np.array of 2nd derivatives in y direction
        I_xy: (h, w) np.array of mixed 2nd derivatives
        sigma: scaling factor

    Returns:
        (h, w) np.array holding sigma^4 * det(H) at every pixel
    """
    # det(H) = I_xx * I_yy - I_xy^2 for the symmetric 2x2 Hessian.
    return sigma ** 4 * (I_xx * I_yy - I_xy ** 2)
def nonmaxsuppression(criterion, threshold):
    """Non-maximum suppression of criterion values -> interest points.

    Args:
        criterion: (h, w) np.array of criterion function values
        threshold: criterion threshold

    Returns:
        rows: (n,) np.array with y-positions of interest points
        cols: (n,) np.array with x-positions of interest points
    """
    # A pixel survives if it reaches the maximum of its 5x5 neighbourhood
    # and that maximum exceeds the threshold.
    local_max = maximum_filter(criterion, (5, 5), mode="mirror")
    is_peak = (local_max > threshold) & (criterion >= local_max)
    # Discard responses within 5 pixels of the image border.
    interior = np.zeros_like(is_peak)
    interior[5:-5, 5:-5] = is_peak[5:-5, 5:-5]
    return np.nonzero(interior)
| 26.5 | 92 | 0.604436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,651 | 0.546508 |
8cd3b7d036cac276f28d4d2b1c5af63da168cbbd | 15,856 | py | Python | assignment2/comp411/classifiers/fc_net.py | kukalbriiwa7/COMP511_CS231n | 1537e98cdca43fad906e56a22f48d884523414b0 | [
"MIT"
] | 1 | 2022-02-06T19:35:05.000Z | 2022-02-06T19:35:05.000Z | assignment2/comp411/classifiers/fc_net.py | kukalbriiwa7/COMP511_CS231n | 1537e98cdca43fad906e56a22f48d884523414b0 | [
"MIT"
] | null | null | null | assignment2/comp411/classifiers/fc_net.py | kukalbriiwa7/COMP511_CS231n | 1537e98cdca43fad906e56a22f48d884523414b0 | [
"MIT"
] | null | null | null | from builtins import range
from builtins import object
import numpy as np
from comp411.layers import *
from comp411.layer_utils import *
class ThreeLayerNet(object):
    """
    A three-layer fully-connected neural network with Leaky ReLU nonlinearity and
    softmax loss that uses a modular layer design. We assume an input dimension
    of D, a hidden dimension tuple (H1, H2) giving the sizes of the first and
    second hidden layers respectively, and perform classification over C classes.

    The architecture is: affine - leakyrelu - affine - leakyrelu - affine - softmax.

    Note that this class does not implement gradient descent; instead, it
    will interact with a separate Solver object that is responsible for running
    optimization.

    The learnable parameters of the model are stored in the dictionary
    self.params that maps parameter names ('W1'/'b1', 'W2'/'b2', 'W3'/'b3')
    to numpy arrays.
    """
    def __init__(self, input_dim=3*32*32, hidden_dim=(64, 32), num_classes=10,
                 weight_scale=1e-3, reg=0.0, alpha=1e-3):
        """
        Initialize a new network.

        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: A tuple giving the size of the first and second hidden layer
        - num_classes: An integer giving the number of classes to classify
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        - alpha: negative slope of the Leaky ReLU layers
        """
        self.params = {}
        self.reg = reg
        self.alpha = alpha
        # Weights are drawn from N(0, weight_scale^2); biases start at zero.
        self.params['W1'] = weight_scale * np.random.randn(input_dim,hidden_dim[0])
        self.params['W2'] = weight_scale * np.random.randn(hidden_dim[0],hidden_dim[1])
        self.params['W3'] = weight_scale * np.random.randn(hidden_dim[1],num_classes)
        self.params['b1'] = np.zeros(hidden_dim[0])
        self.params['b2'] = np.zeros(hidden_dim[1])
        self.params['b3'] = np.zeros(num_classes)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for a minibatch of data.

        Inputs:
        - X: Array of input data of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,). y[i] gives the label for X[i].

        Returns:
        If y is None, then run a test-time forward pass of the model and return:
        - scores: Array of shape (N, C) giving classification scores, where
          scores[i, c] is the classification score for X[i] and class c.

        If y is not None, then run a training-time forward and backward pass and
        return a tuple of:
        - loss: Scalar value giving the loss
        - grads: Dictionary with the same keys as self.params, mapping parameter
          names to gradients of the loss with respect to those parameters.
        """
        scores = None
        # Forward pass: two affine + LeakyReLU stages, then a final affine
        # layer.  affine_lrelu_forward / affine_forward come from the comp411
        # star imports above; each returns (output, cache), and the caches
        # are consumed by the matching *_backward calls below.
        W1 = self.params['W1']
        W2 = self.params['W2']
        W3 = self.params['W3']
        b1 = self.params['b1']
        b2 = self.params['b2']
        b3 = self.params['b3']
        X2 , lrelu_cache1 = affine_lrelu_forward(X,W1,b1,{"alpha": self.alpha})
        X3 , lrelu_cache2 = affine_lrelu_forward(X2,W2,b2,{"alpha": self.alpha})
        scores, affine_cache = affine_forward(X3,W3,b3)

        # If y is None then we are in test mode so just return scores
        if y is None:
            return scores

        loss, grads = 0, {}
        # Backward pass: softmax data loss plus 0.5 * reg * ||W||^2 for each
        # weight matrix (the 0.5 factor makes the gradient simply reg * W).
        loss, softmax_grad = softmax_loss(scores, y)
        loss += 0.5 * self.reg * ( np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) )
        dx3, dw3, db3 = affine_backward(softmax_grad, affine_cache)
        dx2, dw2, db2 = affine_lrelu_backward(dx3, lrelu_cache2)
        dx1, dw1, db1 = affine_lrelu_backward(dx2, lrelu_cache1)
        # Each weight gradient picks up its L2 regularization term; biases
        # are not regularized.
        grads['W3'] = dw3 + self.reg * W3
        grads['b3'] = db3
        grads['W2'] = dw2 + self.reg * W2
        grads['b2'] = db2
        grads['W1'] = dw1 + self.reg * W1
        grads['b1'] = db1
        return loss, grads
class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden layers,
    LeakyReLU nonlinearities, and a softmax loss function. This will also implement
    dropout optionally. For a network with L layers, the architecture will be

        {affine - leakyrelu - [dropout]} x (L - 1) - affine - softmax

    where dropout is optional, and the {...} block is repeated L - 1 times.

    Similar to the ThreeLayerNet above, learnable parameters are stored in the
    self.params dictionary (keys 'W1'/'b1' ... 'WL'/'bL') and will be learned
    using the Solver class.
    """
    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=1, reg=0.0, alpha=1e-2,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=1 then
          the network should not use dropout at all.
        - reg: Scalar giving L2 regularization strength.
        - alpha: negative slope of Leaky ReLU layers
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed using
          this datatype. float32 is faster but less accurate, so you should use
          float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout layers. This
          will make the dropout layers deterministic so we can gradient check the
          model.
        """
        self.use_dropout = dropout != 1
        self.reg = reg
        self.alpha = alpha
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}
        # Layer sizes: input -> hidden_1 -> ... -> hidden_{L-1} -> num_classes.
        # Weights ~ N(0, weight_scale^2); biases start at zero.
        dims = np.hstack((input_dim, hidden_dims, num_classes))
        for i in range(self.num_layers):
            self.params['W%d' % (i + 1)] = weight_scale * np.random.randn(dims[i], dims[i+1])
            self.params['b%d' % (i + 1)] = np.zeros(dims[i+1])
        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the
        # mode (train / test). The same dropout_param is shared by all layers.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed
        # Cast all parameters to the requested datatype.
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as ThreeLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'
        # Set train/test mode for the shared dropout param since dropout
        # behaves differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        scores = None
        # Forward pass: (affine -> LeakyReLU -> [dropout]) x (L-1), then a
        # final affine layer.  Every layer's cache is pushed onto
        # cache_history and popped in reverse order during the backward pass.
        # NOTE(review): if hidden_dims is empty (num_layers == 1) the loop
        # below never runs and the `i += 1` afterwards raises NameError --
        # confirm that single-layer nets are out of scope.
        hidden_num = self.num_layers - 1
        scores = X
        cache_history = []
        L2reg = 0
        for i in range(hidden_num):
            scores, cache = affine_lrelu_forward(scores, self.params['W%d' % (i + 1)], self.params['b%d' % (i + 1)],{"alpha": self.alpha})
            cache_history.append(cache)
            if self.use_dropout:
                scores, cache = dropout_forward(scores, self.dropout_param)
                cache_history.append(cache)
            L2reg += np.sum(self.params['W%d' % (i + 1)] ** 2)
        i += 1
        scores, cache = affine_forward(scores, self.params['W%d' % (i + 1)],
                                       self.params['b%d' % (i + 1)])
        cache_history.append(cache)
        L2reg += np.sum(self.params['W%d' % (i + 1)] ** 2)
        # 0.5 factor keeps the regularization gradient at reg * W.
        L2reg *= 0.5 * self.reg
        # If test mode return early
        if mode == 'test':
            return scores
        loss, grads = 0.0, {}
        # Backward pass: softmax loss, then unwind the layers while popping
        # caches.  Each weight gradient picks up the reg * W term from the
        # 0.5 * reg * ||W||^2 regularizer above.
        loss, dout = softmax_loss(scores, y)
        loss += L2reg
        dout, grads['W%d' % (i + 1)], grads['b%d' % (i + 1)] = affine_backward(dout, cache_history.pop())
        grads['W%d' % (i + 1)] += self.reg * self.params['W%d' % (i + 1)]
        i -= 1
        while i >= 0:
            if self.use_dropout:
                # Dropout sits after the affine+LeakyReLU pair, so its cache
                # is popped first on the way back.
                dout = dropout_backward(dout, cache_history.pop())
            dout, grads['W%d' % (i + 1)], grads['b%d' % (i + 1)] = affine_lrelu_backward(dout, cache_history.pop())
            grads['W%d' % (i + 1)] += self.reg * self.params['W%d' % (i + 1)]
            i -= 1
        return loss, grads
| 50.336508 | 138 | 0.48663 | 15,712 | 0.990918 | 0 | 0 | 0 | 0 | 0 | 0 | 10,522 | 0.663597 |
8cd402c900cdc390c47cfb466e6d949a84a0cc3b | 1,476 | py | Python | py/cloud_server_del.py | AlohaPoster/MyActilife_win | 7922839b6444f63f87b8f9584ad5d89101fa8432 | [
"ISC"
] | null | null | null | py/cloud_server_del.py | AlohaPoster/MyActilife_win | 7922839b6444f63f87b8f9584ad5d89101fa8432 | [
"ISC"
] | null | null | null | py/cloud_server_del.py | AlohaPoster/MyActilife_win | 7922839b6444f63f87b8f9584ad5d89101fa8432 | [
"ISC"
] | null | null | null | import os
from socket import *
from time import ctime

# Every dataset consists of the raw CSV, a PDF report, and four PNG plots;
# a delete request must remove all of them.
FILE_SUFFIXES = (
    ".csv",
    ".pdf",
    "_Y-axis.png",
    "_Z-axis.png",
    "_X-axis.png",
    "_combined.png",
)

HOST = ''
PORT = 9733
BUFSIZ = 1024
ADDR = (HOST, PORT)

tcpSerSock = socket(AF_INET, SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(32)

while True:
    print('waiting for connection...')
    tcpCliSock, addr = tcpSerSock.accept()
    print('...connected from:', addr)
    try:
        # The client sends the base filename (no suffix) as UTF-8 text.
        filename = tcpCliSock.recv(BUFSIZ).decode("utf-8")
        for suffix in FILE_SUFFIXES:
            path = filename + suffix
            if os.path.exists(path):
                os.remove(path)
            else:
                # Missing file: report failure and stop for this client.
                # Files removed before this point stay deleted, matching
                # the original per-file handling.
                tcpCliSock.send("0".encode())
                break
        else:
            tcpCliSock.send("file finish".encode())
    finally:
        # Always close the client socket -- the original code leaked it
        # whenever a file was missing.
        tcpCliSock.close()
tcpSerSock.close() | 25.016949 | 54 | 0.579268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.159214 |
8cd43ca8bd2a786d14de4833789bfb393dcd55d1 | 1,552 | py | Python | setup.py | PureTryOut/pico-wizard | d5ffe9a777a33c5f5ab5772db84315595d19a50a | [
"MIT"
] | 11 | 2021-02-05T13:49:44.000Z | 2022-02-24T14:14:46.000Z | setup.py | PureTryOut/pico-wizard | d5ffe9a777a33c5f5ab5772db84315595d19a50a | [
"MIT"
] | 26 | 2021-02-12T17:25:34.000Z | 2021-12-30T07:47:18.000Z | setup.py | PureTryOut/pico-wizard | d5ffe9a777a33c5f5ab5772db84315595d19a50a | [
"MIT"
] | 6 | 2021-06-21T17:37:15.000Z | 2022-01-15T16:07:47.000Z | # SPDX-FileCopyrightText: 2021 Anupam Basak <anupam.basak27@gmail.com>
#
# SPDX-License-Identifier: MIT
import setuptools
# Packaging configuration for PICO Wizard ("Post Installation COnfiguration"
# -- the unusual capitalization in the descriptions spells out the name).
setuptools.setup(
    name="pico-wizard",
    version="0.1.0",
    author="Anupam Basak",
    author_email="anupam.basak27@gmail.com",
    description="A Post Installation COnfiguration tool",
    long_description="A Post Installation COnfiguration tool for Linux OSes",
    long_description_content_type="text/plain",
    # Helper script installed alongside the console entry point below.
    scripts=["files/pico-wizard-script-runner"],
    entry_points={
        "console_scripts": [
            "pico-wizard = PicoWizard.__main__:__main__",
        ]
    },
    url="https://github.com/pico-wizard/pico-wizard",
    project_urls={
        "Bug Tracker": "https://github.com/pico-wizard/pico-wizard/issues",
        "Documentation": "https://github.com/pico-wizard/pico-wizard",
        "Source Code": "https://github.com/pico-wizard/pico-wizard",
    },
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License"
    ],
    license="MIT",
    install_requires=[
        ### Pyside2 needs to be installed from manjaro repository
        ### pip doesnt provide prebuilt arm64
        # "pyside2"
    ],
    python_requires=">=3.6",
    # Ship QML/SVG/shell assets found anywhere inside the packages.
    package_data = {
        "": [
            "*.qml",
            "**/*.qml",
            "**/*.svg",
            "**/*.svg.license",
            "**/*.sh",
            "**/qmldir",
            "PicoWizard/**/*.svg"
        ]
    },
    include_package_data=True,
)
| 29.846154 | 77 | 0.589562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 868 | 0.559278 |
8cd52c1779c479941f4c615d82fabed36c62bce2 | 6,769 | py | Python | modules/weather_forecast_manager.py | algon-320/tenki.py | e1ad137b49f9b93fc350200528a9ccf8de2f1517 | [
"MIT"
] | null | null | null | modules/weather_forecast_manager.py | algon-320/tenki.py | e1ad137b49f9b93fc350200528a9ccf8de2f1517 | [
"MIT"
] | null | null | null | modules/weather_forecast_manager.py | algon-320/tenki.py | e1ad137b49f9b93fc350200528a9ccf8de2f1517 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import datetime
import pickle
import lxml.html
import urllib.request, urllib.error
import re
from modules.weather import Weather
from modules.print_util import String
class WeatherForecastManager:
    """Fetch a weather-forecast web page, scrape three days of forecast
    tables from it, and pretty-print them.

    Parsed results are cached on disk (pickle) and reused for up to one
    hour, so repeated invocations do not re-fetch the page.
    """

    # Cache file holding the tuple (url, weathers, updated_time, point_name).
    PICKLE_DUMP_FILE = 'tenki.dump'
    # XPath selectors for the pieces of the forecast page that get scraped.
    XPATH_UPDATED_TIME = r'//*[@id="main-column"]/section/h2/time/text()'
    XPATH_POINT_INFO = r'//*[@id="main-column"]/section/h2/text()'
    XPATH_ANNOUNCED_TIME_COMMENT = r'//*[@id="main-column"]/section/comment()[contains(., "announce_datetime")]'
    XPATH_WEATHER_DATES = r'//*[@id="main-column"]/section/table[%d]/tr[1]/td/div/p/text()'
    XPATH_WEATHER_TD = r'//*[@id="main-column"]/section/table[%d]/tr[4]/td'
    XPATH_TEMPERATURE_TD = r'//*[@id="main-column"]/section/table[%d]/tr[6]/td'
    XPATH_PROB_RAIN_TD = r'//*[@id="main-column"]/section/table[%d]/tr[7]/td'
    XPATH_AMOUNT_RAIN_TD = r'//*[@id="main-column"]/section/table[%d]/tr[9]/td'
    XPATH_HUMIDITY_TD = r'//*[@id="main-column"]/section/table[%d]/tr[10]/td'

    # Bit flags selecting which rows print_weather() shows.  The tuple(...)
    # call is required: a bare map() object would be exhausted by the
    # unpacking target and leave SHOW_OPTS bound to an empty iterator on
    # Python 3 (this was a latent Python-2-ism).
    SHOW_OPTS = (
        SHOW_WEATHER,
        SHOW_TEMPERATURE,
        SHOW_PROBABILITY_OF_RAIN,
        SHOW_AMOUNT_OF_RAIN,
        SHOW_HUMIDITY,
        SHOW_WITHOUT_COLORS,
    ) = tuple(map(lambda x: 1 << x, range(6)))
    SHOW_ALL = SHOW_WEATHER | SHOW_TEMPERATURE | SHOW_PROBABILITY_OF_RAIN | SHOW_AMOUNT_OF_RAIN | SHOW_HUMIDITY

    def __init__(self, spot_url):
        """Load the cached forecast if it is fresh (< 1 hour) and for the
        same spot; otherwise fetch and parse `spot_url`."""
        self.url = spot_url
        self.weathers = []
        self.updated_time = None
        self.point_name = ''
        if os.path.exists(WeatherForecastManager.PICKLE_DUMP_FILE):
            self.unpickle()
            if self.updated_time + datetime.timedelta(hours=1) > datetime.datetime.now() and self.url == spot_url:
                return
        self.update_weather(spot_url)

    def update_weather(self, url):
        """Fetch `url`, parse point name, announcement time and three days
        of forecast rows, then refresh the on-disk cache."""
        # Record the URL we are actually fetching so the pickle written
        # below matches its contents (previously a stale cached URL could
        # be re-pickled together with fresh data for a different spot).
        self.url = url
        try:
            html = urllib.request.urlopen(url).read()
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still works.
            print('[error] cannot open URL')
            sys.exit(1)
        dom = lxml.html.fromstring(html.decode('utf-8'))
        updated_time_str = dom.xpath(WeatherForecastManager.XPATH_UPDATED_TIME)[0]
        point_info = dom.xpath(WeatherForecastManager.XPATH_POINT_INFO)[0]
        self.point_name = re.match(r'(.+)の天気', point_info).group(1)
        # Parse the announcement date/time: year and month come from an HTML
        # comment, day/hour/minute from the visible "DD日HH:MM発表" string.
        comment = dom.xpath(WeatherForecastManager.XPATH_ANNOUNCED_TIME_COMMENT)[0]
        comment = lxml.html.tostring(comment, method='html', encoding='unicode')
        mat = re.match(r'.*announce_datetime:(\d{4})\-(\d{2})\-\d{2} \d{2}\:\d{2}\:\d{2}', comment)
        year = int(mat.group(1))
        month = int(mat.group(2))
        mat = re.match(r'(\d+)日(\d+):(\d+)発表', updated_time_str)
        day = int(mat.group(1))
        hour = int(mat.group(2))
        minute = int(mat.group(3))
        self.updated_time = datetime.datetime(year, month, day, hour, minute)
        self.weathers = []
        # One table per day; the page carries three days of forecasts.
        for k in range(3):
            w = Weather()
            w.date = dom.xpath(WeatherForecastManager.XPATH_WEATHER_DATES % (k + 1))[0][:-1]
            tds_weather = dom.xpath(WeatherForecastManager.XPATH_WEATHER_TD % (k + 1))
            tds_temperature = dom.xpath(WeatherForecastManager.XPATH_TEMPERATURE_TD % (k + 1))
            tds_probability_of_rain = dom.xpath(WeatherForecastManager.XPATH_PROB_RAIN_TD % (k + 1))
            tds_amount_of_rain = dom.xpath(WeatherForecastManager.XPATH_AMOUNT_RAIN_TD % (k + 1))
            tds_humidity = dom.xpath(WeatherForecastManager.XPATH_HUMIDITY_TD % (k + 1))
            w.weathers = list(map(lambda td: td[1].text, tds_weather))
            # Cells whose icon path contains 'past' mark already-elapsed slots.
            w.is_past = list(map(lambda td: ('past' in td[0].attrib['src']), tds_weather))
            w.temperatures = list(map(lambda td: float(td[0].text), tds_temperature))
            # '---' means no probability published for that slot.
            w.probability_of_rains = list(map(lambda td: None if td[0].text == '---' else int(td[0].text), tds_probability_of_rain))
            w.amount_of_rains = list(map(lambda td: float(td[0].text), tds_amount_of_rain))
            w.humidities = list(map(lambda td: int(td[0].text), tds_humidity))
            self.weathers.append(w)
        self.pickle_data()

    def pickle_data(self):
        """Write the current state to the on-disk cache."""
        with open(WeatherForecastManager.PICKLE_DUMP_FILE, 'wb') as f:
            tmp = (self.url, self.weathers, self.updated_time, self.point_name)
            pickle.dump(tmp, f)

    def unpickle(self):
        """Restore state from the on-disk cache written by pickle_data()."""
        with open(WeatherForecastManager.PICKLE_DUMP_FILE, 'rb') as f:
            tmp = pickle.load(f)
            self.url = tmp[0]
            self.weathers = tmp[1]
            self.updated_time = tmp[2]
            self.point_name = tmp[3]

    def print_weather(self, show_opts=None, conky=False, days=2):
        """Print `days` days of forecast rows selected by the SHOW_* flags
        in `show_opts` (defaults to SHOW_ALL)."""
        if show_opts is None:
            show_opts = WeatherForecastManager.SHOW_ALL
        # Left column must fit the widest date label (+6 padding columns).
        max_width = 0
        for w in self.weathers:
            max_width = max(max_width, String.get_string_width(w.date))
        max_width += 6
        # Each of the 8 data columns must fit the widest weather string.
        max_unit_width = 0
        for i in range(days):
            w = self.weathers[i]
            for tmp in w.weathers:
                max_unit_width = max(max_unit_width, String.get_string_width(tmp))
        print('-' * (max_width + (max_unit_width + 1) * 8))
        print('{p}の天気 ({M}月{D}日 {h:02d}:{m:02d} 発表)'.format(p=self.point_name,
            M=self.updated_time.month, D=self.updated_time.day,
            h=self.updated_time.hour, m=self.updated_time.minute))
        time_labels = ['03時', '06時', '09時', '12時', '15時', '18時', '21時', '24時']
        sys.stdout.write((' ' * max_width))
        for l in time_labels:
            sys.stdout.write(String.center(l, max_unit_width + 1))
        sys.stdout.write('\n')
        print('=' * (max_width + (max_unit_width + 1) * 8))
        for i in range(days):
            w = self.weathers[i]
            col = bool(show_opts & WeatherForecastManager.SHOW_WITHOUT_COLORS)
            if show_opts & WeatherForecastManager.SHOW_WEATHER:
                w.print_weather(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_TEMPERATURE:
                w.print_temperature(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_PROBABILITY_OF_RAIN:
                w.print_probability_of_rain(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_AMOUNT_OF_RAIN:
                w.print_amount_of_rain(max_width, max_unit_width, no_color=col, conky=conky)
            if show_opts & WeatherForecastManager.SHOW_HUMIDITY:
                w.print_humidity(max_width, max_unit_width, no_color=col, conky=conky)
            print('=' * (max_width + (max_unit_width + 1) * 8))
| 44.24183 | 132 | 0.626385 | 6,587 | 0.965128 | 0 | 0 | 0 | 0 | 0 | 0 | 923 | 0.135238 |
8cd675d43aba0a61eff6ca1dd6e1d5350ca4da8d | 2,299 | py | Python | app/fiftycents/entities/player.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | app/fiftycents/entities/player.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | app/fiftycents/entities/player.py | Cinquiom/fifty-cents-frontend | 946f564a87127f5820111321cd48441cc414d277 | [
"MIT"
] | null | null | null | import math
from collections import Counter
from itertools import dropwhile
# Raised by Player.spend_coin when the player's purse is empty.
class NoCoinsRemainingError(Exception): pass
# Raised by Player.toss when no card of the requested rank is in hand.
class CardNotInHandError(Exception): pass
class Player():
    """State and actions for a single card-game player: a hidden hand,
    face-up played cards, a coin purse, and score bookkeeping."""

    def __init__(self):
        self.hand = []          # cards currently held in the hand
        self.played_cards = []  # cards laid down face-up
        self.down = False       # not modified in this class
        self.coins = 10         # coins left to spend (see spend_coin)
        self.total_score = 0    # not modified in this class

    def add_card(self, card):
        """Put a newly acquired card into the hand."""
        self.hand.append(card)

    def get_current_score(self):
        """Score: total value of played cards minus total value still held."""
        up = sum([c.value for c in self.played_cards])
        down = sum([c.value for c in self.hand])
        return up - down

    # Messy function to determine if a hand is valid.
    # '2's and 'JOKER's count as wildcards; ranks that cannot reach at
    # least half a set on their own are discarded before matching.
    # NOTE(review): within this method `n` reads as the number of sets and
    # `s` as the set size, but check_playable_sets below passes
    # (set_size, set_num) into (n, s) -- the arguments look swapped at one
    # of the two sites; confirm against the game rules.
    def get_playable_sets(self, hand, n, s):
        """Return {rank: (natural_count, wildcards_assigned)} for the sets
        this hand can form, or None when fewer than `n` sets are possible."""
        counter = Counter([c.rank for c in hand])
        wildcards = counter.pop('2', 0) + counter.pop('JOKER', 0)
        # most_common() is sorted descending, so dropwhile skips the viable
        # ranks and everything after the first too-small count is deleted.
        for k, v in dropwhile(lambda x: x[1] >= math.ceil(s/2.0), counter.most_common()):
            del counter[k]
        playable_sets = {}
        playable_sets_num = 0
        for k in counter:
            # Special exception: splitting extra-large pairs into two sets
            if n > 1 and counter[k] >= s and wildcards >= n*s - counter[k]:
                playable_sets[k] = (counter[k], n*s - counter[k])
                wildcards = wildcards - n*s + counter[k]
                playable_sets_num += 1
            else:
                wildcards_needed = s - counter[k]
                if wildcards_needed <= wildcards:
                    playable_sets[k] = (counter[k], max(wildcards_needed, 0))
                    wildcards = wildcards - max(wildcards_needed, 0)
        playable_sets_num += len(playable_sets)
        return playable_sets if playable_sets_num >= n else None

    def check_playable_sets(self, hand, set_size, set_num):
        """True when get_playable_sets finds enough sets in `hand`."""
        return self.get_playable_sets(hand, set_size, set_num) != None

    def spend_coin(self):
        """Spend one coin; raises NoCoinsRemainingError when none are left."""
        if self.coins <= 0:
            raise NoCoinsRemainingError
        self.coins = self.coins - 1

    def toss(self, card):
        """Remove and return the first held card whose rank equals `card`;
        raises CardNotInHandError when no such card is in the hand."""
        for c in self.hand:
            if card == c.rank:
                self.hand.remove(c)
                return c
        raise CardNotInHandError
| 33.318841 | 89 | 0.558504 | 2,210 | 0.961288 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.082645 |
8cd6adc7013685ced3271a7d83a19d006061f4cb | 4,137 | py | Python | day03.py | dylanbrodiefafard/aoc2019 | fb92517f298c4bfab8be0d615deaa7953aa60c71 | [
"MIT"
] | null | null | null | day03.py | dylanbrodiefafard/aoc2019 | fb92517f298c4bfab8be0d615deaa7953aa60c71 | [
"MIT"
] | null | null | null | day03.py | dylanbrodiefafard/aoc2019 | fb92517f298c4bfab8be0d615deaa7953aa60c71 | [
"MIT"
] | null | null | null | from util import get_lines
def make_lines(path):
    """Turn wire segments like ['R75', 'D30', ...] into a list of line
    segments [((x0, y0), (x1, y1)), ...] starting at the origin.

    Raises ValueError for an unknown direction letter.
    """
    deltas = {'U': (0, 1), 'D': (0, -1), 'L': (-1, 0), 'R': (1, 0)}
    segments = []
    cursor = (0, 0)
    for segment in path:
        direction = segment[0]
        distance = int(segment[1:])
        if direction not in deltas:
            raise ValueError('{} is not a valid direction!'.format(direction))
        dx, dy = deltas[direction]
        endpoint = (cursor[0] + dx * distance, cursor[1] + dy * distance)
        segments.append((cursor, endpoint))
        cursor = endpoint
    return segments
def intersect(line1, line2):
    """Return the crossing point of two line segments, or None when the
    segments are parallel or do not meet within both segments.

    Formula: https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
    """
    (x1, y1), (x2, y2) = line1
    (x3, y3), (x4, y4) = line2
    # Shared denominator of both segment parameters; zero means the
    # segments are parallel (or degenerate).
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        return None
    t = ((x1 - x3) * (y3 - y4) - (y1 - y3) * (x3 - x4)) / denom
    u = -((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) / denom
    # Both parameters must lie in [0, 1] for the crossing to fall on the
    # finite segments rather than their extensions.
    if not (0 <= t <= 1 and 0 <= u <= 1):
        return None
    return x1 + t * (x2 - x1), y1 + t * (y2 - y1)
def make_vec(p1, p2):
    """Return the 2D vector pointing from p1 to p2."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return dx, dy
def l1(point, other_point=None):
    """Manhattan (L1) length of *point* treated as a vector, or the L1
    distance from *point* to *other_point* when the latter is given."""
    if other_point is None:
        vec = point
    else:
        vec = make_vec(point, other_point)
    return sum(abs(component) for component in vec)
def minimum_manhatten_distace(wire1_lines, wire2_lines):
    """Return the smallest Manhattan distance from the origin to any
    crossing of the two wires; the shared start at the origin is ignored.
    """
    best = float('inf')
    for segment_a in wire1_lines:
        for segment_b in wire2_lines:
            crossing = intersect(segment_a, segment_b)
            # Skip non-crossings and the trivial crossing at the origin.
            if crossing is None or crossing == (0.0, 0.0):
                continue
            best = min(best, l1(crossing))
    return best
def minimum_wire_steps(wire1_lines, wire2_lines):
    """Return the fewest combined steps both wires take to reach a crossing.

    Steps along each wire are accumulated segment by segment; at a crossing
    the partial distance from each segment's start to the crossing is added.
    The trivial crossing at the origin is ignored.
    """
    best = float('inf')
    steps1 = 0
    for segment_a in wire1_lines:
        steps2 = 0
        for segment_b in wire2_lines:
            crossing = intersect(segment_a, segment_b)
            if crossing is not None and crossing != (0.0, 0.0):
                # Steps walked before each segment, plus the partial
                # segment up to the crossing itself.
                total = (steps1 + l1(segment_a[0], crossing) +
                         steps2 + l1(segment_b[0], crossing))
                best = min(best, total)
            steps2 += l1(segment_b[0], segment_b[1])  # accumulate as we go
        steps1 += l1(segment_a[0], segment_a[1])  # accumulate as we go
    return best
if __name__ == '__main__':
    # Sanity checks against the worked examples from the Advent of Code
    # day 3 puzzle statement.
    # Part 1 examples
    assert minimum_manhatten_distace(make_lines('R75,D30,R83,U83,L12,D49,R71,U7,L72'.split(',')),
                                     make_lines('U62,R66,U55,R34,D71,R55,D58,R83'.split(','))) == 159
    assert minimum_manhatten_distace(make_lines('R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51'.split(',')),
                                     make_lines('U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'.split(','))) == 135
    # Part 2 examples
    assert minimum_wire_steps(make_lines('R75,D30,R83,U83,L12,D49,R71,U7,L72'.split(',')),
                              make_lines('U62,R66,U55,R34,D71,R55,D58,R83'.split(','))) == 610
    assert minimum_wire_steps(make_lines('R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51'.split(',')),
                              make_lines('U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'.split(','))) == 410

    # Solve the real puzzle input: one comma-separated wire path per line
    # (get_lines comes from the project's util module).
    formatted_input = [make_lines(line.split(',')) for line in get_lines('input/day03.txt')]
    print(minimum_manhatten_distace(*formatted_input))
    print(minimum_wire_steps(*formatted_input))
| 41.37 | 110 | 0.597051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 929 | 0.224559 |
8cd9ed3c436765c3683fb3296f85b163ac9cc4e2 | 6,162 | py | Python | ezflow/models/pwcnet.py | NeelayS/ezflow | b93a48c4adf5021f7eacbfc43220c7efa5ae55cd | [
"MIT"
] | 94 | 2021-11-18T18:31:18.000Z | 2022-03-04T02:30:13.000Z | ezflow/models/pwcnet.py | NeelayS/ezflow | b93a48c4adf5021f7eacbfc43220c7efa5ae55cd | [
"MIT"
] | 72 | 2021-11-19T16:59:10.000Z | 2022-03-02T14:39:10.000Z | ezflow/models/pwcnet.py | neu-vig/ezflow | 1eb6f675e72b1de6db7b35d61ca4ef0082bae890 | [
"MIT"
] | 5 | 2021-11-18T18:42:38.000Z | 2022-03-03T11:35:26.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..decoder import ConvDecoder
from ..encoder import build_encoder
from ..modules import conv, deconv
from ..similarity import CorrelationLayer
from ..utils import warp
from .build import MODEL_REGISTRY
@MODEL_REGISTRY.register()
class PWCNet(nn.Module):
    """
    Implementation of the paper
    `PWC-Net: CNNs for Optical Flow Using Pyramid, Warping, and Cost Volume <https://arxiv.org/abs/1709.02371>`_

    Coarse-to-fine flow estimation: features of both images are extracted
    with a shared pyramid encoder, a cost volume is built at each level,
    and a per-level decoder refines the flow, which is finally polished by
    a dilated "context" network (``dc_conv``).

    Parameters
    ----------
    cfg : :class:`CfgNode`
        Configuration for the model
    """

    def __init__(self, cfg):
        super(PWCNet, self).__init__()

        self.cfg = cfg
        # Shared pyramid feature extractor applied to both input images.
        self.encoder = build_encoder(cfg.ENCODER)
        # Cost-volume (correlation) layer comparing the two feature maps.
        self.correlation_layer = CorrelationLayer(
            pad_size=cfg.SIMILARITY.PAD_SIZE,
            max_displacement=cfg.SIMILARITY.MAX_DISPLACEMENT,
        )

        # One correlation channel per displacement in the
        # (2d + 1) x (2d + 1) search window.
        search_range = (2 * cfg.SIMILARITY.MAX_DISPLACEMENT + 1) ** 2

        self.decoder_layers = nn.ModuleList()
        decoder_cfg = cfg.DECODER.CONFIG

        self.up_feature_layers = nn.ModuleList()

        # One flow decoder (plus a feature-upsampling deconv) per pyramid level.
        for i in range(len(decoder_cfg)):
            if i == 0:
                # Coarsest level sees only the cost volume.
                concat_channels = search_range
            else:
                # Finer levels additionally receive this level's features and
                # the upsampled flow/features from the coarser level.
                # NOTE(review): the MAX_DISPLACEMENT term mirrors the upstream
                # implementation -- confirm the channel arithmetic against
                # ConvDecoder before changing it.
                concat_channels = (
                    search_range + decoder_cfg[i] + cfg.SIMILARITY.MAX_DISPLACEMENT
                )
            self.decoder_layers.append(
                ConvDecoder(
                    config=decoder_cfg,
                    to_flow=True,
                    concat_channels=concat_channels,
                )
            )
            self.up_feature_layers.append(
                deconv(
                    concat_channels + sum(decoder_cfg),
                    2,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                )
            )

        # 2x upsamplers for the 2-channel flow predicted at each level.
        self.deconv_layers = nn.ModuleList()
        for i in range(len(decoder_cfg)):
            self.deconv_layers.append(deconv(2, 2, kernel_size=4, stride=2, padding=1))

        # Context network: a stack of dilated convolutions refining the finest
        # flow; assembled as a ModuleList, frozen into a Sequential below.
        self.dc_conv = nn.ModuleList(
            [
                conv(
                    search_range
                    + cfg.SIMILARITY.MAX_DISPLACEMENT
                    + decoder_cfg[-1]
                    + sum(decoder_cfg),
                    128,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    dilation=1,
                ),
            ]
        )
        self.dc_conv.append(
            conv(
                decoder_cfg[0],
                decoder_cfg[0],
                kernel_size=3,
                stride=1,
                padding=2,
                dilation=2,
            )
        )
        # Dilation (and matching padding) doubles with each layer: 4, 8, 16, ...
        padding = 4
        dilation = 4
        for i in range(len(decoder_cfg) - 2):
            self.dc_conv.append(
                conv(
                    decoder_cfg[i],
                    decoder_cfg[i + 1],
                    kernel_size=3,
                    stride=1,
                    padding=padding,
                    dilation=dilation,
                )
            )
            padding *= 2
            dilation *= 2
        self.dc_conv.append(
            conv(
                decoder_cfg[3],
                decoder_cfg[4],
                kernel_size=3,
                stride=1,
                padding=1,
                dilation=1,
            )
        )
        # Final projection to a 2-channel flow residual.
        self.dc_conv.append(
            nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1, bias=True)
        )
        self.dc_conv = nn.Sequential(*self.dc_conv)

        self._init_weights()

    def _init_weights(self):
        # Kaiming initialisation for every conv/deconv weight; zero biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight.data, mode="fan_in")
                if m.bias is not None:
                    m.bias.data.zero_()

    def _corr_relu(self, features1, features2):
        # Cost volume followed by a leaky ReLU, as in the original PWC-Net.
        corr = self.correlation_layer(features1, features2)
        return F.leaky_relu(corr, negative_slope=0.1)

    def forward(self, img1, img2):
        """
        Performs forward pass of the network

        Parameters
        ----------
        img1 : torch.Tensor
            Image to predict flow from
        img2 : torch.Tensor
            Image to predict flow to

        Returns
        -------
        torch.Tensor
            Flow from img1 to img2 (a list of multi-scale predictions when
            the module is in training mode)
        """

        H, W = img1.shape[-2:]

        feature_pyramid1 = self.encoder(img1)
        feature_pyramid2 = self.encoder(img2)

        up_flow, up_features = None, None
        # Scale applied to the upsampled flow before warping; doubles at
        # each finer level.
        up_flow_scale = 0.625

        flow_preds = []

        # Coarse-to-fine: at each level, warp the second image's features by
        # the current flow estimate and decode a refined flow.
        for i in range(len(self.decoder_layers)):
            if i == 0:
                corr = self._corr_relu(feature_pyramid1[i], feature_pyramid2[i])
                concatenated_features = corr
            else:
                warped_features = warp(feature_pyramid2[i], up_flow * up_flow_scale)
                up_flow_scale *= 2
                corr = self._corr_relu(feature_pyramid1[i], warped_features)
                concatenated_features = torch.cat(
                    [corr, feature_pyramid1[i], up_flow, up_features], dim=1
                )

            flow, features = self.decoder_layers[i](concatenated_features)
            flow_preds.append(flow)

            up_flow = self.deconv_layers[i](flow)
            up_features = self.up_feature_layers[i](features)

        # Finest prediction first; refine it with the context network.
        flow_preds.reverse()
        flow_preds[0] += self.dc_conv(features)

        if self.training:
            # Multi-scale predictions for the training loss.
            return flow_preds

        else:
            flow = flow_preds[0]

            if self.cfg.INTERPOLATE_FLOW:
                H_, W_ = flow.shape[-2:]
                flow = F.interpolate(
                    flow, img1.shape[-2:], mode="bilinear", align_corners=True
                )
                # Rescale the flow vectors to the new spatial resolution.
                flow_u = flow[:, 0, :, :] * (W / W_)
                flow_v = flow[:, 1, :, :] * (H / H_)
                flow = torch.stack([flow_u, flow_v], dim=1)

            if self.cfg.FLOW_SCALE_FACTOR is not None:
                flow *= self.cfg.FLOW_SCALE_FACTOR

            return flow
| 28.009091 | 112 | 0.500649 | 5,858 | 0.950665 | 0 | 0 | 5,885 | 0.955047 | 0 | 0 | 588 | 0.095424 |
8cda02454cc4767296a2ee587812bf506b372867 | 1,361 | py | Python | setup.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
] | 11 | 2016-02-08T11:45:26.000Z | 2017-05-19T16:07:31.000Z | setup.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
] | 5 | 2016-02-11T22:11:54.000Z | 2016-06-09T20:54:07.000Z | setup.py | hackebrot/cibopath | 7b341cb92942a0ed70e21c9e5f23d281a625e30c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import pathlib
from setuptools import setup
def read(file_name):
    """Return the UTF-8 decoded contents of *file_name* located next to this file."""
    return (pathlib.Path(__file__).parent / file_name).read_text('utf-8')
# Package metadata for ``cibopath``; installing it exposes the console
# script ``cibopath`` mapped to ``cibopath.cli:main``.
setup(
    name='cibopath',
    version='0.1.0',
    author='Raphael Pierzina',
    author_email='raphael@hackebrot.de',
    maintainer='Raphael Pierzina',
    maintainer_email='raphael@hackebrot.de',
    license='BSD',
    url='https://github.com/hackebrot/cibopath',
    description='Search Cookiecutters on GitHub.',
    # The long description is the README, read relative to this setup file.
    long_description=read('README.rst'),
    packages=[
        'cibopath',
    ],
    package_dir={'cibopath': 'cibopath'},
    include_package_data=True,
    zip_safe=False,
    # Runtime dependencies: click for the CLI, aiohttp for async HTTP calls.
    install_requires=[
        'click',
        'aiohttp',
    ],
    entry_points={
        'console_scripts': [
            'cibopath = cibopath.cli:main',
        ]
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    keywords=['cookiecutter', 'web scraping', 'asyncio', 'command-line'],
)
| 26.686275 | 73 | 0.61058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 676 | 0.496694 |
8cda297e14662b818246dabf96df5f485117391e | 1,448 | py | Python | core/tests/test_models.py | maneeshbabu/recipe | 8cb02bf524f7676b11ec68fb4e6e518dea6b3456 | [
"MIT"
] | null | null | null | core/tests/test_models.py | maneeshbabu/recipe | 8cb02bf524f7676b11ec68fb4e6e518dea6b3456 | [
"MIT"
] | null | null | null | core/tests/test_models.py | maneeshbabu/recipe | 8cb02bf524f7676b11ec68fb4e6e518dea6b3456 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTestCase(TestCase):
    """Tests for the project's custom user model manager."""

    def test_create_user_with_email_successful(self):
        """Test creating a new user with email is successful"""
        credentials = {"email": "test@example.com", "password": "test123"}
        new_user = get_user_model().objects.create_user(**credentials)
        self.assertEqual(new_user.email, credentials["email"])
        self.assertTrue(new_user.check_password(credentials["password"]))

    def test_new_user_email_normalized(self):
        """Test creating a new user with email is normalized"""
        mixed_case_email = "test@EXAMPLE.com"
        new_user = get_user_model().objects.create_user(
            email=mixed_case_email, password="test123"
        )
        self.assertEqual(new_user.email, mixed_case_email.lower())

    def test_new_user_invalid_email(self):
        """Test creating a new user with invalid email raise exception"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(email=None, password="test123")

    def test_new_user_is_superuser(self):
        """Test creating a new super user"""
        admin = get_user_model().objects.create_superuser(
            email="test@example.com", password="test123"
        )
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)
| 37.128205 | 73 | 0.619475 | 1,364 | 0.941989 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.207873 |
8cddfb293ba557275c826d5a51078f1b1456befd | 5,980 | py | Python | CIF_Assembly_Entry_debug.py | cschlick/cif-assembly | 84c24b6d1dd4c878be8b9fba69d3a0c7e2d81382 | [
"MIT"
] | null | null | null | CIF_Assembly_Entry_debug.py | cschlick/cif-assembly | 84c24b6d1dd4c878be8b9fba69d3a0c7e2d81382 | [
"MIT"
] | null | null | null | CIF_Assembly_Entry_debug.py | cschlick/cif-assembly | 84c24b6d1dd4c878be8b9fba69d3a0c7e2d81382 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from mmtbx.ncs.ncs import ncs
from phenix.programs import map_symmetry as map_symmetry_program
from cctbx.maptbx.segment_and_split_map import run_get_ncs_from_map
from iotbx import phil
from iotbx.data_manager import DataManager
# initialize data manager
dm = DataManager()

# file IO
# this map has origin at (0,0,0)
# dm.process_model_file("../6ui6_fit_in_corner_map.pdb")
# dm.process_real_map_file("../emd_20669_corner_zero.map")

# this map has origin at data.all()/2
dm.process_model_file("../6ui6.pdb")
dm.process_real_map_file("../emd_20669.map")

# Run Tom's map_symmetry tool: shift the origin to (0,0,0), then search for
# map symmetry.
mm = dm.get_real_map()
mm.shift_origin()
mm.find_map_symmetry()
# Bug fix: the original line was missing the closing parenthesis, which made
# the whole script a SyntaxError.
print(type(mm._ncs_object))  # this was not set...
# debug find_map_symmetry()
# The statements below reproduce the internals of the method at module level
# so individual steps can be inspected and modified.
self = mm  # stand-in for the method's `self`
include_helical_symmetry = False
symmetry_center = None
min_ncs_cc = None
symmetry = None
ncs_object = None
check_crystal_symmetry = True
only_proceed_if_crystal_symmetry = False

# The symmetry search assumes the map origin has already been shifted to (0,0,0).
assert self.origin_is_zero()

self._warning_message = ""
self._ncs_cc = None

from cctbx.maptbx.segment_and_split_map import \
    run_get_ncs_from_map, get_params

if symmetry is None:
    symmetry = 'ALL'

if symmetry_center is None:
    # Most likely map center is (1/2,1/2,1/2) in full grid
    full_unit_cell=self.unit_cell_crystal_symmetry(
    ).unit_cell().parameters()[:3]
    symmetry_center=[]
    for x, sc in zip(full_unit_cell, self.shift_cart()):
        # version 1 # original version gives incorrect symmetry
        #symmetry_center.append(0.5*x + sc)
        # version 2 # this finds I(b), the correct symmetry
        symmetry_center.append(0.5*x)
    symmetry_center = tuple(symmetry_center)

params = get_params(args=[],
    symmetry = symmetry,
    include_helical_symmetry = include_helical_symmetry,
    symmetry_center = symmetry_center,
    min_ncs_cc = min_ncs_cc,
    return_params_only = True,
    )

space_group_number = None
if check_crystal_symmetry and symmetry == 'ALL' and (not ncs_object):
    # See if we can narrow it down looking at intensities at low-res
    d_min = 0.05*self.crystal_symmetry().unit_cell().volume()**0.333
    map_coeffs = self.map_as_fourier_coefficients(d_min=d_min)
    from iotbx.map_model_manager import get_map_coeffs_as_fp_phi
    f_array_info = get_map_coeffs_as_fp_phi(map_coeffs, d_min = d_min,
        n_bins = 15)
    ampl = f_array_info.f_array
    # NOTE(review): `flex` is never imported in this script; this line will
    # raise NameError unless `from scitbx.array_family import flex` is added.
    data = ampl.customized_copy(
        data = ampl.data(),sigmas = flex.double(ampl.size(),1.))
    from mmtbx.scaling.twin_analyses import symmetry_issues
    si = symmetry_issues(data)
    cs_possibility = si.xs_with_pg_choice_in_standard_setting
    space_group_number = cs_possibility.space_group_number()
    # # neccessary to remove or for me it will return None
    # if space_group_number < 2:
    #     space_group_number = None
    # if space_group_number is None and only_proceed_if_crystal_symmetry:
    #     return  # skip looking further
params.reconstruction_symmetry.\
    must_be_consistent_with_space_group_number = space_group_number

# NOTE(review): `sys` is not imported either; add `import sys` before running.
new_ncs_obj, ncs_cc, ncs_score = run_get_ncs_from_map(params = params,
    map_data = self.map_data(),
    crystal_symmetry = self.crystal_symmetry(),
    out = sys.stdout,
    ncs_obj = ncs_object)
# Build cif model from ncs_obj
# NOTE(review): `ncs_group` and `ncs_obj` are never assigned in this script;
# they presumably refer to `new_ncs_obj` (or one of its NCS groups) computed
# above -- confirm before running, otherwise this section raises NameError.
from iotbx import cif

model = dm.get_model()
h = model.get_hierarchy()
chains = [c.id for c in h.chains()]  # chain ids to list in the assembly
n_oper = ncs_group.n_ncs_oper()      # number of point-symmetry operators

# start cif building
builder = cif.builders.cif_model_builder()
builder.add_data_block("assembly_information")

# add pdbx_struct_assembly loop
headers = ['_pdbx_struct_assembly.id',
    '_pdbx_struct_assembly.details',
    '_pdbx_struct_assembly.method_details',
    '_pdbx_struct_assembly.oligomeric_details',
    '_pdbx_struct_assembly.oligomeric_count']
columns = [["1"],["Symmetry assembly "+ncs_obj.get_ncs_name()],["?"],["?"],["?"]]
builder.add_loop(headers,columns)

# add pdbx_struct_assembly_gen loop
headers = ['_pdbx_struct_assembly_gen.assembly_id',
    '_pdbx_struct_assembly_gen.oper_expression',
    '_pdbx_struct_assembly_gen.asym_id_list']
columns = [["1"],["(1-"+str(n_oper)+")"],[','.join(chains)]]
builder.add_loop(headers,columns)

# add pdbx_struct_oper_list loop
headers = ['_pdbx_struct_oper_list.id',
    '_pdbx_struct_oper_list.type',
    '_pdbx_struct_oper_list.name',
    '_pdbx_struct_oper_list.symmetry_operation',
    '_pdbx_struct_oper_list.matrix[1][1]',
    '_pdbx_struct_oper_list.matrix[1][2]',
    '_pdbx_struct_oper_list.matrix[1][3]',
    '_pdbx_struct_oper_list.vector[1]',
    '_pdbx_struct_oper_list.matrix[2][1]',
    '_pdbx_struct_oper_list.matrix[2][2]',
    '_pdbx_struct_oper_list.matrix[2][3]',
    '_pdbx_struct_oper_list.vector[2]',
    '_pdbx_struct_oper_list.matrix[3][1]',
    '_pdbx_struct_oper_list.matrix[3][2]',
    '_pdbx_struct_oper_list.matrix[3][3]',
    '_pdbx_struct_oper_list.vector[3]']
_id = list(range(1,n_oper+1))
_type = ['point symmetry operation' for i in range(n_oper)]
_name = ['?' for i in range(n_oper)]
_symmetry_operation = ['?' for i in range(n_oper)]
info_columns = [_id,_type,_name,_symmetry_operation]
# Flatten the 3x3 rotation matrices element-wise for the mmCIF loop columns.
rotations = [[r[i] for r in ncs_group.rota_matrices_inv()] for i in range(9)]
#translations = [[t[i] for t in ncs_group.translations_orth_inv()] for i in range(3)]
translations = [[0.0 for t in ncs_group.translations_orth_inv()] for i in range(3)] # debug, translations are not meaningful
numeric_columns = [rotations[0],rotations[1],rotations[2],translations[0],
    rotations[3],rotations[4],rotations[5],translations[1],
    rotations[6],rotations[7],rotations[8],translations[2]]
columns = info_columns+numeric_columns
builder.add_loop(headers,columns)

# Combine cif string with mmcif string.
# NOTE(review): `from StringIO import StringIO` and `cif_model.keys()[0]` are
# Python 2 idioms; under Python 3 use `from io import StringIO` and
# `list(cif_model.keys())[0]`.
from StringIO import StringIO
output = StringIO()
cif_model = builder.model()
cif_model = cif_model[cif_model.keys()[0]]
cif_model.show(indent="",out=output)
filestring = "data_default\n"+output.getvalue().replace("data_information\n","")+dm.get_model().model_as_mmcif().replace("data_default\n","")
dm.write_model_file(filestring,filename="../6ui6_processed.cif",overwrite=True)
| 32.150538 | 141 | 0.756355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,209 | 0.369398 |
8cde5b4f2684c24920a0982cb1a8680ed1ad64f4 | 1,839 | py | Python | SanGuoSha/SGS-Official/assets/images/plotters.py | fyabc/GamesDiy | b1d8dbc735a0fde42e7d216932d1d91ea4477176 | [
"MIT"
] | null | null | null | SanGuoSha/SGS-Official/assets/images/plotters.py | fyabc/GamesDiy | b1d8dbc735a0fde42e7d216932d1d91ea4477176 | [
"MIT"
] | null | null | null | SanGuoSha/SGS-Official/assets/images/plotters.py | fyabc/GamesDiy | b1d8dbc735a0fde42e7d216932d1d91ea4477176 | [
"MIT"
] | null | null | null | # coding: utf-8
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
def card_num_distribution():
    """Plot `Std-CardNumDistribution.png`.

    For each rank A..K, plots the fraction of cards of each type
    (basic / trick / equipment) among all cards of that rank.
    """
    # Raw counts per rank; np.fromstring(text, sep=...) is deprecated, so the
    # counts are parsed with split() instead.
    total = np.array('12 14 12 12 13 12 12 12 12 12 12 14 12'.split(), dtype=float)
    jb = np.array('0 6 6 6 6 8 9 11 11 10 7 3 2'.split(), dtype=float)
    jn = np.array('7 1 6 6 0 3 3 1 1 2 5 9 6'.split(), dtype=float)
    zb = np.array('5 7 0 0 7 1 0 0 0 0 0 2 4'.split(), dtype=float)
    # Convert counts to per-rank fractions.
    jb /= total
    jn /= total
    zb /= total

    x = np.arange(1, 14, 1)
    xlabels = 'A 2 3 4 5 6 7 8 9 10 J Q K'.split()
    # NOTE: label text restored from mojibake; originals were garbled and one
    # literal was split across lines (a SyntaxError).
    plt.plot(x, jb, '*-', color='k', label='基本牌')
    plt.plot(x, jn, 'o-', color='b', label='锦囊牌')
    plt.plot(x, zb, '+-', color='r', label='装备牌')
    plt.legend()
    plt.grid()
    # matplotlib >= 3 removed the ymin/ymax keywords; use positional limits.
    plt.ylim(-0.01, 1.01)
    ax = plt.gca()
    ax.yaxis.set_major_formatter(mticker.PercentFormatter(1.0))
    plt.xticks(x, xlabels)
    plt.show()
def card_suit_distribution():
    """Plot `Std-CardSuitDistribution.png`.

    For each suit, plots the fraction of cards of each type
    (basic / trick / equipment) among all cards of that suit.
    """
    # Raw counts per suit; np.fromstring(text, sep=...) is deprecated, so the
    # counts are parsed with split() instead.
    jb = np.array('14 22 20 29'.split(), dtype=float)
    jn = np.array('16 15 14 5'.split(), dtype=float)
    zb = np.array('10 3 6 7'.split(), dtype=float)
    total = np.array('40 40 40 41'.split(), dtype=float)
    # Convert counts to per-suit fractions.
    jb /= total
    jn /= total
    zb /= total

    x = np.arange(1, 5, 1)
    # NOTE: labels restored from mojibake (suits: spade/heart/club/diamond);
    # one original literal was split across lines (a SyntaxError).
    xlabels = '黑桃 红桃 梅花 方片'.split()
    plt.bar(x - 0.2, jb, color='k', width=0.2, label='基本牌')
    plt.bar(x, jn, color='b', width=0.2, label='锦囊牌')
    plt.bar(x + 0.2, zb, color='r', width=0.2, label='装备牌')
    plt.legend()
    plt.grid()
    ax = plt.gca()
    ax.yaxis.set_major_formatter(mticker.PercentFormatter(1.0))
    plt.xticks(x, xlabels)
    plt.show()
plt.show()
def main():
    """Configure fonts and draw the card-distribution plot."""
    # Use a CJK-capable font so the Chinese labels render correctly.
    font_options = {'sans-serif': 'Microsoft YaHei'}
    matplotlib.rc('font', **font_options)
    # card_num_distribution()  # per-rank plot, disabled by default
    card_suit_distribution()


if __name__ == '__main__':
    main()
| 24.851351 | 76 | 0.573681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.2734 |
8cdedf2105a7978025f71cf15096d9366805d58f | 1,969 | py | Python | tests/test_model/test_tasks/test_tuning.py | ak-gupta/nbaspa | db961717bb23854e0373b7732638021a18d909f5 | [
"MIT"
] | 1 | 2021-02-21T00:44:06.000Z | 2021-02-21T00:44:06.000Z | tests/test_model/test_tasks/test_tuning.py | ak-gupta/nbaspa | db961717bb23854e0373b7732638021a18d909f5 | [
"MIT"
] | 22 | 2021-02-21T16:41:39.000Z | 2021-11-27T16:12:33.000Z | tests/test_model/test_tasks/test_tuning.py | ak-gupta/nbaspa | db961717bb23854e0373b7732638021a18d909f5 | [
"MIT"
] | null | null | null | """Test hyperparameter tuning."""
import pytest
from nbaspa.model.tasks import (
LifelinesTuning,
SegmentData,
SurvivalData,
XGBoostTuning,
)
@pytest.fixture(scope="module")
def survivaldata(data):
"""Create survival data for the hyperparameter tuning."""
pre = SurvivalData()
df = pre.run(data)
seg = SegmentData()
segdata = seg.run(data=df, splits=[0.6, 0.2, 0.2], keys=["train", "tune", "stop"], seed=42)
return segdata["train"], segdata["tune"], segdata["stop"]
def test_lifelines_tuning(survivaldata):
    """Test running hyperparameter tuning with Lifelines."""
    train, tune, _ = survivaldata
    # Drop nulls because of weirdness with fitting lifelines on random data
    train, tune = train.dropna(), tune.dropna()
    task = LifelinesTuning()
    result = task.run(train_data=train, tune_data=[tune], max_evals=10)
    assert isinstance(result, dict)
    assert len(result["trials"].trials) <= 10
    for param in ("l1_ratio", "penalizer"):
        assert param in result["best"]
    for attribute in ("best_", "metric_"):
        assert hasattr(task, attribute)
def test_xgboost_tuning(survivaldata):
    """Test running hyperparameter tuning with XGBoost."""
    train, tune, stop = survivaldata
    task = XGBoostTuning()
    result = task.run(
        train_data=train,
        tune_data=[tune],
        stopping_data=stop,
        max_evals=10,
    )
    assert isinstance(result, dict)
    assert len(result["trials"].trials) <= 10
    for attribute in ("best_", "metric_"):
        assert hasattr(task, attribute)
    expected_params = (
        "learning_rate",
        "subsample",
        "max_delta_step",
        "max_depth",
        "gamma",
        "reg_alpha",
        "reg_lambda",
        "colsample_bytree",
        "min_child_weight",
    )
    for param in expected_params:
        assert param in result["best"]
| 29.833333 | 95 | 0.657186 | 0 | 0 | 0 | 0 | 348 | 0.176739 | 0 | 0 | 571 | 0.289995 |
8ce0ee53bb5fb1aa40bed29ff3cb17d7bb85ac52 | 524 | py | Python | trace_feature/models/simple_scenario.py | trace-features-bdd/trace_feature | e372b3188d5d77c58789afdb1e39d25e2cf84785 | [
"MIT"
] | null | null | null | trace_feature/models/simple_scenario.py | trace-features-bdd/trace_feature | e372b3188d5d77c58789afdb1e39d25e2cf84785 | [
"MIT"
] | null | null | null | trace_feature/models/simple_scenario.py | trace-features-bdd/trace_feature | e372b3188d5d77c58789afdb1e39d25e2cf84785 | [
"MIT"
] | null | null | null | from trace_feature.models.scenario import Scenario
class SimpleScenario(Scenario):
    """A plain (non-outline) BDD scenario: a title, its steps, and the
    production methods recorded as executed while the scenario ran."""

    def __init__(self):
        self.steps = []             # Gherkin steps belonging to this scenario
        self.scenario_title = ""
        self.line = None            # line number in the .feature file
        self.executed_methods = []  # methods traced during execution

    def execute(self):
        """Execution hook; not implemented for simple scenarios."""
        pass

    def set_line(self):
        """Line-number hook; not implemented for simple scenarios."""
        pass

    def __str__(self):
        # Bug fix: __str__ used to *print* the report and return "" -- a
        # side effect where callers expect a value. str(scenario) now
        # returns the report text (title plus one executed method per line).
        lines = ["\n Title: " + self.scenario_title]
        lines.extend(str(method) for method in self.executed_methods)
        return "\n".join(lines)

    def print_methods(self):
        """Print each executed method on its own line (kept for callers)."""
        for method in self.executed_methods:
            print(method)
| 20.153846 | 50 | 0.593511 | 471 | 0.898855 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.030534 |
8ce17a97e863e42c948a052d4db20da7594b80e9 | 6,771 | py | Python | EMLABPY/modules/makefinancialreports.py | TradeRES/toolbox-amiris-emlab | 11e6e7101bfbc0d71753e3892d4463c4955d2c34 | [
"Unlicense"
] | null | null | null | EMLABPY/modules/makefinancialreports.py | TradeRES/toolbox-amiris-emlab | 11e6e7101bfbc0d71753e3892d4463c4955d2c34 | [
"Unlicense"
] | null | null | null | EMLABPY/modules/makefinancialreports.py | TradeRES/toolbox-amiris-emlab | 11e6e7101bfbc0d71753e3892d4463c4955d2c34 | [
"Unlicense"
] | null | null | null | from domain.import_object import *
from modules.defaultmodule import DefaultModule
from domain.financialReports import FinancialPowerPlantReport
from domain.powerplant import PowerPlant
from domain.cashflow import CashFlow
from domain.technologies import *
import logging
class CreatingFinancialReports(DefaultModule):
    """EMLab module that builds per-power-plant financial reports for the
    current tick and stages them for the database writer.

    The large block of commented-out Java-port code (commodity/CO2/fixed-cost
    breakdowns and cash-flow based revenues) has been removed; consult the
    repository history if that logic is ever ported.
    """

    def __init__(self, reps):
        super().__init__("Creating Financial Reports", reps)
        # Ensure the results structure exists before any report is staged.
        reps.dbrw.stage_init_financial_results_structure()

    def act(self):
        """Create and stage a financial report for every known power plant.

        TODO: the prototype considered restricting this to
        ``findAllPowerPlantsWhichAreNotDismantledBeforeTick(current_tick - 2)``;
        for now every known plant is reported.
        """
        self.createFinancialReportsForPowerPlantsAndTick(
            self.reps.power_plants, self.reps.current_tick)
        print("finished financial report")

    def createFinancialReportsForNewInvestments(self):
        """Report only on plants whose construction started this tick."""
        self.createFinancialReportsForPowerPlantsAndTick(
            self.reps.findAllPowerPlantsWithConstructionStartTimeInTick(
                self.reps.current_tick),
            self.reps.current_tick)

    def createFinancialReportsForPowerPlantsAndTick(self, plants, tick):
        """Build one :class:`FinancialPowerPlantReport` per plant for *tick*
        and stage the whole batch at once.

        Parameters
        ----------
        plants : dict
            Mapping of plant name to power plant object.
        tick : int
            Simulation tick the reports refer to.
        """
        financialPowerPlantReports = []
        for plant in plants.values():
            report = FinancialPowerPlantReport(plant.name, self.reps)
            report.setTime(tick)
            report.setPowerPlant(plant.name)
            # Awarded energy (MWh) is taken as the plant's production.
            report.setProduction(plant.getAwardedPowerinMWh())
            report.setSpotMarketRevenue(plant.ReceivedMoneyinEUR)
            report.setProfit(plant.Profit)
            financialPowerPlantReports.append(report)
        self.reps.dbrw.stage_financial_results(financialPowerPlantReports)
| 69.091837 | 287 | 0.742136 | 6,497 | 0.959533 | 0 | 0 | 0 | 0 | 0 | 0 | 4,716 | 0.6965 |
8ce2f056900cdd54d05f6b392b7b1f1788adf0cb | 3,174 | py | Python | guess_number.py | redlinger/Guess_Number | ce0a08159b66b9c5bf8e2c529f02fc2a5f7071b7 | [
"MIT"
] | null | null | null | guess_number.py | redlinger/Guess_Number | ce0a08159b66b9c5bf8e2c529f02fc2a5f7071b7 | [
"MIT"
] | null | null | null | guess_number.py | redlinger/Guess_Number | ce0a08159b66b9c5bf8e2c529f02fc2a5f7071b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Guess a Number between 1 and 100
Setup: The computer generates a random number between 1 and 100. The human's
goal is to guess that number in 5 or fewer guesses.
"""
import random
import time

# --- Mutable game state (module level) ---------------------------------------
# hum_num: the human's latest input -- an int guess after conversion, or the
# raw string 'q' (quit sentinel) straight from input().
hum_num = 0
# Number of guesses allowed per round.
guesses = 5
# num_check = 1 if the last guess was right; = 0 if it was wrong.
num_check = 0
# Running score: rounds the computer won / lost.
comp_wins = 0
comp_loss = 0
# Check whether the guess is above, below, or equal to the computer's number.
def number_check(hum_num, comp_num, guess_left):
    """Compare the human's guess against the computer's number.

    Prints a higher/lower hint when the guess is wrong.

    Parameters
    ----------
    hum_num : int
        The human's guess.
    comp_num : int
        The number the computer drew.
    guess_left : int
        Unused; kept so existing callers continue to work.

    Returns
    -------
    int
        1 if the guess is correct, 0 otherwise.
    """
    if hum_num == comp_num:
        return 1
    if hum_num < comp_num:
        print('My number is above %d.' % hum_num)
    else:
        print('My number is below %d.' % hum_num)
    return 0
# BEGIN GAME ##################################################################
print('Guess my number! Enter q to quit.' )
print("I'm thinking of a number between 1 and 100.")
print("Guess what number I picked with 5 or fewer guesses and you win (nothing)")

# Outer loop: one iteration per round; runs until the player enters 'q'.
while hum_num != 'q':
    # Show the running score before each round.
    print('Score:')
    print('Human', 'Computer', sep='\t')
    print(' %s \t %s' % (comp_loss, comp_wins))
    guess_left = guesses
    # Draw the computer's secret number.
    # NOTE(review): randrange(0, 101) can also draw 0, which the player can
    # never guess (valid entries are 1-100); randrange(1, 101) was likely
    # intended -- confirm.
    draw = random.randrange(0, 101)
    while guess_left > 0:
        # Get input from the human.
        print()
        hum_num = input('Enter number between 1 and 100: ')
        # End the round (and the game) if the user wants to quit.
        if hum_num == 'q':
            print('Final Score:')
            print('Human', 'Computer', sep='\t')
            print(' %s \t %s' % (comp_loss, comp_wins))
            break
        # NOTE(review): non-numeric input other than 'q' raises ValueError
        # here and crashes the game -- consider wrapping in try/except.
        hum_num = int(hum_num)
        if hum_num > 100 or hum_num < 1:
            # Out-of-range entries do not consume a guess.
            print('Invalid entry. Enter a number between 1 and 100')
        else:
            # Check whether the guess is below, above, or equal to the number.
            num_check = number_check(hum_num=hum_num, comp_num=draw,
                                     guess_left=guess_left)
            # If the guess is correct, report a human win and start a new round.
            if num_check==1:
                print('My number is %i. You won this round!' % draw)
                comp_loss += 1
                break
            guess_left -= 1
            if guess_left > 1:
                print('You have %i guesses left' % guess_left)
            else:
                print('You have %i guess left' % guess_left)
    # If the human was not able to guess correctly in 5 tries, they lose.
    if num_check==0 and hum_num != 'q':
        print('My number is %i. You lost this round.' % draw)
        comp_wins += 1
    # Short pause so a slow human can read the result before the next score.
    time.sleep(2)
| 28.854545 | 82 | 0.538122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,491 | 0.469754 |
8ce539fa15c7f52a1ef7d6cae1f6f6a6ff2664fe | 708 | py | Python | 3_drop_additional_road_usages_measurements.py | IntroDS2017/SteamingPlayers | 992876c09ee6905bf4de1ea94b4ddad6321c8bb8 | [
"MIT"
] | null | null | null | 3_drop_additional_road_usages_measurements.py | IntroDS2017/SteamingPlayers | 992876c09ee6905bf4de1ea94b4ddad6321c8bb8 | [
"MIT"
] | null | null | null | 3_drop_additional_road_usages_measurements.py | IntroDS2017/SteamingPlayers | 992876c09ee6905bf4de1ea94b4ddad6321c8bb8 | [
"MIT"
] | null | null | null | import pandas as pd
def main(load_path="data/2_road_usages.csv", save_path="data/3_road_usages.csv"):
    """Keep only the first measurement point per street.

    Reads the road-usage table at *load_path* and, for every street
    (``nimi``) that was measured at more than one point (``piste``),
    drops the rows of every point except the first one encountered.
    The filtered table is written to *save_path* without the index.

    Args:
        load_path: input CSV with at least ``nimi`` and ``piste`` columns.
        save_path: destination CSV path for the filtered data.
    """
    df = pd.read_csv(load_path)

    points_to_drop = []
    for street in df["nimi"].unique():
        points = df.loc[df["nimi"] == street, "piste"].unique()
        if len(points) > 1:
            print(street + " has the following measurement points: " + str(points))
            # Keep points[0]; every additional point of this street is dropped.
            for extra_point in points[1:]:
                points_to_drop.append(extra_point)

    print("\nDropping rows at measurement points " + str(points_to_drop))
    # ``~`` is the supported boolean inversion; the original unary ``-``
    # on a boolean Series was removed from numpy/pandas and now raises.
    df = df[~df["piste"].isin(points_to_drop)]
    df.to_csv(save_path, index=False)
if __name__ == '__main__':
    # Run the filter step only when executed directly, not on import.
    main()
| 23.6 | 83 | 0.618644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.214689 |
8ce5694206f368d9b4bcbdda12bca0c06528e33e | 4,418 | py | Python | tests/application/files/parsers/test_dsv.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
] | 1 | 2017-05-14T21:31:33.000Z | 2017-05-14T21:31:33.000Z | tests/application/files/parsers/test_dsv.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
] | 33 | 2015-01-05T12:23:45.000Z | 2021-03-24T10:59:47.000Z | tests/application/files/parsers/test_dsv.py | alphagov-mirror/performanceplatform-admin | b63ae42b1276699623ef208b7d6edd3e0ce4ca59 | [
"MIT"
] | 4 | 2017-03-16T15:52:33.000Z | 2021-04-10T20:14:53.000Z | # -*- coding: utf-8 -*-
from application.files.parsers.dsv import parse_csv, lines
from application.files.parsers import ParseError
import unittest
from cStringIO import StringIO
from hamcrest import assert_that, only_contains, is_, contains
class ParseCsvTestCase(unittest.TestCase):
    """Behavioural tests for ``parse_csv``.

    Covers basic parsing, empty input, UTF-8 handling, comment stripping,
    the special ``comment`` column, alternative line separators and
    numeric cell conversion.

    NOTE(review): the non-ASCII literals below ("ร ", "รน") look
    mojibake-damaged (likely originally "à", "ù") — they are left
    byte-identical because the test's pass/fail behaviour depends on them.
    """
    def test_parse_csv(self):
        # Plain 3x2 CSV round-trips as a row-per-list structure.
        csv_stream = _string_io("a,b\nx,y\nq,w")
        data = parse_csv(csv_stream)
        assert_that(data, contains(
            ["a", "b"],
            ["x", "y"],
            ["q", "w"],
        ))
    def test_parse_empty_csv(self):
        # An empty stream yields no rows at all.
        csv_stream = _string_io("")
        data = _traverse(parse_csv(csv_stream))
        assert_that(data, is_([]))
    def test_parse_utf8_data(self):
        # UTF-8 encoded bytes must decode back to the original text.
        csv = u"a,b\nร ,รน"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, contains(
            ["a", "b"],
            [u"ร ", u"รน"],
        ))
    def test_error_when_input_is_not_utf8(self):
        # Latin-1 bytes are not valid UTF-8 for these characters, so the
        # parser must raise ParseError once the rows are consumed.
        csv = u"a,b\nร ,รน"
        csv_stream = _string_io(csv, "iso-8859-1")
        self.assertRaises(ParseError,
                          lambda csv_stream: _traverse(parse_csv(csv_stream)),
                          csv_stream)
    def test_ignore_when_empty_row(self):
        # A row whose every cell is empty is skipped entirely.
        csv = u"a,b\n,\nc,d"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["a", "b"],
            ["c", "d"],
        ))
    def test_accept_when_some_values_empty(self):
        # Partially-empty rows are kept; only fully-empty rows are dropped.
        csv = u"a,b\n,\nc,d\nc,"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["a", "b"],
            ["c", "d"],
            ["c", ""],
        ))
    def test_ignore_comments(self):
        # Lines starting with '#' are treated as comments anywhere in the file.
        csv = u"# top comment\na,b\n# any random comment\nc,d"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["a", "b"],
            ["c", "d"],
        ))
    def test_ignore_values_in_comments_column(self):
        # A column literally named "comment" is removed from every row.
        csv = u"a,comment,b\nc,d,e"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["a", "b"],
            ["c", "e"],
        ))
    def test_accept_csv_with_CR_as_line_separator(self):
        csv = u"prop1,prop2\rvalue 1,value 2"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["prop1", "prop2"],
            ["value 1", "value 2"],
        ))
    def test_accept_csv_with_CRLF_as_line_separator(self):
        csv = u"prop1,prop2\r\nvalue 1,value 2"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["prop1", "prop2"],
            ["value 1", "value 2"],
        ))
    def test_preserve_newlines_in_quoted_values(self):
        # Newlines inside a quoted cell must not split the row.
        csv = u"prop1,prop2\nvalue,\"value\nwith newline\""
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["prop1", "prop2"],
            ["value", "value\nwith newline"],
        ))
    def test_parsing_numbers_in_cells(self):
        # Numeric-looking cells are converted to int/float, others stay str.
        csv = u"int,float,string\n12,12.1,a string"
        csv_stream = _string_io(csv, "utf-8")
        data = parse_csv(csv_stream)
        assert_that(data, only_contains(
            ["int", "float", "string"],
            [12, 12.1, "a string"],
        ))
class LinesGeneratorTest(unittest.TestCase):
    """Tests for the ``lines`` generator: universal-newline splitting
    that keeps each terminator attached to its line."""
    def test_handles_CR_LF_and_CRLF(self):
        # All three historical line endings are recognised as separators.
        text = "1\n2\r3\r\n4"
        lines_list = list(lines(_string_io(text)))
        assert_that(lines_list, is_(["1\n", "2\r", "3\r\n", "4"]))
    def test_handles_emptylines(self):
        # Consecutive terminators produce empty lines (terminator only).
        text = "q\n\rw\r\r\ne"
        lines_list = list(lines(_string_io(text)))
        assert_that(lines_list, is_(["q\n", "\r", "w\r", "\r\n", "e"]))
    def test_ignores_trailing_empty_line(self):
        # A final terminator does not yield a trailing empty string.
        text = "asd\n"
        lines_list = list(lines(_string_io(text)))
        assert_that(lines_list, is_(["asd\n"]))
def _string_io(content, encoding=None):
if encoding is not None:
content = content.encode(encoding)
return StringIO(content)
def _traverse(content):
return map(lambda rows: list(rows), content)
| 25.102273 | 78 | 0.564056 | 3,955 | 0.893987 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.15755 |
8ce591611d3e31d6c0cca2832c1850b12c745822 | 16,225 | py | Python | sdk/python/pulumi_azure/waf/policy.py | pulumi-bot/pulumi-azure | 64d5c30a77b3f4de117068add359ef85293cef8e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/waf/policy.py | pulumi-bot/pulumi-azure | 64d5c30a77b3f4de117068add359ef85293cef8e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/waf/policy.py | pulumi-bot/pulumi-azure | 64d5c30a77b3f4de117068add359ef85293cef8e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Policy(pulumi.CustomResource):
    """An Azure Web Application Firewall (WAF) Policy resource.

    NOTE: this class is emitted by the Pulumi Terraform Bridge (tfgen);
    manual changes here will be overwritten on the next regeneration.
    """
    # Output attributes below mirror the input arguments of __init__;
    # their values are resolved asynchronously by the Pulumi engine.
    custom_rules: pulumi.Output[list]
    """
    One or more `custom_rules` blocks as defined below.
      * `action` (`str`) - Type of action.
      * `matchConditions` (`list`) - One or more `match_conditions` blocks as defined below.
        * `matchValues` (`list`) - A list of match values.
        * `matchVariables` (`list`) - One or more `match_variables` blocks as defined below.
          * `selector` (`str`) - Describes field of the matchVariable collection
          * `variableName` (`str`) - The name of the Match Variable
        * `negationCondition` (`bool`) - Describes if this is negate condition or not
        * `operator` (`str`) - Describes operator to be matched.
      * `name` (`str`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource.
      * `priority` (`float`) - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value.
      * `ruleType` (`str`) - Describes the type of rule.
    """
    location: pulumi.Output[str]
    """
    Resource location. Changing this forces a new resource to be created.
    """
    managed_rules: pulumi.Output[dict]
    """
    A `managed_rules` blocks as defined below.
      * `exclusions` (`list`) - One or more `exclusion` block defined below.
        * `matchVariable` (`str`)
        * `selector` (`str`) - Describes field of the matchVariable collection.
        * `selectorMatchOperator` (`str`) - Describes operator to be matched. Possible values: `Contains`, `EndsWith`, `Equals`, `EqualsAny`, `StartsWith`.
      * `managedRuleSets` (`list`) - One or more `managed_rule_set` block defined below.
        * `ruleGroupOverrides` (`list`) - One or more `rule_group_override` block defined below.
          * `disabledRules` (`list`) - One or more Rule ID's
          * `ruleGroupName` (`str`) - The name of the Rule Group
        * `type` (`str`) - The rule set type.
        * `version` (`str`) - The rule set version.
    """
    name: pulumi.Output[str]
    """
    The name of the policy. Changing this forces a new resource to be created.
    """
    policy_settings: pulumi.Output[dict]
    """
    A `policy_settings` block as defined below.
      * `enabled` (`bool`) - Describes if the policy is in enabled state or disabled state Defaults to `Enabled`.
      * `mode` (`str`) - Describes if it is in detection mode or prevention mode at the policy level Defaults to `Prevention`.
    """
    resource_group_name: pulumi.Output[str]
    """
    The name of the resource group. Changing this forces a new resource to be created.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the Web Application Firewall Policy.
    """
    def __init__(__self__, resource_name, opts=None, custom_rules=None, location=None, managed_rules=None, name=None, policy_settings=None, resource_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
        """
        Manages a Azure Web Application Firewall Policy instance.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US 2")
        example_policy = azure.waf.Policy("examplePolicy",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            custom_rules=[
                {
                    "name": "Rule1",
                    "priority": 1,
                    "ruleType": "MatchRule",
                    "match_conditions": [{
                        "match_variables": [{
                            "variableName": "RemoteAddr",
                        }],
                        "operator": "IPMatch",
                        "negationCondition": False,
                        "matchValues": [
                            "192.168.1.0/24",
                            "10.0.0.0/24",
                        ],
                    }],
                    "action": "Block",
                },
                {
                    "name": "Rule2",
                    "priority": 2,
                    "ruleType": "MatchRule",
                    "match_conditions": [
                        {
                            "match_variables": [{
                                "variableName": "RemoteAddr",
                            }],
                            "operator": "IPMatch",
                            "negationCondition": False,
                            "matchValues": ["192.168.1.0/24"],
                        },
                        {
                            "match_variables": [{
                                "variableName": "RequestHeaders",
                                "selector": "UserAgent",
                            }],
                            "operator": "Contains",
                            "negationCondition": False,
                            "matchValues": ["Windows"],
                        },
                    ],
                    "action": "Block",
                },
            ],
            policy_settings={
                "enabled": True,
                "mode": "Prevention",
            },
            managed_rules={
                "exclusion": [
                    {
                        "matchVariable": "RequestHeaderNames",
                        "selector": "x-company-secret-header",
                        "selectorMatchOperator": "Equals",
                    },
                    {
                        "matchVariable": "RequestCookieNames",
                        "selector": "too-tasty",
                        "selectorMatchOperator": "EndsWith",
                    },
                ],
                "managed_rule_set": [{
                    "type": "OWASP",
                    "version": "3.1",
                    "rule_group_override": [{
                        "ruleGroupName": "REQUEST-920-PROTOCOL-ENFORCEMENT",
                        "disabledRules": [
                            "920300",
                            "920440",
                        ],
                    }],
                }],
            })
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[list] custom_rules: One or more `custom_rules` blocks as defined below.
        :param pulumi.Input[str] location: Resource location. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] managed_rules: A `managed_rules` blocks as defined below.
        :param pulumi.Input[str] name: The name of the policy. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] policy_settings: A `policy_settings` block as defined below.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the Web Application Firewall Policy.

        The **custom_rules** object supports the following:
          * `action` (`pulumi.Input[str]`) - Type of action.
          * `matchConditions` (`pulumi.Input[list]`) - One or more `match_conditions` blocks as defined below.
            * `matchValues` (`pulumi.Input[list]`) - A list of match values.
            * `matchVariables` (`pulumi.Input[list]`) - One or more `match_variables` blocks as defined below.
              * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection
              * `variableName` (`pulumi.Input[str]`) - The name of the Match Variable
            * `negationCondition` (`pulumi.Input[bool]`) - Describes if this is negate condition or not
            * `operator` (`pulumi.Input[str]`) - Describes operator to be matched.
          * `name` (`pulumi.Input[str]`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource.
          * `priority` (`pulumi.Input[float]`) - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value.
          * `ruleType` (`pulumi.Input[str]`) - Describes the type of rule.

        The **managed_rules** object supports the following:
          * `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` block defined below.
            * `matchVariable` (`pulumi.Input[str]`)
            * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection.
            * `selectorMatchOperator` (`pulumi.Input[str]`) - Describes operator to be matched. Possible values: `Contains`, `EndsWith`, `Equals`, `EqualsAny`, `StartsWith`.
          * `managedRuleSets` (`pulumi.Input[list]`) - One or more `managed_rule_set` block defined below.
            * `ruleGroupOverrides` (`pulumi.Input[list]`) - One or more `rule_group_override` block defined below.
              * `disabledRules` (`pulumi.Input[list]`) - One or more Rule ID's
              * `ruleGroupName` (`pulumi.Input[str]`) - The name of the Rule Group
            * `type` (`pulumi.Input[str]`) - The rule set type.
            * `version` (`pulumi.Input[str]`) - The rule set version.

        The **policy_settings** object supports the following:
          * `enabled` (`pulumi.Input[bool]`) - Describes if the policy is in enabled state or disabled state Defaults to `Enabled`.
          * `mode` (`pulumi.Input[str]`) - Describes if it is in detection mode or prevention mode at the policy level Defaults to `Prevention`.
        """
        # Backwards-compatibility shims: ``__name__``/``__opts__`` are the
        # deprecated spellings of ``resource_name``/``opts``.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # Without an explicit resource ID we are creating a new resource:
        # build the property bag from the constructor arguments and
        # enforce the required inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['custom_rules'] = custom_rules
            __props__['location'] = location
            if managed_rules is None:
                raise TypeError("Missing required property 'managed_rules'")
            __props__['managed_rules'] = managed_rules
            __props__['name'] = name
            __props__['policy_settings'] = policy_settings
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
        super(Policy, __self__).__init__(
            'azure:waf/policy:Policy',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, custom_rules=None, location=None, managed_rules=None, name=None, policy_settings=None, resource_group_name=None, tags=None):
        """
        Get an existing Policy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[list] custom_rules: One or more `custom_rules` blocks as defined below.
        :param pulumi.Input[str] location: Resource location. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] managed_rules: A `managed_rules` blocks as defined below.
        :param pulumi.Input[str] name: The name of the policy. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] policy_settings: A `policy_settings` block as defined below.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the Web Application Firewall Policy.

        The **custom_rules** object supports the following:
          * `action` (`pulumi.Input[str]`) - Type of action.
          * `matchConditions` (`pulumi.Input[list]`) - One or more `match_conditions` blocks as defined below.
            * `matchValues` (`pulumi.Input[list]`) - A list of match values.
            * `matchVariables` (`pulumi.Input[list]`) - One or more `match_variables` blocks as defined below.
              * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection
              * `variableName` (`pulumi.Input[str]`) - The name of the Match Variable
            * `negationCondition` (`pulumi.Input[bool]`) - Describes if this is negate condition or not
            * `operator` (`pulumi.Input[str]`) - Describes operator to be matched.
          * `name` (`pulumi.Input[str]`) - Gets name of the resource that is unique within a policy. This name can be used to access the resource.
          * `priority` (`pulumi.Input[float]`) - Describes priority of the rule. Rules with a lower value will be evaluated before rules with a higher value.
          * `ruleType` (`pulumi.Input[str]`) - Describes the type of rule.

        The **managed_rules** object supports the following:
          * `exclusions` (`pulumi.Input[list]`) - One or more `exclusion` block defined below.
            * `matchVariable` (`pulumi.Input[str]`)
            * `selector` (`pulumi.Input[str]`) - Describes field of the matchVariable collection.
            * `selectorMatchOperator` (`pulumi.Input[str]`) - Describes operator to be matched. Possible values: `Contains`, `EndsWith`, `Equals`, `EqualsAny`, `StartsWith`.
          * `managedRuleSets` (`pulumi.Input[list]`) - One or more `managed_rule_set` block defined below.
            * `ruleGroupOverrides` (`pulumi.Input[list]`) - One or more `rule_group_override` block defined below.
              * `disabledRules` (`pulumi.Input[list]`) - One or more Rule ID's
              * `ruleGroupName` (`pulumi.Input[str]`) - The name of the Rule Group
            * `type` (`pulumi.Input[str]`) - The rule set type.
            * `version` (`pulumi.Input[str]`) - The rule set version.

        The **policy_settings** object supports the following:
          * `enabled` (`pulumi.Input[bool]`) - Describes if the policy is in enabled state or disabled state Defaults to `Enabled`.
          * `mode` (`pulumi.Input[str]`) - Describes if it is in detection mode or prevention mode at the policy level Defaults to `Prevention`.
        """
        # Attach the provider ID so the engine looks up existing state
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["custom_rules"] = custom_rules
        __props__["location"] = location
        __props__["managed_rules"] = managed_rules
        __props__["name"] = name
        __props__["policy_settings"] = policy_settings
        __props__["resource_group_name"] = resource_group_name
        __props__["tags"] = tags
        return Policy(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        """Map a provider (camelCase) property name to its Python (snake_case) form."""
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        """Map a Python (snake_case) property name to its provider (camelCase) form."""
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.671975 | 223 | 0.593467 | 15,915 | 0.980894 | 0 | 0 | 4,348 | 0.267982 | 0 | 0 | 13,563 | 0.835932 |
8ce83244a55e8bc68a89ac42669afba7cecbc338 | 1,961 | py | Python | tests/test__file_importer.py | johnmdelgado/SRE-Project | 4637c2fa5a7d93da96d1e14ab96fcab8b652f076 | [
"MIT"
] | null | null | null | tests/test__file_importer.py | johnmdelgado/SRE-Project | 4637c2fa5a7d93da96d1e14ab96fcab8b652f076 | [
"MIT"
] | null | null | null | tests/test__file_importer.py | johnmdelgado/SRE-Project | 4637c2fa5a7d93da96d1e14ab96fcab8b652f076 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''
FileName: test__file_importer.py
Author: John Delgado
Created Date: 8/7/2020
Version: 1.0 Initial Development
This is the testing file for the file_importer script
'''
import os
import sys
import inspect
# Make the sibling ../scripts directory importable: resolve this test
# file's absolute location, step one directory up, append "/scripts".
functions_dir = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))+ "/scripts" #scripts Directory
print(functions_dir)
sys.path.insert(0, functions_dir)
import file_importer
import unittest
import yaml
# Shared test configuration (debug flags, sample file paths).
# NOTE(review): the relative path assumes the tests are run from the
# tests/ directory — confirm the working directory before running.
with open("../configs/config.yaml", "r") as ymlfile:
    config = yaml.safe_load(ymlfile)
class password_characters_test_case(unittest.TestCase):
    """Unit tests for ``file_importer.file_importer``."""

    # --- inputs that should raise and terminate the program -------------
    def test_file_path_does_not_exist(self):
        # A path that does not exist must raise.
        missing_path = "./test.txt"
        with self.assertRaises(Exception):
            file_importer.file_importer(missing_path,
                                        config["debugging"]["test_debug"])

    def test_file_is_not_txt_file(self):
        # A file with the wrong extension must raise.
        csv_path = "../data/test.csv"
        with self.assertRaises(Exception):
            file_importer.file_importer(csv_path,
                                        config["debugging"]["test_debug"])

    # --- valid file paths that should return a mapping ------------------
    def test_default_file_path_from_config(self):
        # The sample file named in the config should import successfully.
        sample_path = config["testing"]["sample_excluded_pw_filepath"]
        imported = file_importer.file_importer(sample_path,
                                               config["debugging"]["test_debug"])
        self.assertIsInstance(imported, object)
if __name__ == '__main__':
    # Discover and run the test cases defined above.
    unittest.main()
| 35.654545 | 137 | 0.570117 | 1,346 | 0.686384 | 0 | 0 | 0 | 0 | 0 | 0 | 912 | 0.465069 |
8ce85526db08143d6f7b5fd20f02640f67bf4970 | 1,058 | py | Python | ProjectEuler/p049.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-01-30T13:21:30.000Z | 2018-01-30T13:21:30.000Z | ProjectEuler/p049.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | null | null | null | ProjectEuler/p049.py | TISparta/competitive-programming-solutions | 31987d4e67bb874bf15653565c6418b5605a20a8 | [
"MIT"
] | 1 | 2018-08-29T13:26:50.000Z | 2018-08-29T13:26:50.000Z |
# Execution time : 0.440223 seconds
# Solution Explanation
# We can simplily iterate through all the 4-digits numbers
# Then generate all the permutation of this number and check
# if the desirable sequence, distinct from the given one, is found
import time
width = 40
import itertools
import math
def solution():
isPrime = lambda p: p>=1000 and all(p%it!=0 for it in range(2,int(math.sqrt(p))+1))
for num in range(1488,10000):
v = [int(''.join(ch for ch in it)) for it in itertools.permutations(str(num))]
v.sort()
for it1 in range(len(v)):
for it2 in range(it1+1,len(v)):
r = v[it2] - v[it1]
if r > 0 and v[it1]!=1487 and v[it2]+r in v:
if isPrime(v[it1]) and isPrime(v[it2]) and isPrime(v[it2]+r):
return str(v[it1])+str(v[it2])+str(v[it2]+r)
if __name__=="__main__":
start_ = time.time()
print(' Answer -> %s '.center(width,'-') % ( solution() ))
print(' %f seconds '.center(width,'-') % ( time.time() - start_))
| 31.117647 | 87 | 0.591682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.273157 |
8ce9041cde0ebb1b404234f645d889fe9d5bf906 | 955 | py | Python | session7/Keypad.py | rezafari/raspberry | e6720780f3c65ee1809040fc538f793fe44f0111 | [
"MIT"
] | 19 | 2017-09-26T04:37:55.000Z | 2021-12-15T05:39:57.000Z | session7/Keypad.py | rezafari/raspberry | e6720780f3c65ee1809040fc538f793fe44f0111 | [
"MIT"
] | 44 | 2017-11-22T04:56:26.000Z | 2018-03-17T14:30:00.000Z | session7/Keypad.py | rezafari/raspberry | e6720780f3c65ee1809040fc538f793fe44f0111 | [
"MIT"
] | 21 | 2017-09-23T05:25:59.000Z | 2021-05-31T10:24:49.000Z | ######################################################################
# Keypad.py
#
# This program read matrix keypad and print label of pressed button
######################################################################
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
# 4x4 matrix keypad wiring: rows are sampled as inputs (pulled low),
# columns are driven as outputs.
rows = [17, 27, 22, 18]
cols = [23, 24, 25, 8]
# Key labels, indexed as keys[row][column].
keys = [
    ['1', '2', '3','F1'],
    ['4', '5', '6','F2'],
    ['7', '8', '9','F3'],
    ['Stop', '0', 'Start','Enter']]
for rowPin in rows:
    GPIO.setup(rowPin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
for colPin in cols:
    GPIO.setup(colPin, GPIO.OUT)
def GetKey():
    """Scan the keypad matrix once and return the pressed key's label.

    Drives each column high in turn and samples every row input; a high
    row while its column is driven means the key at (row, column) is
    pressed.  Returns the int 0 (falsy) when nothing is pressed.  If
    several keys are held at once, the last one scanned wins.
    Note: the '0' key returns the *string* '0', which is truthy, so it
    is distinguishable from the no-key sentinel.
    """
    key = 0
    for colNum, colPin in enumerate(cols):
        GPIO.output(colPin, 1)  # energise this column
        for rowNum, rowPin in enumerate(rows):
            if GPIO.input(rowPin):  # row high -> key at (rowNum, colNum)
                key = keys[rowNum][colNum]
        GPIO.output(colPin, 0)  # de-energise before the next column
    return key
# Poll the keypad forever; print each press, with a crude 0.3 s pause
# acting as debounce / repeat suppression.
while True:
    key = GetKey()
    if key :
        print(key)
        time.sleep(0.3)
| 23.292683 | 70 | 0.479581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.300524 |
8cea1e3a37d9a781638564690e38de965b9f82ad | 106 | py | Python | apps.py | qcoumes/django-dummy-app | e53266e4128cfada992e0d5938083603b25c4e84 | [
"MIT"
] | null | null | null | apps.py | qcoumes/django-dummy-app | e53266e4128cfada992e0d5938083603b25c4e84 | [
"MIT"
] | null | null | null | apps.py | qcoumes/django-dummy-app | e53266e4128cfada992e0d5938083603b25c4e84 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class DummyAppConfigConfig(AppConfig):
    # Django application configuration for the ``django_dummy_app`` package.
    # NOTE(review): the doubled "Config" in the class name looks accidental,
    # but renaming would break any dotted references in INSTALLED_APPS,
    # so it is left as-is.
    name = 'django_dummy_app'
| 15.142857 | 38 | 0.783019 | 68 | 0.641509 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.169811 |
8ceb714b7a4d53f34b6d75b19bb0ad8dddc83171 | 2,236 | py | Python | src/VulnSituation/DownloadLinuxKernel.py | LeoneChen/VulnSituation | 155e1849e2d13ef12068c9474efe760ba917844a | [
"Apache-2.0"
] | null | null | null | src/VulnSituation/DownloadLinuxKernel.py | LeoneChen/VulnSituation | 155e1849e2d13ef12068c9474efe760ba917844a | [
"Apache-2.0"
] | null | null | null | src/VulnSituation/DownloadLinuxKernel.py | LeoneChen/VulnSituation | 155e1849e2d13ef12068c9474efe760ba917844a | [
"Apache-2.0"
] | null | null | null | # Author: 14281055 Liheng Chen CIT BJTU
# File Name: DownloadLinuxKernel.py
import Repository
import random
import bs4
import os
import re
def download_certain_version_number_linux_kernel(hyperlink, save_dir):
    """Download every distinct kernel tarball version listed on one index page.

    Fetches *hyperlink* (a kernel.org directory listing), finds anchors whose
    text looks like ``linux-<version>.tar...`` and downloads each version
    number at most once into *save_dir*.

    Args:
        hyperlink: URL of the kernel series directory (expected to end in '/').
        save_dir: local directory the tarballs are written into.

    Returns:
        True when the index page could be fetched (even if nothing matched),
        False when the HTTP request yielded no content.
    """
    content = Repository.requests_get_content(
        hyperlink, timeout=10,
        headers={'User-Agent': random.choice(Repository.user_agent_list)})
    if not content:
        return False

    soup = bs4.BeautifulSoup(content, 'lxml')
    version_number_downloaded_list = []
    # Raw string with escaped dots: the old pattern 'linux-(\d+(.\d+)*)\.tar.*'
    # let the bare '.' match any character (accepting e.g. 'linux-4x1.tar')
    # and triggered a DeprecationWarning for '\d' in a non-raw string.
    tarball_re = re.compile(r'linux-(\d+(?:\.\d+)*)\.tar.*', re.I)
    for tag_a in soup.select('a'):
        match = tarball_re.match(tag_a.get_text())
        if match and match.group(1) not in version_number_downloaded_list:
            print('\rDownload:\033[4;34m' + match.group() + '\033[0m', end='')
            Repository.download_file_in_chunk(hyperlink + tag_a['href'],
                                              os.path.join(save_dir, tag_a.get_text()))
            version_number_downloaded_list.append(match.group(1))
    return True
def download_linux_kernel(save_dir, linux_kernel_series):
    """Mirror every kernel series matching *linux_kernel_series* into *save_dir*.

    Fetches the kernel.org top-level listing, and for each series link whose
    text matches the given regular expression, downloads all tarballs of
    that series.  Returns True when the listing could be fetched, else False.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    root_url = 'https://mirrors.edge.kernel.org/pub/linux/kernel/'
    headers = {'User-Agent': random.choice(Repository.user_agent_list)}
    content = Repository.requests_get_content(root_url, timeout=10,
                                              headers=headers)
    if not content:
        return False

    soup = bs4.BeautifulSoup(content, 'lxml')
    for anchor in soup.select('a'):
        label = anchor.get_text().strip()
        if not re.match(linux_kernel_series, label, re.I):
            continue
        succeeded = download_certain_version_number_linux_kernel(
            root_url + anchor['href'], save_dir)
        if not succeeded:
            print('\r\033[1;31mFail:' + label + '\033[0m')
    return True
if __name__ == '__main__':
    # Mirror every major kernel series (vN.M or vN.x) into the given folder.
    download_linux_kernel(r'D:\Linux Kernel\All Zip', r'v\d+\.(\d+|x)')
8cec8ca187df1c877444afc96cfcd6107c908805 | 3,950 | py | Python | v7/upgrade_metadata/upgrade_metadata.py | MattiooFR/plugins | 90a686609fb5be2e83221c1f0e8fce18cb2b6021 | [
"MIT"
] | 53 | 2015-08-14T20:28:05.000Z | 2021-06-02T00:38:23.000Z | v7/upgrade_metadata/upgrade_metadata.py | MattiooFR/plugins | 90a686609fb5be2e83221c1f0e8fce18cb2b6021 | [
"MIT"
] | 280 | 2015-01-10T15:57:44.000Z | 2022-03-27T20:47:08.000Z | v7/upgrade_metadata/upgrade_metadata.py | MattiooFR/plugins | 90a686609fb5be2e83221c1f0e8fce18cb2b6021 | [
"MIT"
] | 93 | 2015-01-26T19:39:02.000Z | 2022-03-24T17:12:42.000Z | # -*- coding: utf-8 -*-
# Copyright © 2014–2015, Chris Warrick.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import io
import os
import nikola.post
from nikola.plugin_categories import Command
from nikola import utils
class UpgradeMetadata(Command):
    """Upgrade metadata from the old no-descriptions format to the new reST-esque format."""

    name = 'upgrade_metadata'
    doc_purpose = 'upgrade old-style metadata'
    cmd_options = [
        {
            'name': 'yes',
            'short': 'y',
            'long': 'yes',
            'type': bool,
            'default': False,
            'help': 'Proceed without confirmation',
        },
    ]
    # Old-style metadata files list values positionally, one per line,
    # in exactly this order (no ".. key:" prefixes).
    fields = ('title', 'slug', 'date', 'tags', 'link', 'description', 'type')

    def _execute(self, options, args):
        """Find posts with old-style metadata and rewrite their .meta files.

        Asks for confirmation first unless ``--yes`` was given; each
        positional value is re-emitted as a ``.. field: value`` line.
        """
        L = utils.get_logger('upgrade_metadata', utils.STDERR_HANDLER)
        nikola.post._UPGRADE_METADATA_ADVERTISED = True

        # scan posts
        self.site.scan_posts()
        flagged = []
        for post in self.site.timeline:
            if not post.newstylemeta:
                flagged.append(post)
        if flagged:
            if len(flagged) == 1:
                L.info('1 post (and/or its translations) contains old-style metadata:')
            else:
                L.info('{0} posts (and/or their translations) contain old-style metadata:'.format(len(flagged)))
            for post in flagged:
                L.info('    ' + post.metadata_path)
            if not options['yes']:
                yesno = utils.ask_yesno("Proceed with metadata upgrade?")
            # ``or`` short-circuits, so ``yesno`` is only read when the
            # user was actually prompted above.
            if options['yes'] or yesno:
                for post in flagged:
                    for lang in self.site.config['TRANSLATIONS'].keys():
                        if lang == post.default_lang:
                            fname = post.metadata_path
                        else:
                            # Translated posts keep their metadata in a
                            # language-specific sibling .meta file.
                            meta_path = os.path.splitext(post.source_path)[0] + '.meta'
                            fname = utils.get_translation_candidate(post.config, meta_path, lang)

                        if os.path.exists(fname):
                            with io.open(fname, 'r', encoding='utf-8') as fh:
                                meta = fh.readlines()

                            if not meta[min(1, len(meta) - 1)].startswith('.. '):
                                # check if we're dealing with old style metadata
                                with io.open(fname, 'w', encoding='utf-8') as fh:
                                    for k, v in zip(self.fields, meta):
                                        fh.write('.. {0}: {1}'.format(k, v))
                                L.debug(fname)

                L.info('{0} posts upgraded.'.format(len(flagged)))
            else:
                L.info('Metadata not upgraded.')
        else:
            L.info('No old-style metadata posts found. No action is required.')
| 41.145833 | 112 | 0.580253 | 2,670 | 0.675095 | 0 | 0 | 0 | 0 | 0 | 0 | 1,796 | 0.454109 |
8ceeb04c27c60808e7308fa7e85058c9b847f524 | 9,118 | py | Python | code/visualization/visualizer.py | JRMfer/GangRivalry | c621ec5d5e19de08a750800e27a62a942c2237b1 | [
"MIT"
] | null | null | null | code/visualization/visualizer.py | JRMfer/GangRivalry | c621ec5d5e19de08a750800e27a62a942c2237b1 | [
"MIT"
] | null | null | null | code/visualization/visualizer.py | JRMfer/GangRivalry | c621ec5d5e19de08a750800e27a62a942c2237b1 | [
"MIT"
] | 1 | 2020-02-09T16:17:26.000Z | 2020-02-09T16:17:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script contains functions to visualize the development of the statistics
over the total iterations per simulation and it contains functions to plot an
arbitrary graph and all the graphs generated by all the simulations for a given
human mobility algorithm.
"""
# Import built-in modules
import os
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
def plot_metrics(algorithm, simulations, user_name):
    """Plot averaged accuracy and shape statistics for one algorithm.

    Collects the per-simulation "datacollector_sim<N>.csv" files written for
    the given mobility `algorithm` and `user_name`, computes per-iteration
    averages and standard deviations for three accuracy statistics
    (accuracy, F1, Matthews correlation coefficient) and three shape
    metrics (density, variance of degree, centrality), and saves one
    figure per statistic via make_figure().

    Input:
    algorithm = algorithm for human mobility (e.g. "SBLN", "GRAV", "BM")
    simulations = the total amount of simulations performed
    user_name = name of the user who ran the simulations
    Output:
    Plots are saved in a folder specified by results_{user_name}/algorithm
    """
    # Create relative file path to results folder and to the csv files
    alg_path = os.path.join(f"results_{user_name}", algorithm)
    path = os.path.join(alg_path, "datacollector_sim")
    # Load all csv files into a dataframe and store each of them in a list.
    dfs = [pd.read_csv(path + str(sim) + ".csv") for sim in range(simulations)]
    # Number of observations (iteration checkpoints) per statistic, plus one
    # accumulator per metric family (accuracy vs. shape).
    size = dfs[0]["Accuracy"].size
    all_accuracies, all_shape = [], []
    # Collect all values for each of the three statistics separately.
    # NOTE(review): the "Accuracy"/"Shape" columns appear to hold stringified
    # 3-tuples like "(a, b, c)"; the strip("(),")/split(",") below parses
    # them -- float() tolerates the leading spaces that remain. Confirm the
    # datacollector really writes tuples of exactly three values.
    nr_stats = 3
    for variable in range(nr_stats):
        var_accuracy = [[] for _ in range(size)]
        var_shape = [[] for _ in range(size)]
        for df in dfs:
            for obs in range(size):
                accuracy = df[["Accuracy"]].iloc[obs][0]
                shapes = df[["Shape"]].iloc[obs][0]
                acc_preproccesed = accuracy.strip("(),").split(",")
                shapes_preproccesed = shapes.strip("(),").split(",")
                number = float(acc_preproccesed[variable])
                number2 = float(shapes_preproccesed[variable])
                var_accuracy[obs].append(number), var_shape[obs].append(number2)
        all_accuracies.append(var_accuracy), all_shape.append(var_shape)
    # Per statistic and per observation moment: mean and std over simulations.
    ave_accuracies, ave_shapes = [[], [], []], [[], [], []]
    stds_accuracies, stds_shapes = [[], [], []], [[], [], []]
    for variable in range(nr_stats):
        for acc, s in zip(all_accuracies[variable], all_shape[variable]):
            ave_accuracies[variable].append(np.mean(acc))
            ave_shapes[variable].append(np.mean(s))
            stds_accuracies[variable].append(np.std(acc))
            stds_shapes[variable].append(np.std(s))
    # Make plots for the accuracy statistics
    variables = ["Accuracy", "F1", "Mathews_Correlation_Coeffcient"]
    make_figure(algorithm, alg_path, variables, ave_accuracies, stds_accuracies)
    # Make plots for the shape metrics
    variables = ["Density", "Variance_degree", "Centrality"]
    make_figure(algorithm, alg_path, variables, ave_shapes, stds_shapes)
def make_figure(algorithm, alg_path, variables, averages, stds):
    """Save one figure per statistic: its mean over all runs vs. iterations.

    Input:
    algorithm = human mobility algorithm ("SBLN", "GRAV" or "BM")
    alg_path = relative file path to save the figures
    variables = array-like containing the names of the statistics
    averages = nested list containing the averages of each statistic
    stds = nested list containing the standard deviations of each statistic
    Output:
    One PDF per statistic written under `alg_path`.
    """
    # Title and x-label depend on the mobility algorithm; unknown algorithms
    # simply get neither (matching an if/elif chain without an else).
    labels_per_algorithm = {
        "SBLN": ("Mean accuracy over all tests for Levy based walk",
                 "iteration number (10^5)"),
        "GRAV": ("Mean accuracy over all tests for gravitywalk",
                 "iteration number (10^5)"),
        "BM": ("Mean accuracy over all tests for Brownian Motion",
               "iteration number (10^6)"),
    }
    for idx, mean_series in enumerate(averages):
        plt.figure()
        # x axis: one point per observation moment, scaled by 0.01.
        x_values = [0.01 * step for step in range(len(mean_series))]
        plt.plot(x_values, mean_series, color="darkblue")
        if algorithm in labels_per_algorithm:
            title, xlabel = labels_per_algorithm[algorithm]
            plt.title(title)
            plt.xlabel(xlabel)
        plt.ylabel(variables[idx])
        # Shaded confidence band around the mean.
        plt.errorbar(
            x_values, mean_series, yerr=stds[idx],
            alpha=0.1, color="cornflowerblue"
        )
        # Write the figure and free it before the next iteration.
        out_file = os.path.join(alg_path,
                                f"plot_{algorithm}_{variables[idx]}.pdf")
        plt.savefig(out_file, dpi=300)
        plt.close()
def plot_network(road_dens, graph, user_name, gr_type):
    """Draw one rivalry graph and save it as a PDF.

    Input:
    road_dens = road density matrix of the area (numpy)
    graph = graph object (networkx)
    user_name = name of the user who ran the simulations (string)
    gr_type = name of the graph, also used as file name and title (string)
    Output:
    Saves the figure as results_{user_name}/{gr_type}.pdf
    """
    # Output path plus the drawable extent of the area.
    out_path = os.path.join(f"results_{user_name}", f"{gr_type}.pdf")
    max_x = road_dens.shape[1] - 1
    max_y = road_dens.shape[0] - 1
    # Nodes are drawn at their stored "pos" attribute; node size grows with
    # the nodal degree (number of rivals), with a minimum size for degree 0.
    positions = nx.get_node_attributes(graph, "pos")
    degrees = dict(graph.degree)
    node_sizes = [(degree + 1) * 5 for degree in degrees.values()]
    nx.draw(graph, positions, node_size=node_sizes)
    # Clamp the axes to the area, add the title, save and close the figure.
    plt.xlim(0, max_x)
    plt.ylim(0, max_y)
    plt.title(f"{gr_type}")
    plt.savefig(out_path, dpi=300)
    plt.close()
def plot_networks(algorithm, simulations, config, user_name):
    """Render the end-of-run rivalry network of every simulation.

    Loads each saved rivalry matrix, rebuilds the corresponding rivalry
    graph (an edge when the relative interaction strength exceeds the
    configured threshold) and saves one figure per simulation.

    Input:
    algorithm = human mobility algorithm
    simulations = total amount of simulations that the model has ran
    config = object containing the configuration of the model
    user_name = name of the user who ran the simulations
    """
    # Create relative file path to results folder
    # and to load each rivalry matrix
    alg_path = os.path.join(f"results_{user_name}", algorithm)
    path = os.path.join(alg_path, "rivalry_matrix_sim")
    # Load all rivalry matrices
    matrices_sim = [np.load(path + str(sim) + ".npy")
                    for sim in range(simulations)]
    # Determine width and height of the area (Hollenbeck)
    width = config.road_dens.shape[1] - 1
    height = config.road_dens.shape[0] - 1
    # Create graph for each of the rivalry matrices
    shape = len(config.gang_info)
    for mat, matrix in enumerate(matrices_sim):
        # Initialize graph with the nodes (gangs) at their home locations
        graph = nx.Graph()
        for gang in config.gang_info.values():
            graph.add_node(gang.number, pos=gang.coords)
        # Add an edge whenever gang i's interactions with gang j, relative to
        # i's total interactions, exceed the configured threshold.
        # NOTE(review): the graph is undirected, so a later add_edge(j, i)
        # overwrites the 'color' attribute set by add_edge(i, j) -- confirm
        # this is intended.
        for i in range(shape):
            total_interactions = matrix[i, :].sum()
            for j in range(shape):
                if total_interactions:
                    rival_strength = matrix[i][j] / total_interactions
                    if rival_strength > config.parameters["threshold"]:
                        graph.add_edge(i, j, color=config.colors[i])
        # Draw graph with the nodes at their location and their size depending
        # on the nodal degree (amount of rivals)
        pos = nx.get_node_attributes(graph, "pos")
        d = dict(graph.degree)
        nx.draw(graph, pos, node_size=[(v + 1) * 5 for v in d.values()])
        # Title depends on the human mobility algorithm (none if unknown).
        if algorithm == "GRAV":
            plt.title("Network Gravity model")
        elif algorithm == "SBLN":
            plt.title("Network Semi-Biased Levy walk")
        elif algorithm == "BM":
            plt.title("Network Brownian Motion")
        # Set limits axis and save figure to folder. If done close figure
        plt.xlim(0, width)
        plt.ylim(0, height)
        plt.savefig(os.path.join(alg_path, f"network_sim{mat}.pdf"), dpi=300)
        plt.close()
| 40.167401 | 80 | 0.645427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,624 | 0.507129 |
8cef6e357eeb4cff94f77de03b2338959c11b5e2 | 1,347 | py | Python | numpytorch/losses.py | Samyak2/numpytorch | b508c8c2169c6c94f5bee915a12beff0790da5f8 | [
"MIT"
] | 2 | 2021-04-14T07:14:50.000Z | 2021-04-14T17:22:55.000Z | numpytorch/losses.py | Samyak2/numpytorch | b508c8c2169c6c94f5bee915a12beff0790da5f8 | [
"MIT"
] | null | null | null | numpytorch/losses.py | Samyak2/numpytorch | b508c8c2169c6c94f5bee915a12beff0790da5f8 | [
"MIT"
] | null | null | null | import numpy as np
# Small epsilon used to keep log() and division arguments away from zero.
EPS = 1e-06
class Loss:
    """Abstract base class for loss functions.

    Subclasses override ``forward`` (the loss value) and ``backward`` (the
    gradient with respect to the predictions). Instances are callable:
    calling the object is shorthand for ``forward``.
    """
    def __init__(self):
        pass
    def __call__(self, y_real: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        # Delegate so that `loss(y, p)` behaves like `loss.forward(y, p)`.
        return self.forward(y_real, y_pred)
    def forward(self, y_real: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        """Calculate and return the loss value (must be overridden)."""
        raise NotImplementedError("Forward pass of this loss function has not been implemented")
    def backward(self, y_real: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        """Calculate the gradient of the output predictions (must be overridden)."""
        raise NotImplementedError("Backward pass of this loss function has not been implemented")
class BinaryCrossEntropy(Loss):
    """Binary cross entropy loss for binary classification.

    Negative log-likelihood between the true labels and the predicted
    probabilities; ``EPS`` keeps the logarithms and divisions finite when
    predictions reach exactly 0 or 1.
    """
    def forward(self, y_real: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        # -[y*log(p) + (1-y)*log(1-p)], stabilised with EPS.
        positive_term = y_real * np.log(y_pred + EPS)
        negative_term = (1 - y_real) * np.log(1 - y_pred + EPS)
        return -(positive_term + negative_term)
    def backward(self, y_real: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        # d/dp of the loss above: -y/p + (1-y)/(1-p), with the same EPS guards.
        positive_grad = y_real / (y_pred + EPS)
        negative_grad = (1 - y_real) / (1 - y_pred + EPS)
        return -positive_grad + negative_grad
| 32.071429 | 83 | 0.639941 | 1,309 | 0.971789 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.340015 |
8cf00123ba48dd1a021707f90e31886c363b468e | 6,202 | py | Python | appengine/monorail/features/test/hotlistcreate_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/monorail/features/test/hotlistcreate_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/monorail/features/test/hotlistcreate_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unit test for Hotlist creation servlet."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mox
import unittest
import settings
from framework import permissions
from features import hotlistcreate
from proto import site_pb2
from services import service_manager
from testing import fake
from testing import testing_helpers
class HotlistCreateTest(unittest.TestCase):
  """Tests for the HotlistCreate servlet."""
  def setUp(self):
    # Build a servlet wired to fake in-memory services; mox drives the stubs.
    self.cnxn = 'fake cnxn'
    self.mr = testing_helpers.MakeMonorailRequest()
    self.services = service_manager.Services(project=fake.ProjectService(),
                                             user=fake.UserService(),
                                             issue=fake.IssueService(),
                                             features=fake.FeaturesService())
    self.servlet = hotlistcreate.HotlistCreate('req', 'res',
                                               services=self.services)
    self.mox = mox.Mox()
  def tearDown(self):
    self.mox.UnsetStubs()
    self.mox.ResetAll()
  def CheckAssertBasePermissions(
      self, restriction, expect_admin_ok, expect_nonadmin_ok):
    """Exercise AssertBasePermission under one hotlist-creation restriction.

    Three callers are checked: an anonymous user (always rejected), the
    default (admin) request, and a signed-in non-admin; the expect_* flags
    say which of the latter two should be allowed.
    """
    old_hotlist_creation_restriction = settings.hotlist_creation_restriction
    settings.hotlist_creation_restriction = restriction
    # Anonymous user: permission is always denied.
    mr = testing_helpers.MakeMonorailRequest(
        perms=permissions.GetPermissions(None, {}, None))
    self.assertRaises(
        permissions.PermissionException,
        self.servlet.AssertBasePermission, mr)
    # Default request permissions (the expect_admin_ok case).
    mr = testing_helpers.MakeMonorailRequest()
    if expect_admin_ok:
      self.servlet.AssertBasePermission(mr)
    else:
      self.assertRaises(
          permissions.PermissionException,
          self.servlet.AssertBasePermission, mr)
    # Signed-in non-admin user (the expect_nonadmin_ok case).
    mr = testing_helpers.MakeMonorailRequest(
        perms=permissions.GetPermissions(mr.auth.user_pb, {111}, None))
    if expect_nonadmin_ok:
      self.servlet.AssertBasePermission(mr)
    else:
      self.assertRaises(
          permissions.PermissionException,
          self.servlet.AssertBasePermission, mr)
    # Restore the module-global setting so other tests are unaffected.
    settings.hotlist_creation_restriction = old_hotlist_creation_restriction
  def testAssertBasePermission(self):
    self.CheckAssertBasePermissions(
        site_pb2.UserTypeRestriction.ANYONE, True, True)
    self.CheckAssertBasePermissions(
        site_pb2.UserTypeRestriction.ADMIN_ONLY, True, False)
    self.CheckAssertBasePermissions(
        site_pb2.UserTypeRestriction.NO_ONE, False, False)
  def testGatherPageData(self):
    # A fresh creation form: user tab mode 'st6', all fields blank, public.
    page_data = self.servlet.GatherPageData(self.mr)
    self.assertEqual('st6', page_data['user_tab_mode'])
    self.assertEqual('', page_data['initial_name'])
    self.assertEqual('', page_data['initial_summary'])
    self.assertEqual('', page_data['initial_description'])
    self.assertEqual('', page_data['initial_editors'])
    self.assertEqual('no', page_data['initial_privacy'])
  def testProcessFormData(self):
    # Valid submission redirects to the new hotlist page of user 111.
    self.servlet.services.user.TestAddUser('owner', 111)
    self.mr.auth.user_id = 111
    post_data = fake.PostData(hotlistname=['Hotlist'], summary=['summ'],
                              description=['hey'],
                              editors=[''], is_private=['yes'])
    url = self.servlet.ProcessFormData(self.mr, post_data)
    self.assertTrue('/u/111/hotlists/Hotlist' in url)
  def testProcessFormData_OwnerInEditors(self):
    # Listing the owner among the editors must not duplicate them: the
    # created hotlist keeps them as owner only, with an empty editor list.
    self.servlet.services.user.TestAddUser('owner_editor', 222)
    self.mr.auth.user_id = 222
    self.mr.cnxn = 'fake cnxn'
    post_data = fake.PostData(hotlistname=['Hotlist-owner-editor'],
                              summary=['summ'],
                              description=['hi'],
                              editors=['owner_editor'], is_private=['yes'])
    url = self.servlet.ProcessFormData(self.mr, post_data)
    self.assertTrue('/u/222/hotlists/Hotlist-owner-editor' in url)
    hotlists_by_id = self.servlet.services.features.LookupHotlistIDs(
        self.mr.cnxn, ['Hotlist-owner-editor'], [222])
    self.assertTrue(('hotlist-owner-editor', 222) in hotlists_by_id)
    hotlist_id = hotlists_by_id[('hotlist-owner-editor', 222)]
    hotlist = self.servlet.services.features.GetHotlist(
        self.mr.cnxn, hotlist_id, use_cache=False)
    self.assertEqual(hotlist.owner_ids, [222])
    self.assertEqual(hotlist.editor_ids, [])
  def testProcessFormData_RejectTemplateInvalid(self):
    mr = testing_helpers.MakeMonorailRequest()
    # invalid hotlist name and nonexistent editor
    post_data = fake.PostData(hotlistname=['123BadName'], summary=['summ'],
                              description=['hey'],
                              editors=['test@email.com'], is_private=['yes'])
    # Expect the servlet to re-render the form with the submitted values.
    self.mox.StubOutWithMock(self.servlet, 'PleaseCorrect')
    self.servlet.PleaseCorrect(
        mr, initial_name = '123BadName', initial_summary='summ',
        initial_description='hey',
        initial_editors='test@email.com', initial_privacy='yes')
    self.mox.ReplayAll()
    url = self.servlet.ProcessFormData(mr, post_data)
    self.mox.VerifyAll()
    self.assertEqual(mr.errors.hotlistname, 'Invalid hotlist name')
    self.assertEqual(mr.errors.editors,
                     'One or more editor emails is not valid.')
    self.assertIsNone(url)
  def testProcessFormData_RejectTemplateMissing(self):
    mr = testing_helpers.MakeMonorailRequest()
    # missing name and summary
    post_data = fake.PostData()
    # Expect a re-render with empty/None initial values for all fields.
    self.mox.StubOutWithMock(self.servlet, 'PleaseCorrect')
    self.servlet.PleaseCorrect(mr, initial_name = None, initial_summary=None,
                               initial_description='',
                               initial_editors='', initial_privacy=None)
    self.mox.ReplayAll()
    url = self.servlet.ProcessFormData(mr, post_data)
    self.mox.VerifyAll()
    self.assertEqual(mr.errors.hotlistname, 'Missing hotlist name')
    self.assertEqual(mr.errors.summary,'Missing hotlist summary')
    self.assertIsNone(url)
| 41.624161 | 77 | 0.685263 | 5,580 | 0.89971 | 0 | 0 | 0 | 0 | 0 | 0 | 988 | 0.159303 |
8cf0b1c18c5375055000e88060d4e4637f6ec81e | 3,123 | py | Python | PocketBeagle/Grove/Start_the_Party.py | zhanglongqi/cloud9-examples | db73b843a807361153a00e17a703206202cf73f8 | [
"MIT"
] | 37 | 2019-05-30T16:40:01.000Z | 2021-11-29T10:34:36.000Z | PocketBeagle/Grove/Start_the_Party.py | silver2row/cloud9-examples | 741198a2b7a3c625fc4d877bdc5f60949ddf724d | [
"MIT"
] | 35 | 2019-04-03T17:35:26.000Z | 2022-03-23T12:28:08.000Z | PocketBeagle/Grove/Start_the_Party.py | silver2row/cloud9-examples | 741198a2b7a3c625fc4d877bdc5f60949ddf724d | [
"MIT"
] | 40 | 2019-05-13T00:25:42.000Z | 2022-03-23T05:13:15.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# [Grove - 12 Key Capacitive I2C Touch Sensor V2]
# (http://wiki.seeedstudio.com/Grove-12_Key_Capacitive_I2C_Touch_Sensor_V2-MPR121/) on I2C2
# [Grove โ Speaker](http://wiki.seeedstudio.com/Grove-Speaker/)
# on UART2
# [Grove - Chainable RGB LED X 2](http://wiki.seeedstudio.com/Grove-Chainable_RGB_LED/)
# on A2
import time
import wave
import pyaudio
from Captouch import MPR121
from RGBLed import P981X
# WAV files for one octave of the musical scale (played from /tmp/scale/);
# the list index matches the touch-pad mapping used in main().
_SCALE_DEFS = [
    'do.wav',
    're.wav',
    'me.wav',
    'fa.wav',
    'so.wav',
    'la.wav',
    'ti.wav',
    'do+.wav'
]
def Play_Music(file):
    """Stream a WAV file to the speaker while a touch pad stays pressed.

    file: path of the WAV file to play. Playback continues only while the
    MPR121 touch sensor reports a touch; the sensor is polled into the
    module-global Mpr121Data, which the audio callback checks.
    """
    # Stream chunk size. NOTE(review): currently unused -- in callback mode
    # PyAudio chooses its own frame count per callback.
    chunk = 1024
    # open a wav format music
    f = wave.open(file,"rb")
    # instantiate PyAudio
    p = pyaudio.PyAudio()
    # Callback invoked by the PyAudio output stream to fetch more frames.
    def callback(in_data, frame_count, time_info, status):
        data = f.readframes(frame_count)
        # Keep streaming (paContinue) while a pad is pressed; otherwise
        # signal completion (paComplete) so stream.is_active() turns False.
        if Mpr121Data[0] != 0:
            return (data,pyaudio.paContinue)
        return (data,pyaudio.paComplete)
    # Open a callback-driven output stream matching the WAV's format.
    stream = p.open(format = p.get_format_from_width(f.getsampwidth()),
                    channels = f.getnchannels(),
                    rate = f.getframerate(),
                    output = True,
                    stream_callback=callback)
    #Start stream
    stream.start_stream()
    # Poll the touch sensor while the stream plays; the callback above sees
    # the freshest sample through the module-global Mpr121Data.
    while stream.is_active():
        global Mpr121Data
        Mpr121Data = Mpr121.get()
        time.sleep(0.01)
    # stop stream
    stream.stop_stream()
    stream.close()
    f.close()
    # close PyAudio
    p.terminate()
def main():
    """Poll the MPR121 pads forever; light the LEDs and play the matching
    scale note while a pad is pressed, turn the LEDs off otherwise."""
    LED = P981X()
    global Mpr121
    Mpr121 = MPR121()
    while True:
        GetMpr121 = Mpr121.get()
        Mpr121Result = GetMpr121[1]
        #Mpr121Result isn't empty when the Mpr121 is pressed
        if any(Mpr121Result) != False:
            # Check which button is pressed on the Mpr121, then play a
            # different note and show a colour derived from the pad index.
            for i in range(12):
                if(Mpr121Result[i] == 1):
                    if i > 3 :
                        # NOTE(review): (x & 0x02) * 255 yields 0 or 510 and
                        # (x & 0x04) * 255 yields 0 or 1020, i.e. values above
                        # 255 -- presumably P981X.set clamps them; confirm, or
                        # the intent may have been ((x >> n) & 1) * 255.
                        LED.set(0,((i-4)&0x01)*255,((i-4)&0x02)*255,((i-4)&0x04)*255)
                        LED.set(1,((i-4)&0x01)*255,((i-4)&0x02)*255,((i-4)&0x04)*255)
                        Play_Music("/tmp/scale/%s"%_SCALE_DEFS[i-4])
                    else :
                        LED.set(0,(i&0x01)*255,(i&0x02)*255,(i&0x04)*255)
                        LED.set(1,(i&0x01)*255,(i&0x02)*255,(i&0x04)*255)
                        if i == 0:
                            # Pad 0 would otherwise be all-off; use a fixed colour.
                            LED.set(0,50,50,200)
                            LED.set(1,50,50,200)
                        Play_Music("/tmp/scale/%s"%_SCALE_DEFS[i])
        else :
            # No pad pressed: switch both LEDs off.
            LED.set(0,0,0,0)
            LED.set(1,0,0,0)
        time.sleep(0.05)
if __name__ == "__main__":
main() | 34.7 | 105 | 0.540826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,003 | 0.32096 |
8cf1600e5f77551d25752ca057e78d7b4032d079 | 518 | py | Python | host/rdmem.py | flowswitch/phison | d9415a8d5c62354d09cd6410754c9d8bb65e164f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | host/rdmem.py | flowswitch/phison | d9415a8d5c62354d09cd6410754c9d8bb65e164f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | host/rdmem.py | flowswitch/phison | d9415a8d5c62354d09cd6410754c9d8bb65e164f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | """read XDATA memory"""
import sys
import PyScsi as drv
import Phison as ph
from util import BinFile
# Expected invocation: rdmem.py <output file> <start address> <size>
if len(sys.argv)!=4:
	sys.exit("Read chip internal memory\nUsage: %s <file> <addr> <size>\nExample: %s ram.bin 0 0x10000" % (sys.argv[0], sys.argv[0]))
# Base 0: accepts decimal, 0x... hex, 0o... octal, etc.
addr = int(sys.argv[2], 0)
size = int(sys.argv[3], 0)
# Find an attached Phison device to talk to.
disk = ph.FindDrive()
if not disk:
	sys.exit("No Phison devices found !")
# Make SCSI errors raise exceptions instead of being reported as codes.
drv.err_mode = drv.err_mode_raise
drv.open(disk)
# NOTE: Python 2 print statement -- this script is Python 2 only.
print "Reading..."
# Dump `size` bytes of the chip's internal memory from `addr` to the file.
BinFile.save(sys.argv[1], ph.ReadMemory(addr, size))
drv.close()
| 23.545455 | 130 | 0.69305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.293436 |
8cf1b63f0ee0ab41801ca7a509ae506ce2873aca | 2,672 | py | Python | zapper/context.py | alfuananzo/zapper | 8a87e332ebdad8803058793c036a1426b9240b98 | [
"MIT"
] | 1 | 2019-05-07T09:56:49.000Z | 2019-05-07T09:56:49.000Z | zapper/context.py | alfuananzo/zapper | 8a87e332ebdad8803058793c036a1426b9240b98 | [
"MIT"
] | null | null | null | zapper/context.py | alfuananzo/zapper | 8a87e332ebdad8803058793c036a1426b9240b98 | [
"MIT"
] | null | null | null | #TODO: Kill scans if overwrite is set.
from zapper.helpers import report_to_cli
class context:
    def __init__(self, target, scope, api, force=False):
        """Control one Context entry of ZAP.

        Attributes:
            target: The target of the scan; '/' characters are stripped from
                it to form the context name.
            scope: Iterable of URLs that are allowed to be scanned.
            api: The ZAP api wrapper to call. Expects the zapper.api class.
            force: Overwrite (delete and recreate) the context if one with
                the same name already exists.
        """
        # Remove special chars from target so it can be set as context name
        self.context = target.replace('/', '')
        self.scope = scope
        self.api = api
        self.force = force
        try:
            # First try to import a previously exported context file.
            # BUG FIX: capture the response so its contextId can be used
            # below; the original never assigned `context_info` here, so this
            # branch always raised NameError and fell into the except clause.
            context_info = self.api.call('POST', 'JSON/context/action/importContext', {'zapapiformat': 'JSON', 'formMethod': 'POST', 'contextFile': '%s.context' % self.context}).json()
            report_to_cli("Found existing context %s, importing" % self.context)
            # NOTE(review): assumes the importContext response carries the ID
            # under 'contextId' -- confirm against the ZAP API version in use.
            self.context_id = context_info['contextId']
            # BUG FIX: was `return True`, which raises TypeError inside
            # __init__; a bare return keeps the intended early exit (the
            # imported context already contains its include rules).
            return
        except Exception:
            # No importable context file (or the import failed): fall back to
            # creating a fresh context. (Was a bare `except:`, which would
            # also swallow SystemExit/KeyboardInterrupt.)
            contexts = self.api.call('GET', 'JSON/context/view/contextList/?zapapiformat=JSON&formMethod=GET').json()
            if self.context in contexts['contextList']:
                if self.force:
                    self.delete()
                else:
                    report_to_cli('ZAP is already scanning %s, exiting.' % self.context)
                    exit(1)
            context_info = self.api.call('POST', 'JSON/context/action/newContext', {'zapapiformat': 'JSON', 'formMethod': 'POST', 'contextName': self.context}).json()
            self.context_id = context_info['contextId']
            report_to_cli("Created new ZAP context %s with context ID %s" % (self.context, self.context_id))
        # Include the domain(s) into the context
        for scope_url in self.scope:
            self.api.call('POST', 'JSON/context/action/includeInContext', {'zapapiformat': 'JSON', 'formMethod': 'POST', 'contextName': self.context, 'regex': ".*" + scope_url + ".*"})
    def delete(self):
        """Remove this context from ZAP."""
        self.api.call('POST', 'JSON/context/action/removeContext', {'zapapiformat': 'JSON', 'formMethod': 'POST', 'contextName': self.context})
        report_to_cli("Removed ZAP context %s" % self.context)
    def name(self):
        """Return the context name."""
        return self.context
    def id(self):
        """Return the ZAP-assigned context ID."""
        return self.context_id
    def store(self):
        """Export this context to '<name>.context' on the ZAP side."""
        report_to_cli('Storing context %s' % self.context)
        self.api.call('POST', 'JSON/context/action/exportContext', {'zapapiformat': 'JSON', 'formMethod': 'POST', 'contextName': self.context, 'contextFile': '%s.context' % self.context})
| 44.533333 | 188 | 0.611901 | 2,589 | 0.968937 | 0 | 0 | 0 | 0 | 0 | 0 | 1,293 | 0.483907 |
8cf3894e77f3b424b8d63437a58e95928dde4ed7 | 652 | py | Python | src/280. Wiggle Sort.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | [
"MIT"
] | 1 | 2019-12-16T08:18:25.000Z | 2019-12-16T08:18:25.000Z | src/280. Wiggle Sort.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | [
"MIT"
] | null | null | null | src/280. Wiggle Sort.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | [
"MIT"
] | null | null | null | class Solution:
def wiggleSort(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
'''
[3,5,2,1,6,4]
[3,5,1,6,2,4]
[4,3,2,1]
[3,4,2,1]
[6,6,5,6,3,8]
'''
def is_correct_order(x, y, isAscending):
return x <= y if isAscending else x >= y
isAscending = True
for i in range(1, len(nums)):
if not is_correct_order(nums[i-1], nums[i], isAscending):
nums[i-1], nums[i] = nums[i], nums[i-1]
isAscending = not isAscending
| 27.166667 | 69 | 0.455521 | 633 | 0.970859 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.31135 |
8cf42e6fb01de06121f96bde9ffd0a653f7105a5 | 2,718 | py | Python | absl/flags/tests/argparse_flags_test_helper.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | 1,969 | 2017-04-24T22:21:29.000Z | 2022-03-30T13:27:09.000Z | absl/flags/tests/argparse_flags_test_helper.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | 111 | 2017-09-27T05:45:53.000Z | 2022-03-29T16:48:49.000Z | third_party/abseil/py/absl/flags/tests/argparse_flags_test_helper.py | InclusiveTechNU/v11 | d32357bf52f5d6cf4ad3002235d1144f1f347d96 | [
"Apache-2.0"
] | 240 | 2017-09-26T01:18:10.000Z | 2022-03-31T06:24:40.000Z | # Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helper for argparse_flags_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from absl import app
from absl import flags
from absl.flags import argparse_flags
# Module-level absl flag; read by main_simple()/main_subcommands() via
# FLAGS.absl_echo.
FLAGS = flags.FLAGS
flags.DEFINE_string('absl_echo', None, 'The echo message from absl.flags.')
def parse_flags_simple(argv):
  """Parse command-line args for the simple example (absl.flags + argparse)."""
  simple_parser = argparse_flags.ArgumentParser(
      description='A simple example of argparse_flags.')
  simple_parser.add_argument(
      '--argparse_echo', help='The echo message from argparse_flags')
  # argv[0] is the program name; only the actual arguments are parsed.
  return simple_parser.parse_args(argv[1:])
def main_simple(args):
  """Echo the absl-defined flag and the argparse-defined flag."""
  absl_message = FLAGS.absl_echo
  argparse_message = args.argparse_echo
  print('--absl_echo is', absl_message)
  print('--argparse_echo is', argparse_message)
def roll_dice(args):
  """Print a uniform random roll of a die with ``args.num_faces`` faces."""
  faces = args.num_faces
  result = random.randint(1, faces)
  print('Rolled a dice:', result)
def shuffle(args):
  """Print ``args.inputs`` in a random order (the input is not mutated)."""
  shuffled_items = list(args.inputs)
  random.shuffle(shuffled_items)
  print('Shuffled:', ' '.join(shuffled_items))
def parse_flags_subcommands(argv):
  """Parse args for the subcommands example (absl.flags + argparse).

  Registers two subcommands, roll_dice and shuffle; each attaches its
  handler function as the ``command`` default of its subparser.
  """
  top_parser = argparse_flags.ArgumentParser(
      description='A subcommands example of argparse_flags.')
  top_parser.add_argument('--argparse_echo',
                          help='The echo message from argparse_flags')
  command_parsers = top_parser.add_subparsers(help='The command to execute.')
  dice_parser = command_parsers.add_parser(
      'roll_dice', help='Roll a dice.')
  dice_parser.add_argument('--num_faces', type=int, default=6)
  dice_parser.set_defaults(command=roll_dice)
  shuffle_subparser = command_parsers.add_parser(
      'shuffle', help='Shuffle inputs.')
  shuffle_subparser.add_argument(
      'inputs', metavar='I', nargs='+', help='Inputs to shuffle.')
  shuffle_subparser.set_defaults(command=shuffle)
  return top_parser.parse_args(argv[1:])
def main_subcommands(args):
  """Echo both flags, then execute the chosen subcommand."""
  main_simple(args)
  # `command` is bound by set_defaults(command=...) on the chosen subparser.
  args.command(args)
if __name__ == '__main__':
  # The parent test process selects which main/parser pair this helper runs
  # by exporting MAIN_FUNC and FLAGS_PARSER_FUNC (names of functions defined
  # in this module, looked up via globals()).
  main_func_name = os.environ['MAIN_FUNC']
  flags_parser_func_name = os.environ['FLAGS_PARSER_FUNC']
  app.run(main=globals()[main_func_name],
          flags_parser=globals()[flags_parser_func_name])
| 30.2 | 75 | 0.74209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,176 | 0.432671 |
8cf584f118c9548fb028818f411c0541e04b73fa | 580 | py | Python | utf8.py | softarts/oj | 2f51f360a7a6c49e865461755aec2f3a7e721b9e | [
"Apache-2.0"
] | 3 | 2019-05-04T03:26:02.000Z | 2019-08-29T01:20:44.000Z | utf8.py | softarts/oj | 2f51f360a7a6c49e865461755aec2f3a7e721b9e | [
"Apache-2.0"
] | null | null | null | utf8.py | softarts/oj | 2f51f360a7a6c49e865461755aec2f3a7e721b9e | [
"Apache-2.0"
] | null | null | null | import glob, codecs
# Re-encode the C++ files under ojcpp/company from GBK to UTF-8 in place.
#configfiles = glob.glob(r'C:\Users\sam\Desktop\**\*.txt', recursive=True)
#fn="ojcpp/company/amazon_memo.txt"
for fn in glob.glob("ojcpp/company/*.cpp",recursive=True):
    print(fn)
    # NOTE(review): `ret` is only consumed by the disabled second pass below.
    ret=True
    try:
        # Read the file as GBK and rewrite the same file as UTF-8.
        data = open(fn, "r", encoding="gbk").read()
        open(fn, "w", encoding="utf-8").write(data)
    except:
        # Read or rewrite failed (e.g. the file is not GBK); flag it.
        ret=False
    # Disabled second pass, kept as an inert string literal: it would
    # normalise files that are already UTF-8.
    """
    if ret:
        continue
    try:
        data = open(fn, "r", encoding="utf-8").read()
        open(fn, "w", encoding="utf-8").write(data)
    except:
        pass
    """
| 20.714286 | 74 | 0.525862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.587931 |
8cf7205decff15f66c7b82d0402b54b1a61e8918 | 476 | py | Python | setup.py | Emrys-Merlin/monitor_airquality | 40a734c8a2cc24d826b61f26e863a5c0913b7106 | [
"MIT"
] | null | null | null | setup.py | Emrys-Merlin/monitor_airquality | 40a734c8a2cc24d826b61f26e863a5c0913b7106 | [
"MIT"
] | null | null | null | setup.py | Emrys-Merlin/monitor_airquality | 40a734c8a2cc24d826b61f26e863a5c0913b7106 | [
"MIT"
] | null | null | null | from importlib.metadata import entry_points
from setuptools import find_packages, setup
# Packaging metadata for the monitor_airquality distribution.
setup(
    name='monitor_airquality',
    version='0.1',
    url='',
    author='Tim Adler',
    author_email='tim+github@emrys-merlin.de',
    description='Measure airquality using some sensors connected to a raspberry pi',
    # Auto-discover all packages in the source tree.
    packages=find_packages(),
    # No runtime dependencies declared here.
    install_requires=[],
    # Installs a `monitor_airquality` console script running main.main().
    entry_points={
        'console_scripts': ['monitor_airquality=monitor_airquality.main:main']
    }
)
| 26.444444 | 84 | 0.712185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.418067 |
8cf74af9632143ddcfbd48ed22ead989c9000359 | 8,557 | py | Python | os_migrate/plugins/module_utils/reference.py | mrnold/os-migrate | 2aa04ad4c2cf6d1e8abb24f845c7c9009fcd58fd | [
"Apache-2.0"
] | null | null | null | os_migrate/plugins/module_utils/reference.py | mrnold/os-migrate | 2aa04ad4c2cf6d1e8abb24f845c7c9009fcd58fd | [
"Apache-2.0"
] | null | null | null | os_migrate/plugins/module_utils/reference.py | mrnold/os-migrate | 2aa04ad4c2cf6d1e8abb24f845c7c9009fcd58fd | [
"Apache-2.0"
] | null | null | null | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def network_name(conn, id_, required=True):
    """Resolve the name of the Network with ID `id_`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_network
    return _fetch_name(finder, id_, required)


def network_id(conn, name, required=True):
    """Resolve the ID of the Network named `name`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_network
    return _fetch_id(finder, name, required)
def network_flavor_name(conn, id_, required=True):
    """Resolve the name of the Network Flavor with ID `id_`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_flavor
    return _fetch_name(finder, id_, required)


def network_flavor_id(conn, name, required=True):
    """Resolve the ID of the Network Flavor named `name`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_flavor
    return _fetch_id(finder, name, required)
def qos_policy_name(conn, id_, required=True):
    """Resolve the name of the QoS Policy with ID `id_`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_qos_policy
    return _fetch_name(finder, id_, required)


def qos_policy_id(conn, name, required=True):
    """Resolve the ID of the QoS Policy named `name`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_qos_policy
    return _fetch_id(finder, name, required)
def router_name(conn, id_, required=True):
    """Resolve the name of the Router with ID `id_`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_router
    return _fetch_name(finder, id_, required)


def router_id(conn, name, required=True):
    """Resolve the ID of the Router named `name`.

    Uses OpenStack SDK connection `conn` for the lookup. When `required`,
    a missing resource is treated as an error.

    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_router
    return _fetch_id(finder, name, required)
def security_group_name(conn, id_, required=True):
    """Resolve the name of the Security Group with ID `id_`.

    Looks the group up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_security_group
    return _fetch_name(finder, id_, required)


def security_group_id(conn, name, required=True):
    """Resolve the ID of the Security Group named `name`.

    Looks the group up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_security_group
    return _fetch_id(finder, name, required)
def subnet_name(conn, id_, required=True):
    """Resolve the name of the Subnet with ID `id_`.

    Looks the subnet up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_subnet
    return _fetch_name(finder, id_, required)


def subnet_id(conn, name, required=True):
    """Resolve the ID of the Subnet named `name`.

    Looks the subnet up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_subnet
    return _fetch_id(finder, name, required)
def segment_name(conn, id_, required=True):
    """Resolve the name of the Segment with ID `id_`.

    Looks the segment up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_segment
    return _fetch_name(finder, id_, required)


def segment_id(conn, name, required=True):
    """Resolve the ID of the Segment named `name`.

    Looks the segment up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_segment
    return _fetch_id(finder, name, required)
def server_flavor_name(conn, id_, required=True):
    """Resolve the name of the server (compute) flavor with ID `id_`.

    Looks the flavor up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.compute.find_flavor
    return _fetch_name(finder, id_, required)


def server_flavor_id(conn, name, required=True):
    """Resolve the ID of the server (compute) flavor named `name`.

    Looks the flavor up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.compute.find_flavor
    return _fetch_id(finder, name, required)
def subnet_pool_name(conn, id_, required=True):
    """Resolve the name of the Subnet Pool with ID `id_`.

    Looks the pool up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the name, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_subnet_pool
    return _fetch_name(finder, id_, required)


def subnet_pool_id(conn, name, required=True):
    """Resolve the ID of the Subnet Pool named `name`.

    Looks the pool up through the OpenStack SDK connection `conn`.
    When `required` is true the lookup must succeed.
    Returns: the ID, or None if not found and not `required`
    Raises: openstack's ResourceNotFound when `required` but not found
    """
    finder = conn.network.find_subnet_pool
    return _fetch_id(finder, name, required)
def _fetch_name(get_method, id_, required=True):
"""Use `get_method` to fetch an OpenStack SDK resource by `id_` and
return its name. If `required`, ensure the fetch is successful.
Returns: the ID, or None if not found and not `required`
Raises: openstack's ResourceNotFound when `required` but not found
"""
if id_ is not None:
return get_method(id_, ignore_missing=not required)['name']
def _fetch_id(get_method, name, required=True):
"""Use `get_method` to fetch an OpenStack SDK resource by `name` and
return its ID. If `required`, ensure the fetch is successful.
Returns: the ID, or None if not found and not `required`
Raises: openstack's ResourceNotFound when `required` but not found
"""
if name is not None:
return get_method(name, ignore_missing=not required)['id']
| 35.213992 | 81 | 0.714736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,044 | 0.706322 |
8cf86bfb6eec122aaf737782baf668c53d57edad | 363 | py | Python | Task1E.py | dan7267/1a-flood-risk-project-93 | d95cee987f5673d637626e1804f719371a25daa8 | [
"MIT"
] | null | null | null | Task1E.py | dan7267/1a-flood-risk-project-93 | d95cee987f5673d637626e1804f719371a25daa8 | [
"MIT"
] | null | null | null | Task1E.py | dan7267/1a-flood-risk-project-93 | d95cee987f5673d637626e1804f719371a25daa8 | [
"MIT"
] | null | null | null | from floodsystem.stationdata import MonitoringStation
from floodsystem.geo import rivers_by_station_number
def run():
    """Requirements for Task1E"""
    # NOTE(review): `rivers_by_station_number` is handed the imported
    # MonitoringStation object directly here — presumably it expects a
    # station collection; confirm against floodsystem.geo's signature.
    top_rivers = rivers_by_station_number(MonitoringStation, 9)
    print(top_rivers)


if __name__ == "__main__":
    print("*** Task 1E: CUED Part IA Flood Warning System ***")
    run()
8cf8d6f44600fb995dd1f41219e9e6510db6f3d8 | 998 | py | Python | test/test_util.py | konfiger/konfiger-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
] | 4 | 2019-09-25T02:18:43.000Z | 2020-01-21T19:16:05.000Z | test/test_util.py | keyvaluedb/key-value-db-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
] | null | null | null | test/test_util.py | keyvaluedb/key-value-db-python | 294fb2fed8a46f7e242825fc0b723b0ff7132c8c | [
"MIT"
] | null | null | null | #!python
import unittest
import os
import sys
sys.path.insert(0, os.getcwd())
from src import escape_string, un_escape_string
class TestKonfigerUtil(unittest.TestCase):
    """Exercises escape_string / un_escape_string round-tripping with the
    'ยฌ' separator character."""

    def test_check_escape_and_unescape_separator(self):
        source = "\\,HelloยฌW\n-\t-\torld"
        escaped = escape_string(source, 'ยฌ')
        unescaped = un_escape_string(source)
        # escaping must alter the string and produce the expected form
        self.assertNotEqual(source, escaped)
        self.assertEqual(escaped, "\\,Hello^ยฌW\n-\t-\torld")
        self.assertNotEqual(escaped, un_escape_string(escaped, 'ยฌ'))
        self.assertNotEqual(source, un_escape_string(escaped))
        # un-escaping with the separator restores the original
        self.assertEqual(un_escape_string(escaped, 'ยฌ'), source)
        self.assertNotEqual(escaped, unescaped)
        # un-escaping a string that was never escaped leaves it intact
        self.assertEqual(unescaped, "\\,HelloยฌW\n-\t-\torld")
        self.assertNotEqual(unescaped, un_escape_string(escaped))
        self.assertEqual(source, un_escape_string(unescaped))
        self.assertEqual(un_escape_string(escaped, 'ยฌ'), source)


if __name__ == '__main__':
    unittest.main()
| 33.266667 | 64 | 0.655311 | 821 | 0.816915 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.109453 |
8cfbc2509615626710ec962d624548fc6620ce7a | 16,310 | py | Python | src/insulaudit/devices/clmm/proto.py | kakoni/insulaudit | 18fe0802bafe5764882ac4e65e472fdc840baa45 | [
"MIT"
] | 1 | 2020-11-28T13:23:58.000Z | 2020-11-28T13:23:58.000Z | src/insulaudit/devices/clmm/proto.py | kakoni/insulaudit | 18fe0802bafe5764882ac4e65e472fdc840baa45 | [
"MIT"
] | null | null | null | src/insulaudit/devices/clmm/proto.py | kakoni/insulaudit | 18fe0802bafe5764882ac4e65e472fdc840baa45 | [
"MIT"
] | null | null | null | import struct
import sys
import serial
import time
import logging
from pprint import pprint, pformat
import doctest
from insulaudit.core import Command
from insulaudit.clmm.usbstick import *
from insulaudit import lib
#logging.basicConfig( stream=sys.stdout )
log = logging.getLogger(__name__)
#log.setLevel( logging.DEBUG )
log.info( 'hello world' )
io = logging.getLogger('.'.join(['io', __name__, 'io' ]))
#io = log
#io.setLevel( logging.DEBUG )
"""
######################
#
# ComLink2
# pseudocode analysis of critical procedures
# there is some implicit OO going on
#
execute(command):
usbcommand.execute(self)
############################
#
# USB(Pump) Command Stuff
#
packSerialNumber:
return makePackedBCD(serial)
"""
"""
######################
#
# Pump
#
# every command needs:
# code, retries, params, length, pages
initDevice:
# cmdPowerControl Command(93, "rf power on", 2)
# cmdPowerControl.params = [ 1, 1 ]
# cmdPowerControl.retries = 0
# cmdReadErrorStatus = Command(117, "read pump error status")
# cmdReadState = Command(131, "Read Pump State")
# cmdReadTempBasal = Command(152, "Read Temporary Basal")
initDevice2
iniDevice2:
detectActiveBolus = Command(76, "set temp basal rate (bolus detection only)", 3)
detectActiveBolus.params = [ 0, 0, 0 ]
detectActiveBolus.retries = 0
detectActiveBolus:
# cmdDetectBolus
shutDownPump
if suspended:
shutDownPump2()
cmdCancelSuspend()
# turn rf power off
# retries 0
cmdOff = Command(93, "rf power off", [ 0 ], 2)
cmdOff.execute
shutDownPump2:
Command(91, "keypad push (ack)", [ 2 ], 1).execute
time.sleep(.500)
Command(91, "keypad push (esc)", [ 1 ], 1).execute
time.sleep(.500)
getNAKDescription:
# pass
# 2 params
Command(code, descr)
# 5: code, descr, bytesPerRecord, maxRecords, maxRetries
return Command(code, descr, 64, 1, 0)
# 3 params
Command(code, descr, paramCount):
# 5
#
com = Command(code, descr, 0, 1, 11)
com.paramCount = paramCount
numblocks = paramCount / 64 + 1
# 4 params
Command(code, descr, params, tail)
# 5
com = Command(code, descr, 0, 1, 11)
com.params = params
#com.paramCount
# 5 params
Command(code, descr, bytesPerRecord, maxRecords, ??):
# likely decompile error
# 7
Command(code, descr, bytesPerRecord, maxRecords, 0, 0, paramCount)
dataOffset = 0
cmdLength = 2
# 7 params
Command(code, descr, bytesPerRecord, maxRecords, address, addressLength, arg8):
offset = 2
if addressLength == 1:
cmdLength = 2 + addressLength
else:
cmdLength = 2 + addressLength + 1
retries = 2
# 511
execute:
result = None
for i in xrange(maxRetries)
# reset bytes read
response = usb.execute(self)
# handle stack trace
if response: break
return result
"""
"""
"""
class ProtocolError(Exception):
    """Base error for stick/pump protocol failures."""


class DeviceCommsError(ProtocolError):
    """Raised when communication with the device goes wrong."""


class AckError(DeviceCommsError):
    """Raised when an expected ACK response is missing or empty."""
def retry(block, retry=3, sleep=0):
    """Call `block` up to `retry` times, returning the first truthy result.

    Optionally sleeps `sleep` seconds between attempts. Returns the last
    (falsy) result when every attempt fails.
    """
    result = None
    for attempt in xrange(retry):
        log.info('retry:%s:%i' % (block, attempt))
        result = block( )
        if result:
            return result
        if sleep:
            time.sleep(sleep)
    return result
class Link( core.CommBuffer ):
    """Serial link to the CareLink USB stick (ComLink2 protocol).

    Wraps the low-level serial buffer with the stick's 3-byte command
    frames, ACK checking and product/signal/statistics queries.
    """
    class ID:
        # USB vendor/product IDs of the CareLink stick
        VENDOR = 0x0a21
        PRODUCT = 0x8001
    timeout = .100  # default serial timeout in seconds
    def __init__( self, port, timeout=None ):
        super(type(self), self).__init__(port, timeout)
    def setTimeout(self, timeout):
        self.serial.setTimeout(timeout)
    def getTimeout(self):
        return self.serial.getTimeout()
    def initUSBComms(self):
        # Retry the IO handshake a few times; raise if it never succeeds.
        def init( ):
            init = False
            try:
                self.initCommunicationsIO()
                init = True
            except ProtocolError, e: pass
            return init
        if not retry(init):
            raise ProtocolError("could not init usb module")
        #self.initDevice()
    def getSignalStrength(self):
        # NOTE(review): computes `signal` but returns nothing — looks
        # unfinished; callers use readSignalStrength() directly.
        result = self.readSignalStrength()
        signal = result[0]
    def readSignalStrength(self):
        # Command 6: query RF signal strength from the stick.
        result = self.sendComLink2Command(6, 0)
        # result[0] is signal strength
        log.info('%r:readSignalStrength:%s' % (self, int(result[0])))
        return result
    def initCommunicationsIO(self):
        # close/open serial
        self.readProductInfo( )
        self.readSignalStrength()
    def endCommunicationsIO(self):
        self.readSignalStrength()
        self.readInterfaceStatistics()
        # close port
        self.close()
    def readProductInfo(self):
        # Command 4: query stick product info; decoded for logging only.
        result = self.sendComLink2Command(4)
        # 1/0/255
        log.info('readProductInfo:result')
        freq = result[5]
        info = self.decodeProductInfo(result)
        log.info('product info: %s' % pformat(info))
        # decodeInterface stats
    def decodeProductInfo(self, data):
        # Wrap raw bytes in a reply-shaped object so USBProductInfo's
        # onACK() parser can consume them.
        class F:
            body = data
        comm = USBProductInfo()
        comm.reply = F()
        comm.onACK()
        return comm.info
    def sendComLink2Command(self, msg, a2=0x00, a3=0x00):
        # generally commands are 3 bytes, most often CMD, 0x00, 0x00
        msg = bytearray([ msg, a2, a3 ])
        io.info('sendComLink2Command:write')
        self.write(msg)
        return retry(self.checkAck, sleep=.100)
        # throw local usb exception
    def checkAck(self):
        # Read up to 64 bytes; byte 0 must be 1 (usable response) and
        # byte 1 must be 85 ('U', ACK). Returns the payload after byte 3.
        time.sleep(.100)
        result = bytearray(self.read(64))
        if len(result) == 0:
            raise AckError('checkAck must have a response')
        io.info('checkAck:read')
        commStatus = result[0]
        # usable response
        #assert commStatus == 1
        if commStatus != 1:
            raise DeviceCommsError('\n'.join([ "checkAck: bad response code"
                                   , lib.hexdump(result[0:4]) ]))
        status = result[1]
        # status == 102 'f' NAK, look up NAK
        if status == 85: # 'U'
            log.info('ACK OK')
            return result[3:]
        assert False, "NAK!!"
    def decodeIFaceStats(self, data):
        # Same reply-wrapping trick as decodeProductInfo, for stats.
        class F:
            body = data
        comm = InterfaceStats()
        comm.reply = F()
        comm.onACK()
        return comm.info
    def readInterfaceStatistics(self):
        # decode and log stats; command 5 with arg 0 = radio, 1 = stick
        result = self.sendComLink2Command(5, 0)
        info = self.decodeIFaceStats(result)
        log.info("read radio Interface Stats: %s" % pformat(info))
        result = self.sendComLink2Command(5, 1)
        info = self.decodeIFaceStats(result)
        log.info("read stick Interface Stats: %s" % pformat(info))
#######################
#
#
#
def CRC8(data):
    """Return the 8-bit CRC of `data`, delegating to the shared lib helper."""
    return lib.CRC8.compute(data)
################################
# Remote Stuff
#
class BaseCommand(object):
    """Base class for remote (pump) commands.

    Subclasses override the class attributes; `format()` is expected to
    return the transmit packet (this base returns None as a placeholder).
    """
    code = 0x00          # command opcode sent to the device
    descr = "(error)"    # human-readable description, used by __repr__
    retries = 2          # how often the device layer may retry
    timeout = 3          # serial read timeout, seconds
    params = [ ]         # optional command parameters
    bytesPerRecord = 0   # response record size
    maxRecords = 0       # number of response records expected
    effectTime = 1       # seconds to wait for the command to take effect
    def __init__(self, code, descr, *args):
        # *args accepted for signature compatibility but ignored here
        self.code = code
        self.descr = descr
        self.params = [ ]
    def __repr__(self):
        # "<ClassName:descr>" followed by one indented line per field
        fields = [ 'descr', 'timeout', 'effectTime', 'code' ]
        details = [''] \
            + [ "\t%8s: %s" % (f, str(getattr(self, f))) for f in fields ]
        summary = '<{name}:{descr}>'.format(name=self.__class__.__name__,
                                            descr=self.descr)
        kwds = dict(details='\n'.join(map(str, details)), summary=summary)
        return "{summary}{details}".format(**kwds)
    def format(self):
        # placeholder: subclasses build and return the transmit packet
        pass
    def allocateRawData(self):
        # expected total response size in bytes
        self.raw = self.bytesPerRecord * self.maxRecords
class Device(object):
    """Executes pump commands over a Link and collects framed responses.

    Handles retrying, the write/ACK handshake, polling for available
    bytes, and reassembling multi-frame payloads with CRC8 checks.
    """
    def __init__(self, link):
        self.link = link
    def execute(self, command):
        """Run `command`, retrying on DeviceCommsError; raises with the
        accumulated errors if every attempt fails."""
        self.command = command
        #try:
        #    self.allocateRawData()
        #    self.sendAndRead()
        #except DeviceCommsError, e:
        #    raise
        errors = [ ]
        that = self
        def execute( ):
            try:
                that.allocateRawData()
                that.sendAndRead()
                return True
            except DeviceCommsError, e:
                errors.append(e)
                return False
        if not retry(execute, sleep=.150):
            raise DeviceCommsError('\n'.join([ "tried executing %s bunch of times and failed"
                , "%s" % ('\n\t'.join(map(str, errors))) ]) % self.command)
    def sendAndRead(self):
        # Transmit, wait for the command's effect time, then read the
        # response payload if one is expected.
        self.sendDeviceCommand()
        time.sleep(self.command.effectTime)
        if self.expectedLength > 0:
            # in original code, this modifies the length tested in the previous if
            # statement
            self.command.data = self.readDeviceData()
    def sendDeviceCommand(self):
        packet = self.buildTransmitPacket()
        io.info('sendDeviceCommand:write:%r' % (self.command))
        self.link.write(packet)
        time.sleep(.500)
        code = self.command.code
        params = self.command.params
        # command 93 with param 0 (RF power off) gets no ACK back
        if code != 93 or params[0] != 0:
            self.link.checkAck()
    def allocateRawData(self):
        self.command.allocateRawData()
        self.expectedLength = self.command.bytesPerRecord * self.command.maxRecords
    def readDeviceData(self):
        # Accumulate frames until the end-of-data flag is seen.
        self.eod = False
        results = bytearray( )
        while not self.eod:
            data = self.readDeviceDataIO( )
            results.extend(data)
        return results
    def readDeviceDataIO(self):
        # Frame layout: byte 5 carries the EOD flag in its high bit and the
        # payload length low byte in the rest; byte 6 is the high byte;
        # payload starts at offset 13; last byte is the payload CRC8.
        results = self.readData()
        lb, hb = results[5] & 0x7F, results[6]
        self.eod = (results[5] & 0x80) > 0
        resLength = lib.BangInt((lb, hb))
        # NOTE(review): asserts length is *greater* than 63 — confirm this
        # is the intended bound for a frame payload.
        assert resLength > 63, ("cmd low byte count:\n%s" % lib.hexdump(results))
        data = results[13:13+resLength]
        assert len(data) == resLength
        crc = results[-1]
        # crc check
        log.info('readDeviceDataIO:msgCRC:%r:expectedCRC:%r:data:%r' % (crc, CRC8(data), data))
        assert crc == CRC8(data)
        return data
    def readData(self):
        # Ask the stick how many bytes are pending, then request them with
        # a type-12 read packet (CRC8-terminated).
        bytesAvailable = self.getNumBytesAvailable()
        packet = [12, 0, lib.HighByte(bytesAvailable), lib.LowByte(bytesAvailable)]
        packet.append( CRC8(packet) )
        response = self.writeAndRead(packet, bytesAvailable)
        # assert response.length > 14
        # assert (int(response[0]) == 2), repr(response)
        rcode = response[0]
        if len(response) < 14:
            raise DeviceCommsError('\n'.join([ "readData: insufficientData",
                lib.hexdump(response) ]))
        if rcode != 2:
            raise DeviceCommsError("readData: bad response code: %#04x" % rcode)
        # response[1] != 0 # interface number !=0
        # response[2] == 5 # timeout occurred
        # response[2] == 2 # NAK
        # response[2] # should be within 0..4
        log.info("readData ACK")
        return response
    def writeAndRead(self, msg, length):
        io.info("writeAndRead:")
        self.link.write(bytearray(msg))
        time.sleep(.300)
        self.link.setTimeout(self.command.timeout)
        return bytearray(self.link.read(length))
    def getNumBytesAvailable(self):
        # Poll readStatus for up to ~4 seconds until data is pending.
        #result = self.readStatus( )
        result = 0
        start = time.time()
        i = 0
        while result == 0 and time.time() - start < 4:
            log.debug('%r:getNumBytesAvailable:attempt:%s' % (self, i))
            result = self.readStatus( )
            time.sleep(.100)
            i += 1
        log.info('getNumBytesAvailable:%s' % result)
        return result
    def readStatus(self):
        """
        result = False
        def fetch_status( ):
            res = self.link.sendComLink2Command(3)
            status = res[0] # 0 indicates success
            if status == 0:
                result = res
                return True
            return False
        if not retry(fetch_status) or not result or len(result) == 0:
            raise RFFailed("rf read header indicates failure")
        """
        # Command 3: RF status. Returns the pending byte count when the
        # "data available" bit (0x1) of the status byte is set, else 0.
        result = self.link.sendComLink2Command(3)
        commStatus = result[0] # 0 indicates success
        status = result[2]
        lb, hb = result[3], result[4]
        stat = StickStatusStruct(status)
        header = result[0:3]
        test = [ StickStatusStruct(s) for s in header ]
        log.info(test)
        log.info("HEADER:\n%s" % lib.hexdump(header))
        if 0 != commStatus:
            raise DeviceCommsError('\n'.join([ "rf read header indicates failure"
                , "%s" % lib.hexdump(header) ]))
        assert commStatus == 0, ("command status not 0: %s:%s" % (commStatus, stat))
        bytesAvailable = lib.BangInt((lb, hb))
        self.status = status
        if (status & 0x1) > 0:
            return bytesAvailable
        return 0
    def buildTransmitPacket(self):
        return self.command.format( )
class PumpCommand(BaseCommand):
    """A command addressed to a specific pump (by packed-BCD serial).

    `format()` builds the full transmit packet: header, serial, parameter
    count, retry/page bookkeeping, opcode, and two CRC8 checksums.
    """
    serial = '665455'  # pump serial number as a hex string (packed BCD)
    #serial = '206525'
    params = [ ]
    bytesPerRecord = 64
    maxRecords = 1
    retries = 2
    # attributes that may be overridden via keyword arguments
    __fields__ = ['maxRecords', 'code', 'descr',
                  'serial', 'bytesPerRecord', 'params']
    def __init__(self, **kwds):
        # copy each known field from kwds, falling back to the class default
        for k in self.__fields__:
            value = kwds.get(k, getattr(self, k))
            setattr(self, k, value)
    def getData(self):
        # raw response bytes, set by Device.sendAndRead
        return self.data
    def format(self):
        """Build and return the transmit packet as a bytearray."""
        params = self.params
        code = self.code
        maxRetries = self.retries
        # Python 2 str.decode('hex'): '665455' -> the 3 packed-BCD bytes
        serial = list(bytearray(self.serial.decode('hex')))
        paramsCount = len(params)
        head = [ 1, 0, 167, 1 ]
        # serial
        packet = head + serial
        # paramCount 2 bytes
        packet.extend( [ (0x80 | lib.HighByte(paramsCount)),
                         lib.LowByte(paramsCount) ] )
        # not sure what this byte means
        button = 0
        # special case command 93
        if code == 93:
            button = 85
        packet.append(button)
        packet.append(maxRetries)
        # how many packets/frames/pages/flows will this take?
        responseSize = self.calcRecordsRequired()
        # really only 1 or 2?
        pages = responseSize
        if responseSize > 1:
            pages = 2
        packet.append(pages)
        packet.append(0)
        # command code goes here
        packet.append(code)
        packet.append(CRC8(packet))
        packet.extend(params)
        packet.append(CRC8(params))
        io.info(packet)
        return bytearray(packet)
    def calcRecordsRequired(self):
        # number of 64-byte frames needed to carry the expected response
        length = self.bytesPerRecord * self.maxRecords
        i = length / 64
        j = length % 64
        if j > 0:
            return i + 1
        return i
class PowerControl(PumpCommand):
    """Command 93: turn the pump's RF modem on for 10 minutes.

    >>> PowerControl().format() == PowerControl._test_ok
    True
    """
    # expected wire bytes for the default serial, pinned by the doctest
    _test_ok = bytearray( [ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
                            0x02, 0x55, 0x00, 0x00, 0x00, 0x5D, 0xE6, 0x01,
                            0x0A, 0xA2 ] )
    code = 93
    descr = "RF Power On"
    # params: [ on/off flag, duration (minutes) ]
    params = [ 0x01, 0x0A ]
    retries = 0
    maxRecords = 0
    timeout = 17
    effectTime = 17
class PowerControlOff(PowerControl):
    # Same RF power command, but the first param (0x00) turns the radio off.
    params = [ 0x00, 0x0A ]
class ReadErrorStatus(PumpCommand):
    """Command 117: query whether any pump alarms are currently set.

    >>> ReadErrorStatus().format() == ReadErrorStatus._test_ok
    True
    """
    # expected wire bytes for the default serial, pinned by the doctest
    _test_ok = bytearray([ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
                           0x00, 0x00, 0x02, 0x01, 0x00, 0x75, 0xD7, 0x00 ])
    code = 117
    descr = "Read Error Status any current alarms set?"
    params = [ ]
    retries = 2
    maxRecords = 1
class ReadPumpState(PumpCommand):
    """Command 131: read the pump's current state.

    >>> ReadPumpState().format() == ReadPumpState._test_ok
    True
    """
    # expected wire bytes for the default serial, pinned by the doctest
    _test_ok = bytearray([ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
                           0x00, 0x00, 0x02, 0x01, 0x00, 0x83, 0x2E, 0x00 ])
    code = 131
    descr = "Read Pump State"
    params = [ ]
    retries = 2
    maxRecords = 1
class ReadPumpModel(PumpCommand):
    """Command 141: read the pump's model number string.

    >>> ReadPumpModel().format() == ReadPumpModel._test_ok
    True
    """
    code = 141
    descr = "Read Pump Model Number"
    params = [ ]
    retries = 2
    maxRecords = 1
    # expected wire bytes for the default serial, pinned by the doctest
    _test_ok = bytearray([ 0x01, 0x00, 0xA7, 0x01, 0x66, 0x54, 0x55, 0x80,
                           0x00, 0x00, 0x02, 0x01, 0x00, 0x8D, 0x5B, 0x00 ])
    def getData(self):
        # Response layout: byte 0 is the string length, the model string
        # follows immediately after it.
        data = self.data
        length = data[0]
        msg = data[1:1+length]
        self.model = msg
        return str(msg)
def initDevice(link):
    """Power up the pump's radio and read its initial status.

    Runs PowerControl, ReadErrorStatus and ReadPumpState in order over
    `link`, logging each command's response data. Returns the Device.
    """
    device = Device(link)
    for command_class in (PowerControl, ReadErrorStatus, ReadPumpState):
        comm = command_class()
        device.execute(comm)
        log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
    return device
def do_commands(device):
    """Run the post-init command set: read and log the pump model number."""
    comm = ReadPumpModel( )
    device.execute(comm)
    # bug fix: getattr() was previously applied to the getData() result (a
    # str), which never has a 'data' attribute, so this always logged None.
    # Mirror the sibling functions and read the attribute off the command.
    log.info('comm:%s:data:%s' % (comm, getattr(comm, 'data', None)))
    log.info('REMOTE PUMP MODEL NUMBER: %s' % comm.getData( ))
def shutdownDevice(device):
    """Turn the pump's RF modem back off via PowerControlOff."""
    off = PowerControlOff()
    device.execute(off)
    log.info('comm:%s:data:%s' % (off, getattr(off, 'data', None)))
if __name__ == '__main__':
    io.info("hello world")
    # run the doctests embedded in the command classes
    doctest.testmod( )
    port = None
    try:
        port = sys.argv[1]
    except IndexError, e:
        print "usage:\n%s /dev/ttyUSB0" % sys.argv[0]
        sys.exit(1)
    # open the stick, power the pump radio, query it, then shut down IO
    link = Link(port)
    link.initUSBComms()
    device = initDevice(link)
    do_commands(device)
    #shutdownDevice(device)
    link.endCommunicationsIO()
    #pprint( carelink( USBProductInfo( ) ).info )
| 25.604396 | 91 | 0.62385 | 11,964 | 0.733538 | 0 | 0 | 0 | 0 | 0 | 0 | 5,375 | 0.329552 |
8cfc14873a8e3827bbe126057b1811e7460db3fe | 300 | py | Python | examples/interrupts.py | nodesign/electripy | 6765dccd93b4d71e77ae258f560a1e2eb6645128 | [
"Unlicense",
"MIT"
] | 3 | 2017-06-20T11:50:47.000Z | 2019-10-28T15:14:53.000Z | examples/interrupts.py | nodesign/electripy | 6765dccd93b4d71e77ae258f560a1e2eb6645128 | [
"Unlicense",
"MIT"
] | null | null | null | examples/interrupts.py | nodesign/electripy | 6765dccd93b4d71e77ae258f560a1e2eb6645128 | [
"Unlicense",
"MIT"
] | null | null | null | from lib.electripy import *
print "Board name : ", getBoardName()
print "INTERRUPTS TEST **************************"
def hello(data):
print "interrupt ", INTERRUPT_TYPE[data]
attachInterrupt(25, CHANGE, hello)
for a in range(0,15):
delay(1000)
print a
detachInterrupt(25)
stop() | 17.647059 | 50 | 0.63 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.236667 |
8cfc82cb9558f36c262b30e77dc6228a934d7ef7 | 15,691 | py | Python | Contents/Code/interface/menu.py | tomerblecher/Sub-Zero.bundle | afc7614095fe6b0e1d15f40671f8cc1de37b4402 | [
"MIT"
] | 2 | 2020-12-06T19:40:10.000Z | 2020-12-26T13:20:10.000Z | Contents/Code/interface/menu.py | PlexIL/Sub-Zero.bundle | e74af913f13b390fc5271def4fdc55066358d3e3 | [
"MIT"
] | null | null | null | Contents/Code/interface/menu.py | PlexIL/Sub-Zero.bundle | e74af913f13b390fc5271def4fdc55066358d3e3 | [
"MIT"
] | null | null | null | # coding=utf-8
import locale
import logging
import os
import platform
import traceback
import logger
import copy
from requests import HTTPError
from item_details import ItemDetailsMenu
from refresh_item import RefreshItem
from menu_helpers import add_incl_excl_options, dig_tree, set_refresh_menu_state, \
default_thumb, debounce, ObjectContainer, SubFolderObjectContainer, route
from main import fatality, InclExclMenu
from advanced import DispatchRestart
from subzero.constants import ART, PREFIX, DEPENDENCY_MODULE_NAMES
from support.extract import season_extract_embedded
from support.scheduler import scheduler
from support.config import config
from support.helpers import timestamp, df, display_language
from support.ignore import get_decision_list
from support.items import get_all_items, get_items_info, get_item_kind_from_rating_key, get_item, get_item_title
from support.i18n import _
# init GUI: set shared artwork and disable caching for all containers
ObjectContainer.art = R(ART)
ObjectContainer.no_cache = True
# default thumb for DirectoryObjects
DirectoryObject.thumb = default_thumb
# register the detailed list view used by item menus
Plugin.AddViewGroup("full_details", viewMode="InfoList", mediaType="items", type="list", summary=2)
@route(PREFIX + '/section/firstLetter/key', deeper=bool)
def FirstLetterMetadataMenu(rating_key, key, title=None, base_title=None, display_items=False, previous_item_type=None,
                            previous_rating_key=None):
    """
    Displays the contents of a section filtered by first letter.
    :param rating_key: actually is the section's key
    :param key: the first letter wanted
    :param title: the first letter, or #
    :param base_title: title of the parent menu, used as breadcrumb prefix
    :param display_items: whether children should be listed directly
    :param previous_item_type: kind of the parent item (e.g. "section")
    :param previous_rating_key: rating key of the parent item
    :return: a SubFolderObjectContainer with one entry per matching item
    (the route declares `deeper=bool` only for query-arg type coercion)
    """
    title = base_title + " > " + unicode(title)
    oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True)
    items = get_all_items(key="first_character", value=[rating_key, key], base="library/sections", flat=False)
    # kind of the children decides whether the next level lists items
    kind, deeper = get_items_info(items)
    dig_tree(oc, items, MetadataMenu,
             pass_kwargs={"base_title": title, "display_items": deeper, "previous_item_type": kind,
                          "previous_rating_key": rating_key})
    return oc
@route(PREFIX + '/section/contents', display_items=bool)
def MetadataMenu(rating_key, title=None, base_title=None, display_items=False, previous_item_type=None,
                 previous_rating_key=None, message=None, header=None, randomize=None):
    """
    Displays the contents of a section level based on whether it has a
    deeper tree or not (movies -> movie (item) list; series -> series list).
    When `display_items` is false the item's detail menu is shown instead.
    :param rating_key: key of the item/level being displayed
    :param title: display title of this level
    :param base_title: breadcrumb prefix from the parent menu
    :param display_items: list children here (True) or show item details
    :param previous_item_type: kind of the parent item
    :param previous_rating_key: rating key of the parent item
    :param message: optional message to show in the container
    :param header: optional header to show in the container
    :param randomize: cache-buster timestamp from the caller
    """
    title = unicode(title)
    item_title = title
    title = base_title + " > " + title
    oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True, header=header, message=message,
                                  view_group="full_details")
    current_kind = get_item_kind_from_rating_key(rating_key)
    if display_items:
        # refresh timeout (seconds) scales with how much needs scanning
        timeout = 30
        show = None
        # add back to series for season
        if current_kind == "season":
            timeout = 720
            show = get_item(previous_rating_key)
            oc.add(DirectoryObject(
                key=Callback(MetadataMenu, rating_key=show.rating_key, title=show.title, base_title=show.section.title,
                             previous_item_type="section", display_items=True, randomize=timestamp()),
                title=_(u"< Back to %s", show.title),
                thumb=show.thumb or default_thumb
            ))
        elif current_kind == "series":
            # it shouldn't take more than 6 minutes to scan all of a series' files and determine the force refresh
            timeout = 3600
        items = get_all_items(key="children", value=rating_key, base="library/metadata")
        kind, deeper = get_items_info(items)
        dig_tree(oc, items, MetadataMenu,
                 pass_kwargs={"base_title": title, "display_items": deeper, "previous_item_type": kind,
                              "previous_rating_key": rating_key})
        # we don't know exactly where we are here, only add ignore option to series
        if current_kind in ("series", "season"):
            item = get_item(rating_key)
            sub_title = get_item_title(item)
            add_incl_excl_options(oc, current_kind, title=sub_title, rating_key=rating_key, callback_menu=InclExclMenu)
        # mass-extract embedded subtitles (needs the Plex transcoder binary)
        if current_kind == "season" and config.plex_transcoder:
            for lang in config.lang_list:
                oc.add(DirectoryObject(
                    key=Callback(SeasonExtractEmbedded, rating_key=rating_key, language=lang,
                                 base_title=show.section.title, display_items=display_items, item_title=item_title,
                                 title=title,
                                 previous_item_type=previous_item_type, with_mods=True,
                                 previous_rating_key=previous_rating_key, randomize=timestamp()),
                    title=_(u"Extract missing %(language)s embedded subtitles", language=display_language(lang)),
                    summary=_("Extracts the not yet extracted embedded subtitles of all episodes for the current "
                              "season with all configured default modifications")
                ))
                # force=True also re-extracts and activates existing ones
                oc.add(DirectoryObject(
                    key=Callback(SeasonExtractEmbedded, rating_key=rating_key, language=lang,
                                 base_title=show.section.title, display_items=display_items, item_title=item_title,
                                 title=title, force=True,
                                 previous_item_type=previous_item_type, with_mods=True,
                                 previous_rating_key=previous_rating_key, randomize=timestamp()),
                    title=_(u"Extract and activate %(language)s embedded subtitles", language=display_language(lang)),
                    summary=_("Extracts embedded subtitles of all episodes for the current season "
                              "with all configured default modifications")
                ))
        # add refresh
        oc.add(DirectoryObject(
            key=Callback(RefreshItem, rating_key=rating_key, item_title=title, refresh_kind=current_kind,
                         previous_rating_key=previous_rating_key, timeout=timeout * 1000, randomize=timestamp()),
            title=_(u"Refresh: %s", item_title),
            summary=_("Refreshes %(the_movie_series_season_episode)s, possibly searching for missing and picking up "
                      "new subtitles on disk", the_movie_series_season_episode=_(u"the %s" % current_kind))
        ))
        oc.add(DirectoryObject(
            key=Callback(RefreshItem, rating_key=rating_key, item_title=title, force=True,
                         refresh_kind=current_kind, previous_rating_key=previous_rating_key, timeout=timeout * 1000,
                         randomize=timestamp()),
            title=_(u"Auto-Find subtitles: %s", item_title),
            summary=_("Issues a forced refresh, ignoring known subtitles and searching for new ones")
        ))
    else:
        return ItemDetailsMenu(rating_key=rating_key, title=title, item_title=item_title)
    return oc
@route(PREFIX + '/season/extract_embedded/{rating_key}/{language}')
def SeasonExtractEmbedded(**kwargs):
    """Kick off background extraction of a season's embedded subtitles,
    then return to the season's MetadataMenu with a success message.
    Consumed kwargs: rating_key, language, with_mods, item_title, title,
    force, randomize; the rest is passed back to MetadataMenu."""
    rating_key = kwargs.get("rating_key")
    # pop the extraction-only arguments so MetadataMenu doesn't see them
    requested_language = kwargs.pop("language")
    with_mods = kwargs.pop("with_mods")
    item_title = kwargs.pop("item_title")
    title = kwargs.pop("title")
    force = kwargs.pop("force", False)
    # run the extraction asynchronously in a framework thread
    Thread.Create(season_extract_embedded, **{"rating_key": rating_key, "requested_language": requested_language,
                                              "with_mods": with_mods, "force": force})
    kwargs["header"] = _("Success")
    kwargs["message"] = _(u"Extracting of embedded subtitles for %s triggered", title)
    # drop the old cache-buster; a fresh one is supplied below
    kwargs.pop("randomize")
    return MetadataMenu(randomize=timestamp(), title=item_title, **kwargs)
@route(PREFIX + '/ignore_list')
def IgnoreListMenu():
    """Render the include/ignore list with per-entry management options."""
    decision_list = get_decision_list()
    is_include = decision_list.store == "include"
    menu_title = _("Include list" if is_include else "Ignore list")
    oc = SubFolderObjectContainer(title2=menu_title, replace_parent=True)
    for entry_key in decision_list.key_order:
        for entry_value in decision_list[entry_key]:
            add_incl_excl_options(oc, entry_key,
                                  title=decision_list.get_title(entry_key, entry_value),
                                  rating_key=entry_value, callback_menu=InclExclMenu)
    return oc
@route(PREFIX + '/history')
def HistoryMenu():
    """List the 100 most recent subtitle events, linking each to its
    item's details menu."""
    # imported lazily so the history store is only opened when needed
    from support.history import get_history
    history = get_history()
    oc = SubFolderObjectContainer(title2=_("History"), replace_parent=True)
    for item in history.items[:100]:
        possible_language = item.language
        # fall back to the stored language name when no Language object exists
        language_display = item.lang_name if not possible_language else display_language(possible_language)
        oc.add(DirectoryObject(
            key=Callback(ItemDetailsMenu, title=item.title, item_title=item.item_title,
                         rating_key=item.rating_key),
            title=u"%s (%s)" % (item.item_title, _(item.mode_verbose)),
            summary=_(u"%s in %s (%s, score: %s), %s", language_display, item.section_title,
                      _(item.provider_name), item.score, df(item.time)),
            thumb=item.thumb or default_thumb
        ))
    # release the history storage handle
    history.destroy()
    return oc
@route(PREFIX + '/missing/refresh')
@debounce
def RefreshMissing(randomize=None):
    """Trigger the scheduler task that searches recently added items for
    missing subtitles, then show a confirmation screen."""
    scheduler.dispatch_task("SearchAllRecentlyAddedMissing")
    # NOTE(review): unlike sibling menus this header is not wrapped in _()
    # for translation — confirm whether that is intentional
    header = "Refresh of recently added items with missing subtitles triggered"
    return fatality(header=header, replace_parent=True)
def replace_item(obj, key, replace_value):
    """Recursively set `key` to `replace_value` at every level of the
    nested dict `obj` where the key exists. Mutates and returns `obj`.
    Only descends into values that are themselves dicts."""
    for child_key, child_value in obj.items():
        if isinstance(child_value, dict):
            obj[child_key] = replace_item(child_value, key, replace_value)
    if key in obj:
        obj[key] = replace_value
    return obj
def check_connections():
# debug drone
Log.Debug("Checking connections ...")
log_buffer = []
try:
from subliminal_patch.refiners.drone import SonarrClient, RadarrClient
log_buffer.append(["----- Connections -----"])
for key, cls in [("sonarr", SonarrClient), ("radarr", RadarrClient)]:
if key in config.refiner_settings:
cname = key.capitalize()
try:
status = cls(**config.refiner_settings[key]).status(timeout=5)
except HTTPError, e:
if e.response.status_code == 401:
log_buffer.append(("%s: NOT WORKING - BAD API KEY", cname))
else:
log_buffer.append(("%s: NOT WORKING - %s", cname, traceback.format_exc()))
except:
log_buffer.append(("%s: NOT WORKING - %s", cname, traceback.format_exc()))
else:
if status and status["version"]:
log_buffer.append(("%s: OK - %s", cname, status["version"]))
else:
log_buffer.append(("%s: NOT WORKING - %s", cname))
except:
log_buffer.append(("Something went really wrong when evaluating Sonarr/Radarr: %s", traceback.format_exc()))
finally:
Core.log.setLevel(logging.DEBUG)
for entry in log_buffer:
Log.Debug(*entry)
Core.log.setLevel(logging.getLevelName(Prefs["log_level"]))
@route(PREFIX + '/ValidatePrefs', enforce_route=True)
def ValidatePrefs():
Core.log.setLevel(logging.DEBUG)
if Prefs["log_console"]:
Core.log.addHandler(logger.console_handler)
Log.Debug("Logging to console from now on")
else:
Core.log.removeHandler(logger.console_handler)
Log.Debug("Stop logging to console")
# cache the channel state
update_dict = False
restart = False
# reset pin
Dict["pin_correct_time"] = None
config.initialize()
if "channel_enabled" not in Dict:
update_dict = True
elif Dict["channel_enabled"] != config.enable_channel:
Log.Debug("Interface features %s, restarting plugin", "enabled" if config.enable_channel else "disabled")
update_dict = True
restart = True
if "plugin_pin_mode2" not in Dict:
update_dict = True
elif Dict["plugin_pin_mode2"] != Prefs["plugin_pin_mode2"]:
update_dict = True
restart = True
if update_dict:
Dict["channel_enabled"] = config.enable_channel
Dict["plugin_pin_mode2"] = Prefs["plugin_pin_mode2"]
Dict.Save()
if restart:
scheduler.stop()
DispatchRestart()
return
scheduler.setup_tasks()
scheduler.clear_task_data("MissingSubtitles")
set_refresh_menu_state(None)
Log.Debug("Validate Prefs called.")
# SZ config debug
Log.Debug("--- SZ Config-Debug ---")
for attr in [
"version", "app_support_path", "data_path", "data_items_path", "enable_agent",
"enable_channel", "permissions_ok", "missing_permissions", "fs_encoding",
"subtitle_destination_folder", "include", "include_exclude_paths", "include_exclude_sz_files",
"new_style_cache", "dbm_supported", "lang_list", "providers", "normal_subs", "forced_only", "forced_also",
"plex_transcoder", "refiner_settings", "unrar", "adv_cfg_path", "use_custom_dns",
"has_anticaptcha", "anticaptcha_cls", "mediainfo_bin"]:
value = getattr(config, attr)
if isinstance(value, dict):
d = replace_item(copy.deepcopy(value), "api_key", "xxxxxxxxxxxxxxxxxxxxxxxxx")
Log.Debug("config.%s: %s", attr, d)
continue
if attr in ("api_key",):
value = "xxxxxxxxxxxxxxxxxxxxxxxxx"
Log.Debug("config.%s: %s", attr, value)
for attr in ["plugin_log_path", "server_log_path"]:
value = getattr(config, attr)
if value:
access = os.access(value, os.R_OK)
if Core.runtime.os == "Windows":
try:
f = open(value, "r")
f.read(1)
f.close()
except:
access = False
Log.Debug("config.%s: %s (accessible: %s)", attr, value, access)
for attr in [
"subtitles.save.filesystem", ]:
Log.Debug("Pref.%s: %s", attr, Prefs[attr])
if "sonarr" in config.refiner_settings or "radarr" in config.refiner_settings:
Thread.Create(check_connections)
# fixme: check existance of and os access of logs
Log.Debug("----- Environment -----")
Log.Debug("Platform: %s", Core.runtime.platform)
Log.Debug("OS: %s", Core.runtime.os)
Log.Debug("Python: %s", platform.python_version())
for key, value in os.environ.iteritems():
if key.startswith("PLEX") or key.startswith("SZ_"):
if "TOKEN" in key:
outval = "xxxxxxxxxxxxxxxxxxx"
else:
outval = value
Log.Debug("%s: %s", key, outval)
Log.Debug("Locale: %s", locale.getdefaultlocale())
Log.Debug("-----------------------")
Log.Debug("Setting log-level to %s", Prefs["log_level"])
logger.register_logging_handler(DEPENDENCY_MODULE_NAMES, level=Prefs["log_level"])
Core.log.setLevel(logging.getLevelName(Prefs["log_level"]))
os.environ['U1pfT01EQl9LRVk'] = '789CF30DAC2C8B0AF433F5C9AD34290A712DF30D7135F12D0FB3E502006FDE081E'
return
| 41.510582 | 132 | 0.636734 | 0 | 0 | 0 | 0 | 12,775 | 0.814161 | 0 | 0 | 4,011 | 0.255624 |
8cfca3b3f7b05a6aebdc8eea726b3a320cf8a4ca | 7,643 | py | Python | gsicrawler_pipeline.py | antoniofll/sefarad4.0-testing | 1b50f479ee503e5e23345ab0388eb6c2608ab73d | [
"Apache-2.0"
] | null | null | null | gsicrawler_pipeline.py | antoniofll/sefarad4.0-testing | 1b50f479ee503e5e23345ab0388eb6c2608ab73d | [
"Apache-2.0"
] | null | null | null | gsicrawler_pipeline.py | antoniofll/sefarad4.0-testing | 1b50f479ee503e5e23345ab0388eb6c2608ab73d | [
"Apache-2.0"
] | null | null | null | import luigi
from luigi import configuration
from luigi.s3 import S3Target, S3PathTask
import threading
from time import sleep
import os
import json
import imp
import random
import datetime
import uuid
from bottle import route, run, template, static_file, response, request, install
import luigi
import urllib2
pending_analysis = {}
ids = {}
def return_json(result):
return json.dumps(result)
class ScrapUrlTask(luigi.Task):
#http://www.yelp.com/biz/taqueria-cazadores-san-francisco-2
url = luigi.Parameter(default="https://es.foursquare.com/v/cafeter%C3%ADa-hd/4b5b0ca9f964a520d0e028e3")
unique_id = str(uuid.uuid1())
webpage = luigi.Parameter(default="foursquare")
analysis_type = luigi.Parameter(default="sentiments")
def output(self):
return luigi.LocalTarget(path='analysis/%s.scraper' % self.unique_id)
def run(self):
#content = urllib2.urlopen(self.url).read()
#print content
#scrap_url('yelp','sentiments',self.url)
rv = {'error':None, 'loading':True, 'uuid':self.unique_id,
'analysis_type' : self.analysis_type,
'webpage':self.webpage, 'scraping':True}
pending_analysis[self.unique_id] = rv
ids[0] = self.unique_id
#Start scraper
try:
filePath = 'analysis/%s.scraper' % self.unique_id
print "######## filePath: ",filePath
scraperImported = imp.load_source(self.webpage, 'scrapers/%s.py' % (self.webpage))
#scraperImported.startScraping, args=(self.url, filePath)
scraperTask = threading.Thread(target=scraperImported.startScraping, args=(self.url, filePath))
scraperTask.start()
scraperTask.join()
with open(filePath) as result:
with self.output().open('w') as output:
json.dump(result, output)
print output
except Exception as e:
return {'error':'%s scraper doesn\'t exist' % (self.webpage), 'loading':False}
class SenpyAnalysisTask(luigi.Task):
unique_id = str(uuid.uuid1())
def requires(self):
return ScrapUrlTask()
def run(self):
print "Analisis"
#filePath = imp.load_source(ids[0], './analysis/%s.scraper' % (ids[0]))
'''
with self.input().open('r') as analysis_file:
scraped_reviews = json.loads(analysis_file.read())
analysis_types = {'sentiments'}
analysis_type = analysis_types[0]
self.startAnalysis(scraped_reviews,'sentiments',unique_id)
try:
resultPath = '%s/analysis/%s.analisis' % (os.getcwd(), self.unique_id)
importedAnalyzer = imp.load_source(analysis_types[0], '%s/analyzers/%s.py' % (os.getcwd(), analysis_types[0]))
analysisTask = threading.Thread(target=importedAnalyzer.analyze, args=(scraped_reviews, resultPath))
analysisTask.start()
analysisTask.join()
except Exception as e:
print '###### startAnalysis ' + str(e)
return {'error':'%s analyzer doesn\'t exist' %(analysis_types[0]), 'loading':False}
with open(resultPath) as result:
with self.output().open('w') as output:
json.dump(result, output)
'''
#GSI CRAWLER functions
def return_json(result):
return json.dumps(result)
def startScraper(webpage, url, unique_id):
try:
filePath = './analysis/%s.scraper' % (unique_id)
print "filePath: ",filePath
scraperImported = imp.load_source(webpage, './scrapers/%s.py' % (webpage))
scraperTask = threading.Thread(target=scraperImported.startScraping, args=(url, filePath))
scraperTask.start()
except Exception as e:
return {'error':'%s scraper doesn\'t exist' % (webpage), 'loading':False}
def scrap_url(nameWeb,analysisType, url):
#global scraping
webpage = nameWeb
analysis_type = analysisType
unique_id = str(uuid.uuid1())
#unique_id = 'test'
rv = {'error':None, 'loading':True, 'uuid':unique_id,
'analysis_type' : analysis_type,
'webpage':webpage, 'scraping':True}
pending_analysis[unique_id] = rv
startScraper(webpage, url, unique_id)
return return_json(rv)
def retrieve_info(self):
print "######Inside retrieve_info"
global pending_analysis
unique_id = 'test'
try:
analysis_info = pending_analysis[unique_id]
if(analysis_info['scraping']):
return self.checkScrapedFinishedAndStartAnalysis(self,analysis_info, unique_id)
#else:
#return checkAnalysisFinished(analysis_info, unique_id)
except Exception as e:
print '#### retrieve_info ' + str(e)
return return_json({'error':'No valid uuid', 'loading':False, 'uuid':unique_id})
def checkScrapedFinishedAndStartAnalysis(self,analysis_info, unique_id):
try:
print "######Inside checkScrapedFinishedAndStartAnalysis"
with open('analysis/%s.scraper'%unique_id, 'r') as analysis_file:
scraped_reviews = json.loads(analysis_file.read())
if('error' in scraped_reviews and scraped_reviews['error'] != None):
return return_json(scraped_reviews)
analysis_info['scraping'] = False
analysis_types = analysis_info['analysis_type'].split(",")
error = self.startAnalysis(scraped_reviews, analysis_types[0], unique_id)
del analysis_types[0]
analysis_info['analysis_type'] = ','.join(analysis_types)
if(error):
return return_json(error)
print "Analisis :",return_json(analysis_info)
return return_json(analysis_info)
except Exception as e:
print '###### checkScrapedFinishedAndStartAnalysis' + str(e)
return return_json(analysis_info)
def startAnalysis(self,scraped_reviews, analysis_type, unique_id):
try:
resultPath = '%s/analysis/%s.analisis' % (os.getcwd(), unique_id)
importedAnalyzer = imp.load_source(analysis_type, '%s/analyzers/%s.py' % (os.getcwd(), analysis_type))
analysisTask = threading.Thread(target=importedAnalyzer.analyze, args=(scraped_reviews, resultPath))
analysisTask.start()
return None
except Exception as e:
print '###### startAnalysis ' + str(e)
return {'error':'%s analyzer doesn\'t exist' %(analysis_type), 'loading':False}
def run(self):
print "Analisis"
unique_id='test'
with open('analysis/%s.scraper'%unique_id, 'r') as analysis_file:
scraped_reviews = json.loads(analysis_file.read())
analysis_types = {'sentiments'}
self.startAnalysis(scraped_reviews,'sentiments',unique_id)
with open('analysis/%s.analisis' % unique_id) as analysis_file:
analysis_result = json.loads(analysis_file.read())
print return_json(analysis_result)
class SemanticTask(luigi.Task):
"""
This task loads JSON data contained in a :py:class:`luigi.target.Target` and transform into RDF file
to insert into Fuseki platform as a semantic
"""
#: date task parameter (default = today)
date = luigi.DateParameter(default=datetime.date.today())
file = str(random.randint(0,10000)) + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
def output(self):
"""
Returns the target output for this task.
In this case, a successful execution of this task will create a file on the local filesystem.
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(path='analysis/%s.n3' % self.file)
def requires(self):
"""
This task's dependencies:
* :py:class:`~.SenpyTask`
:return: object (:py:class:`luigi.task.Task`)
"""
return SenpyAnalysisTask()
def run(self):
"""
Receive data from Senpy and transform them to RDF format in order to be indexed in Fuseki
"""
with self.input().open('r') as infile:
j = json.load(infile)
g = Graph().parse(data=j, format='json-ld')
with self.output().open('w') as output:
output.write(g.serialize(format='n3', indent=4))
if __name__ == '__main__':
luigi.run()
| 33.230435 | 114 | 0.697501 | 3,876 | 0.507131 | 0 | 0 | 0 | 0 | 0 | 0 | 3,003 | 0.392909 |
8cfdf25e9a8b70f9400ed44ee8c791ade7d45ab5 | 1,117 | py | Python | farnsworth/peewee_extensions.py | mechaphish/farnsworth | 7d6bcbd94ab5ab521c29309fe3c47a0f6005a5d3 | [
"BSD-2-Clause"
] | 6 | 2016-08-20T23:39:42.000Z | 2020-11-06T23:08:03.000Z | farnsworth/peewee_extensions.py | mechaphish/farnsworth | 7d6bcbd94ab5ab521c29309fe3c47a0f6005a5d3 | [
"BSD-2-Clause"
] | null | null | null | farnsworth/peewee_extensions.py | mechaphish/farnsworth | 7d6bcbd94ab5ab521c29309fe3c47a0f6005a5d3 | [
"BSD-2-Clause"
] | 5 | 2016-08-21T13:16:00.000Z | 2020-11-06T23:08:17.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from peewee import Field, SQL
import itertools
"""Extend Peewee basic types."""
class EnumField(Field):
"""Define a EnumField type"""
db_field = "enum"
def __init__(self, *args, **kwargs):
self.enum_name = kwargs.pop('enum_name')
super(self.__class__, self).__init__(*args, **kwargs)
def pre_field_create(self, model):
cursor = self.get_database().get_conn().cursor()
cursor.execute("DROP TYPE IF EXISTS {};".format(self.enum_name))
choices_str = ", ".join(itertools.repeat("%s", len(self.choices)))
query = "CREATE TYPE {} AS ENUM ({});".format(self.enum_name, choices_str)
cursor.execute(query, self.choices)
self.db_field = self.enum_name
def coerce(self, value):
if value not in self.choices:
raise Exception("Invalid Enum Value `%s`", value)
return str(value)
def get_column_type(self):
return "enum"
def __ddl_column__(self, ctype):
return SQL(self.enum_name)
| 28.641026 | 82 | 0.642793 | 926 | 0.829006 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.19427 |
8cfdfa798899384c0733d181291fe22066a3493e | 3,461 | py | Python | interprete/src/models/graphcodebert/model.py | serjtroshin/PLBART | 58e5de3041a2fc8b98e54648c6489fb3c23db9cb | [
"MIT"
] | null | null | null | interprete/src/models/graphcodebert/model.py | serjtroshin/PLBART | 58e5de3041a2fc8b98e54648c6489fb3c23db9cb | [
"MIT"
] | null | null | null | interprete/src/models/graphcodebert/model.py | serjtroshin/PLBART | 58e5de3041a2fc8b98e54648c6489fb3c23db9cb | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import List
import torch
from transformers import AutoModel, AutoTokenizer
from interprete.src.models.model import Model, ModelOutput
from interprete.src.models.utils import to_cpu
class GraphCodeBertModel(Model):
def __init__(self, args=[], type="GraphCodeBert"):
super().__init__(type)
self.args = args
path = Path(__file__).parent.absolute().resolve()
print(path)
tokenizer = AutoTokenizer.from_pretrained(f"{path}/graphcodebert-base")
model = AutoModel.from_pretrained(f"{path}/graphcodebert-base")
self.model = model
self.tokenizer = tokenizer
print("loaded CodeBert model and tokenizer")
if torch.cuda.is_available():
self.model = self.model.cuda()
@staticmethod
def get_model(model_type, **kwargs):
return GraphCodeBertModel(type=model_type)
def bpe(self, code: str, max_positions=512) -> List[str]:
inp = code
inp = inp.replace("โ", "_")
tokens = self.tokenizer.tokenize(inp)
if len(tokens) > max_positions - 2:
tokens = tokens[: max_positions - 2]
tokens = list(map(lambda x: x.replace("ฤ ", "โ"), tokens))
tokens[
0
] = f"โ{tokens[0]}" # first subtoken was not prefixed with special BPE symbol
return tokens
def __call__(self, bpe: List[str]):
"""
Returns:
dict
"""
code = "".join(bpe).replace("โ", " ").strip()
# "โ" symbol in code resulted in ['ฤ ', 'รข', 'ฤธ', 'ฤฃ'] tokens
# inp = "hello i am Sergey"
code_tokens = self.tokenizer.tokenize(code)
tokens = [self.tokenizer.cls_token] + code_tokens + [self.tokenizer.sep_token]
tokens_ids = torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens))[None, :]
# print("token_ids", tokens_ids.shape)
max_idx = 512
if tokens_ids.shape[-1] > max_idx:
# if input is too long, crop it
tokens_ids = torch.cat(
(
tokens_ids[..., :1],
tokens_ids[..., 1 : max_idx - 1],
tokens_ids[..., -1:],
),
dim=-1,
)
if torch.cuda.is_available():
tokens_ids = tokens_ids.cuda()
# simply generate one code span
with torch.no_grad():
generated_ids = self.model.forward(
torch.tensor(tokens_ids), output_hidden_states=True
)
bpes = list(
map(
lambda x: x.replace("ฤ ", "โ"),
self.tokenizer.convert_ids_to_tokens(
tokens_ids[0], skip_special_tokens=True
),
)
)
bpes[
0
] = f"โ{bpes[0]}" # in hugginface first subtoken was not prefixed with special BPE symbol
features = to_cpu(
list(list(map(lambda x: x[:, 1:-1, :], generated_ids.hidden_states)))
)
tokens = tokens_ids[0].cpu().clone().numpy()
tokens = tokens[1:-1]
return ModelOutput(bpes, tokens, features)
@staticmethod
def get_embeddings_info() -> List[str]:
"""get identifiers for all embedding layer e.g. e1, e2, e3, ..., d1, d2, d3, ..."""
return [f"e{i}" for i in range(13)] + [] # no decoder values
| 33.601942 | 102 | 0.546952 | 3,258 | 0.935938 | 0 | 0 | 324 | 0.093077 | 0 | 0 | 655 | 0.188164 |
5065c0ef1a0d7d57ef1fde52307819e6cc954dfe | 6,286 | py | Python | app/verbose.py | erikosmond/knights_tour | beb4cbb3a081d1c134ed180c6044e3ba7a4a6016 | [
"MIT"
] | null | null | null | app/verbose.py | erikosmond/knights_tour | beb4cbb3a081d1c134ed180c6044e3ba7a4a6016 | [
"MIT"
] | null | null | null | app/verbose.py | erikosmond/knights_tour | beb4cbb3a081d1c134ed180c6044e3ba7a4a6016 | [
"MIT"
] | 1 | 2019-05-04T13:03:53.000Z | 2019-05-04T13:03:53.000Z | class Verbose(object):
Initialized = False
def __init__(self, verbosity, show_info=False):
assert type(verbosity) is int, "verbose takes an integer value of 0-1023"
self.verbose_int = verbosity
self.info = """
bit 0[-1](1) - max/min values
bit 1[-2](2) - retrace
bit 2[-3](4) - visited positions
bit 3[-4](8) - board when it changes
bit 4[-5](16) - every move
bit 5[-6](32) - recording failed position
bit 6[-7](64) - progress - list of final positions (how many moves have been made)
bit 7[-8](128) - potential OBOB
bit 8[-9](256) - possible moves
bit 9[-10](512) - final positions
"""
#create an 8bit string representing the verbose type mask
self.verbose = bin(verbosity)[2:].zfill(10)
self.show_info = show_info
#values for development and debugging
self.min_max_switch = int(self.verbose[-1])
self.retrace_switch = int(self.verbose[-2])
self.visited_positions_switch = int(self.verbose[-3])
self.board_switch = int(self.verbose[-4])
self.every_move_switch = int(self.verbose[-5])
self.failed_position_switch = int(self.verbose[-6])
self.progress_switch = int(self.verbose[-7])
self.potential_OBOB_switch = int(self.verbose[-8])
self.possible_moves_switch = int(self.verbose[-9])
#for final user to see resulting position
self.final_positions_switch = int(self.verbose[-10])
if self.show_info == True:
self._print_verbose_info()
Verbose.Initialized = True
def min_max(self, tour, largest_tour):
#as the tour progresses, will show the longest tour, and if the tour shrinks to a small size
new_max = False
if len(tour.knight.visited_positions) > largest_tour:
new_max = True
largest_tour = len(tour.knight.visited_positions)
if self.min_max_switch:
if new_max:
print "current largest tour", str(largest_tour)
elif len(tour.knight.visited_positions) in [1,2,3]:
print "size of the tour got pretty small with length ", str(len(tour.knight.visited_positions))
return largest_tour
def failed_position(self, old_position, failed_moves):
return #remove this, but change the switch
if self.failed_position_switch:
print "\told position", old_position
for i in failed_moves:
print "\t\t failed move", i
def final_positions(self, chess_piece):
if self.final_positions_switch:
self.board(chess_piece, final=True)
def retrace(self, chess_piece):
if self.retrace_switch:
print "Retracing to ", chess_piece.visited_positions[-1]
if self.visited_positions_switch:
pass
#self.final_positions(chess_piece)
def potential_OBOB(self, tour):
if self.potential_OBOB_switch:
if len(tour.knight.visited_positions) == tour.board.size -1:
print "possible OBOB with len", len(tour.knight.visited_positions)
for pos in tour.knight.visited_positions:
print pos
def progress(self, count, chess_piece=None):
if self.progress_switch:
if count % 1 == 0: #was 10000
print str(count), "moves tried so far"
if chess_piece != None:
final_positions = []
for i in chess_piece.visited_positions:
final_positions.append(i.coordinate)
print final_positions
def every_move(self, move):
if self.every_move_switch:
print "moving to", move
def board(self, chess_piece, final=False):
if self.board_switch or final==True:
print "\n\n"
board = chess_piece.get_board()
for row in range(1, board.rows+1):
for column in range(1, board.columns+1):
knight_present = False
fail_present = False
for i in chess_piece.visited_positions:
if row == i.row and column == i.column:
print chess_piece.visited_positions.index(i), "\t",
knight_present = True
break
if knight_present == True:
continue
'''
for i in chess_piece.trials: #section commented out 11-2-13
#must convert coordinate back into position; should be able to get rid of the if statement
if type(i) is tuple and len(i) == 2:
i = Position(row=i[0], column=i[1], board=board, verbosity=0)
for j in chess_piece.get_failed_moves(i):
if row == j.row and column == j.column:
print "F\t",
fail_present = True
break
if fail_present == True:
break
'''
if knight_present == False:# and fail_present == False: #second check commented out 11-2-13
print "x\t",
print "\n"
print "\n\n"
#raw_input("press any key to continue")
def possible_moves(self, origin, moves):
if self.possible_moves_switch:
print "possible moves from position", origin
for move in moves:
print "\t", move
def _print_verbose_info(self):
if self.verbose_int > 0 and self.verbose_int != 512 and Verbose.Initialized == False:
print self.info
print "current verbose settings...\n"
for i in dir(self):
value = getattr(self, i)
if "switch" in str(i):
spacing = "\t"
for j in range(3-(len(i)/8)):
spacing += "\t"
print i, spacing, getattr(self, i)
print "\n" | 43.958042 | 114 | 0.544066 | 6,286 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1,952 | 0.310531 |
5065da5be5ef82985ad4ecf2e734a4a164b47ddc | 2,170 | py | Python | src/core/game.py | Swartz-42/Irale_Game_Py | c29412ffb3e77aa5837b743c5e2f439a2c021940 | [
"BSD-2-Clause"
] | null | null | null | src/core/game.py | Swartz-42/Irale_Game_Py | c29412ffb3e77aa5837b743c5e2f439a2c021940 | [
"BSD-2-Clause"
] | null | null | null | src/core/game.py | Swartz-42/Irale_Game_Py | c29412ffb3e77aa5837b743c5e2f439a2c021940 | [
"BSD-2-Clause"
] | null | null | null | import pygame
from src.entities import Player
from src.utils import DialogBox
from src.map import MapManager
class Game:
def __init__(self):
super().__init__()
# creer la fenetre du jeu
self.screen = pygame.display.set_mode((1280, 720))
pygame.display.set_caption("Irale - Le jeux video")
self.running = True
# generer le joueur
self.player = Player()
self.map_manager = MapManager(self.screen, self.player)
self.dialog_box = DialogBox()
# definir control
def handle_input(self):
pressed = pygame.key.get_pressed()
up = pressed[pygame.K_z]
down = pressed[pygame.K_s]
right = pressed[pygame.K_d]
left = pressed[pygame.K_q]
esc = pressed[pygame.K_ESCAPE]
if up & right:
self.player.move_up()
self.player.move_right()
elif up & left:
self.player.move_up()
self.player.move_left()
elif down & right:
self.player.move_down()
self.player.move_right()
elif down & left:
self.player.move_down()
self.player.move_left()
elif down:
self.player.move_down()
elif right:
self.player.move_right()
elif left:
self.player.move_left()
elif up:
self.player.move_up()
elif esc:
self.running = False
def update(self):
self.map_manager.update()
def run(self):
clock = pygame.time.Clock()
# boucle du jeu
while self.running:
self.player.save_location()
self.handle_input()
self.update()
self.map_manager.draw()
self.dialog_box.render(self.screen)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_e:
self.map_manager.check_npc_collisions(self.dialog_box)
clock.tick(60)
| 27.468354 | 78 | 0.552074 | 2,057 | 0.947926 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.045622 |
506778d35138226ccfa52203c940b8202f3138b6 | 6,776 | py | Python | diagnnose/models/transformer_lm.py | Kalsir/diagNNose | 06363ffe8050f792c8e6bfd8c13e51c68d327f32 | [
"MIT"
] | null | null | null | diagnnose/models/transformer_lm.py | Kalsir/diagNNose | 06363ffe8050f792c8e6bfd8c13e51c68d327f32 | [
"MIT"
] | null | null | null | diagnnose/models/transformer_lm.py | Kalsir/diagNNose | 06363ffe8050f792c8e6bfd8c13e51c68d327f32 | [
"MIT"
] | null | null | null | from functools import reduce
from typing import List, Optional, Union
import torch
from torch import Tensor
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
DistilBertForMaskedLM,
PreTrainedModel,
XLMWithLMHeadModel,
)
from diagnnose.attribute import ShapleyTensor
from diagnnose.models import LanguageModel
from diagnnose.typedefs.activations import (
ActivationDict,
ActivationName,
ActivationNames,
)
mode_to_auto_model = {
"causal_lm": AutoModelForCausalLM,
"masked_lm": AutoModelForMaskedLM,
"question_answering": AutoModelForQuestionAnswering,
"sequence_classification": AutoModelForSequenceClassification,
"token_classification": AutoModelForTokenClassification,
}
class TransformerLM(LanguageModel):
""" Huggingface LM wrapper. """
def __init__(
self,
model_name: str,
mode: Optional[str] = None,
embeddings_attr: Optional[str] = None,
cache_dir: Optional[str] = None,
):
super().__init__()
auto_model = mode_to_auto_model.get(mode, AutoModel)
self.pretrained_model: PreTrainedModel = auto_model.from_pretrained(
model_name, cache_dir=cache_dir
)
self.embeddings_attr = embeddings_attr
def forward(
self,
input_ids: Optional[Tensor] = None,
inputs_embeds: Optional[Union[Tensor, ShapleyTensor]] = None,
input_lengths: Optional[List[int]] = None,
attention_mask: Optional[Tensor] = None,
compute_out: bool = True,
only_return_top_embs: bool = True,
) -> Union[ActivationDict, Tensor]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
if inputs_embeds is None and input_ids is None:
raise ValueError("inputs_embeds or input_ids must be provided")
if inputs_embeds is None:
inputs_embeds = self.create_inputs_embeds(input_ids)
if len(inputs_embeds.shape) == 2:
inputs_embeds = inputs_embeds.unsqueeze(0) # Add batch dimension
if input_lengths is None:
batch_size, max_sen_len = inputs_embeds.shape[:2]
input_lengths = torch.tensor(batch_size * [max_sen_len])
if attention_mask is None:
attention_mask = self.create_attention_mask(input_lengths)
model = (
self.pretrained_model if compute_out else self.pretrained_model.base_model
)
output = model(inputs_embeds=inputs_embeds, attention_mask=attention_mask)
if isinstance(output, tuple):
output = output[0]
if only_return_top_embs:
return output
return {(-1, "out"): output}
@staticmethod
def create_attention_mask(input_lengths: List[int]) -> Tensor:
"""Creates an attention mask as described in:
https://huggingface.co/transformers/glossary.html#attention-mask
Parameters
----------
input_lengths : List[int]
List containing sentence lengths of each batch item.
Returns
-------
attention_mask : Tensor
Attention mask prescribing which items may be taken into
account by the attention mechanism.
Size: batch_size x max_sen_length
"""
max_sen_len = max(input_lengths)
attention_mask = torch.zeros(len(input_lengths), max_sen_len)
for idx, length in enumerate(input_lengths):
attention_mask[idx, :length] = 1.0
return attention_mask
def create_inputs_embeds(self, input_ids: Tensor) -> Tensor:
if self.embeddings_attr is not None:
attrs = self.embeddings_attr.split(".")
embeddings = reduce(getattr, attrs, self.pretrained_model)
inputs_embeds: Tensor = embeddings(input_ids)
else:
base_model = self.pretrained_model.base_model
if hasattr(base_model, "wte"):
# GPT-2
inputs_embeds: Tensor = base_model.wte(input_ids)
elif hasattr(base_model, "embeddings"):
if hasattr(base_model.embeddings, "word_embeddings"):
# BERT-based models, Electra, Longformer, Reformer
inputs_embeds = base_model.embeddings.word_embeddings(input_ids)
else:
# XLM
inputs_embeds = base_model.embeddings(input_ids)
elif hasattr(base_model, "word_embedding"):
# XLNet
inputs_embeds = base_model.word_embedding(input_ids)
elif hasattr(base_model, "w"):
# CTRL
inputs_embeds = base_model.w(input_ids)
elif hasattr(base_model, "encoder"):
# T5
inputs_embeds = base_model.encoder.embed_tokens(input_ids)
else:
raise AttributeError("word embedding attribute not found")
return inputs_embeds
@property
def decoder(self) -> torch.nn.Module:
# RoBERTa / BERT
for attr in ["lm_head", "cls"]:
if hasattr(self.pretrained_model, attr):
return getattr(self.pretrained_model, attr)
if isinstance(self.pretrained_model, DistilBertForMaskedLM):
return torch.nn.Sequential(
self.pretrained_model.vocab_transform,
torch.nn.GELU(),
self.pretrained_model.vocab_layer_norm,
self.pretrained_model.vocab_projector,
)
if isinstance(self.pretrained_model, XLMWithLMHeadModel):
return self.pretrained_model.pred_layer.proj
raise AttributeError("Model decoder not found")
@property
def num_layers(self) -> int:
return self.pretrained_model.config.n_layer
@property
def top_layer(self) -> int:
if hasattr(self.pretrained_model.config, "n_layer"):
return int(self.pretrained_model.config.n_layer) - 1
elif hasattr(self.pretrained_model.config, "num_hidden_layers"):
return int(self.pretrained_model.config.num_hidden_layers) - 1
else:
raise AttributeError("Number of layers attribute not found in config")
def nhid(self, activation_name: ActivationName) -> int:
if activation_name[1] == "out":
return self.pretrained_model.config.vocab_size
return self.pretrained_model.config.hidden_size
def activation_names(self) -> ActivationNames:
return [(-1, "out")]
| 35.47644 | 86 | 0.643152 | 5,897 | 0.870277 | 0 | 0 | 2,068 | 0.305195 | 0 | 0 | 1,078 | 0.159091 |
506906ff6a85d3dd74305ee68d4ad5ec97a8dca5 | 5,216 | py | Python | usbcan/somebus.py | laigui/usbcan | a58e8f4a7d757fdadc4dd57a039cd4afa016d585 | [
"MIT"
] | null | null | null | usbcan/somebus.py | laigui/usbcan | a58e8f4a7d757fdadc4dd57a039cd4afa016d585 | [
"MIT"
] | null | null | null | usbcan/somebus.py | laigui/usbcan | a58e8f4a7d757fdadc4dd57a039cd4afa016d585 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
''' somebus.py: Somebus USBCAN-II adaptor driver class.
Copyright (C) 2019 Laigui Qin <laigui@gmail.com>'''
from ctypes import *
class VciInitConfig(Structure):
"""
INIT_CONFIG็ปๆไฝๅฎไนไบๅๅงๅCAN็้
็ฝฎ
"""
_fields_ = [("AccCode", c_ulong), # ้ชๆถ็ ๏ผๅ้ขๆฏๆฐๆฎ็ฑปๅ
("AccMask", c_ulong), # ๅฑ่ฝ็
("Reserved", c_ulong), # ไฟ็
("Filter", c_ubyte), # ๆปคๆณขไฝฟ่ฝใ0=ไธไฝฟ่ฝ๏ผ1=ไฝฟ่ฝไฝฟ่ฝๆถ๏ผ/
# ่ฏทๅ็
งSJA1000้ชๆถๆปคๆณขๅจ่ฎพ็ฝฎ้ชๆถ็ ๅๅฑ่ฝ็ ใ
("Timing0", c_ubyte), # ๆณข็น็ๅฎๆถๅจ0๏ผBTR0๏ผ
("Timing1", c_ubyte), # ๆณข็น็ๅฎๆถๅจ1๏ผBTR1)
("Mode", c_ubyte)] # ๆจกๅผใ=0ไธบๆญฃๅธธๆจกๅผ๏ผ=1ไธบๅชๅฌๆจกๅผ๏ผ =2ไธบ่ชๅ่ชๆถๆจกๅผ
class VciCanObj(Structure):
"""
CAN_OBJ็ปๆไฝ่กจ็คบๅธง็ๆฐๆฎ็ปๆใ ๅจๅ้ๅฝๆฐTransmitๅๆฅๆถๅฝๆฐReceiveไธญ่ขซ็จๆฅไผ ้CANไฟกๆฏๅธงใ
"""
_fields_ = [("ID", c_uint), # ๆฅๆๅธงID'''
("TimeStamp", c_uint), # ๆฅๆถๅฐไฟกๆฏๅธงๆถ็ๆถ้ดๆ ่ฏ
("TimeFlag", c_ubyte), # ๆฏๅฆไฝฟ็จๆถ้ดๆ ่ฏ๏ผ ไธบ1ๆถTimeStampๆๆ
("SendType", c_ubyte), # ๅ้ๅธง็ฑปๅใ=0ๆถไธบๆญฃๅธธๅ้,=1ๆถไธบๅๆฌกๅ้๏ผไธ่ชๅจ้ๅ)๏ผ/
# =2ๆถไธบ่ชๅ่ชๆถ๏ผ็จไบๆต่ฏCANๅกๆฏๅฆๆๅ๏ผ ๏ผ =3ๆถไธบๅๆฌก่ชๅ่ชๆถ๏ผๅชๅ้ไธๆฌก๏ผ ็จไบ่ชๆต่ฏ๏ผ๏ผ/
# ๅชๅจๆญคๅธงไธบๅ้ๅธงๆถๆๆไนใ
("RemoteFlag", c_ubyte), # ๆฏๅฆๆฏ่ฟ็จๅธงใ=0ๆถไธบๆฐๆฎๅธง๏ผ=1ๆถไธบ่ฟ็จๅธงใ
("ExternFlag", c_ubyte), # ๆฏๅฆๆฏๆฉๅฑๅธงใ=0ๆถไธบๆ ๅๅธง๏ผ11ไฝๅธงID๏ผ๏ผ=1ๆถไธบๆฉๅฑๅธง๏ผ29ไฝๅธงID๏ผใ
("DataLen", c_ubyte), # ๆฐๆฎ้ฟๅบฆDLC(<=8)๏ผ ๅณData็้ฟๅบฆ
("Data", c_ubyte * 8), # CANๆฅๆ็ๆฐๆฎใ ็ฉบ้ดๅDataLen็็บฆๆใ
("Reserved", c_ubyte * 3)] # ็ณป็ปไฟ็
class VciErrInfo(Structure):
"""
ERR_INFO็ปๆไฝ็จไบ่ฃ
่ฝฝVCIๅบ่ฟ่กๆถไบง็็้่ฏฏไฟกๆฏใ ็ปๆไฝๅฐๅจReadErrInfoๅฝๆฐไธญ่ขซๅกซๅ
ใ
"""
_fields_ = [("ErrCode", c_uint), # ้่ฏฏ็ ใ ๅฏนๅบ1.2 ไธญ็้่ฏฏ็ ๅฎไนใ
("Passive_ErrData", c_ubyte), # ๅฝไบง็็้่ฏฏไธญๆๆถๆ้่ฏฏๆถ่กจ็คบไธบๆถๆ้่ฏฏ็้่ฏฏๆ ่ฏๆฐๆฎ
("ArLost_ErrData", c_ubyte)] # ๅฝไบง็็้่ฏฏไธญๆไปฒ่ฃไธขๅคฑ้่ฏฏๆถ่กจ็คบไธบไปฒ่ฃไธขๅคฑ้่ฏฏ็้่ฏฏๆ ่ฏๆฐๆฎ
class USBCAN():
    """Thin ctypes wrapper around the Somebus/GCAN ``ECanVci64.dll`` driver.

    Each public method forwards to the matching DLL entry point and returns
    the raw integer result from the native call; compare status results
    against ``STATUS_OK`` / ``STATUS_ERR`` / ``RECEIVE_ERR``.
    """

    STATUS_OK = 1             # native call succeeded
    STATUS_ERR = 0            # native call failed
    RECEIVE_ERR = 0xFFFFFFFF  # error marker returned by Receive

    # Path of the vendor driver library, resolved relative to the CWD.
    _DLL_PATH = './ECanVci64.dll'

    def __init__(self):
        # The DLL is loaded lazily on first use and cached afterwards; the
        # previous implementation re-loaded the DLL on every method call.
        self._dll = None

    def _lib(self):
        """Load the vendor DLL once and return the cached handle."""
        if self._dll is None:
            self._dll = windll.LoadLibrary(self._DLL_PATH)
        return self._dll

    def OpenDevice(self, nDeviceType, nDeviceInd):
        """Open the USBCAN adaptor selected by device type and index.

        :param nDeviceType: vendor device-type code
        :param nDeviceInd: device index (0-based)
        :return: raw integer status from the DLL
        """
        nReserved = 0
        return self._lib().OpenDevice(nDeviceType, nDeviceInd, nReserved)

    def InitCAN(self, DevType, DevIndex, CANIndex, pInitConfig):
        """Initialise one CAN channel.

        :param pInitConfig: pointer to a :class:`VciInitConfig` structure
            (presumably passed with ``ctypes.byref`` — confirm with vendor sample)
        :return: raw integer status from the DLL
        """
        return self._lib().InitCAN(DevType, DevIndex, CANIndex, pInitConfig)

    def StartCAN(self, DevType, DevIndex, CANIndex):
        """Start one CAN channel of the USBCAN device.

        Call once per channel when the device has several; the vendor docs
        advise waiting ~10 ms after StartCAN before the first Transmit.
        """
        return self._lib().StartCAN(DevType, DevIndex, CANIndex)

    def Transmit(self, DevType, DevIndex, CANIndex, pSend, Len):
        """Send ``Len`` frames from ``pSend``.

        :return: the number of frames actually transmitted
        """
        return self._lib().Transmit(DevType, DevIndex, CANIndex, pSend, Len)

    def Receive(self, DevType, DevIndex, CANIndex, pReceive, Len, WaitTime):
        """Read buffered frames from the given CAN channel into ``pReceive``.

        :param Len: maximum number of frames to read
        :param WaitTime: wait time passed through to the driver
        :return: raw integer result from the DLL (``RECEIVE_ERR`` on failure)
        """
        return self._lib().Receive(DevType, DevIndex, CANIndex, pReceive, Len, WaitTime)

    def CloseDevice(self, DevType, DevIndex):
        """Close the device."""
        return self._lib().CloseDevice(DevType, DevIndex)

    def ClearBuffer(self, DevType, DevIndex, CANIndex):
        """Flush the buffer of the given CAN channel."""
        return self._lib().ClearBuffer(DevType, DevIndex, CANIndex)

    def ReadErrInfo(self, DevType, DevIndex, CANIndex, pErrInfo):
        """Read the last error into a :class:`VciErrInfo` structure pointer."""
        return self._lib().ReadErrInfo(DevType, DevIndex, CANIndex, pErrInfo)

    def ReadCanStatus(self, DevType, DevIndex, CANIndex, pCANStatus):
        """Read the controller status into the ``pCANStatus`` structure pointer."""
        return self._lib().ReadCanStatus(DevType, DevIndex, CANIndex, pCANStatus)
50695771cffd15a76d764cc359fe32d149466b8a | 2,285 | py | Python | PyFlow/Ui/StyleSheetEditor.py | pedroCabrera/PyFlow | 8b439d9b47fff450e91c09d40c7b286e88cb624f | [
"MIT"
] | 7 | 2018-06-24T15:55:00.000Z | 2021-07-13T08:11:25.000Z | PyFlow/Ui/StyleSheetEditor.py | pedroCabrera/PyFlow | 8b439d9b47fff450e91c09d40c7b286e88cb624f | [
"MIT"
] | 32 | 2019-02-18T20:47:46.000Z | 2019-05-30T12:51:10.000Z | PyFlow/Ui/StyleSheetEditor.py | pedroCabrera/PyFlow | 8b439d9b47fff450e91c09d40c7b286e88cb624f | [
"MIT"
] | 5 | 2019-02-19T23:26:21.000Z | 2020-12-23T00:32:59.000Z | from Qt import QtWidgets
from Qt import QtCore
from widgets.pc_HueSlider import pc_HueSlider,pc_GradientSlider
if __name__ == '__main__':
import sys
sys.path.append("..")
import stylesheet
else:
from .. import stylesheet
from .. import resources
class StyleSheetEditor(QtWidgets.QWidget):
    """Style Sheet Editor.

    Small Qt widget with a hue slider, a lightness slider and a background
    slider; emits :attr:`Updated` whenever the editable stylesheet changes.
    """
    # Emitted after any colour change so the host app can re-apply the stylesheet.
    Updated = QtCore.Signal()

    def __init__(self, parent=None):
        super(StyleSheetEditor, self).__init__(parent)
        # The mutable stylesheet object this editor drives.
        self.style = stylesheet.editableStyleSheet()
        self.setLayout(QtWidgets.QVBoxLayout())
        # Group box holding the main-colour hue and lightness sliders.
        self.mainGroup = QtWidgets.QGroupBox(self)
        self.mainGroupLay = QtWidgets.QVBoxLayout(self.mainGroup)
        mainLabel = QtWidgets.QLabel("Main Color Hue", parent=self.mainGroup)
        self.main_hue = pc_HueSlider(self.mainGroup)
        self.main_hue.valueChanged.connect(self.updateHue)
        self.main_light = pc_GradientSlider(self.mainGroup)
        self.main_light.valueChanged.connect(self.updateLight)
        self.mainGroupLay.addWidget(mainLabel)
        self.mainGroupLay.addWidget(self.main_hue)
        self.mainGroupLay.addWidget(self.main_light)
        # Separate slider controlling the background brightness.
        self.bgColor = pc_GradientSlider(self)
        self.bgColor.valueChanged.connect(self.updateBg)
        self.layout().addWidget(self.mainGroup)
        self.layout().addWidget(self.bgColor)
        # Seed the sliders from the current stylesheet colour.
        self.setColor(self.style.MainColor)
        self.bgColor.setValue(0.196)
        self.main_light.setValue(self.MainColor.lightnessF())
        # NOTE(review): appears unused within this class — verify before removing.
        self.USETEXTUREBG = True

    def setColor(self, color):
        """Remember *color* as the main colour and push it to the hue slider."""
        self.MainColor = color
        self.main_hue.setColor(color)

    def hue(self):
        """Return the current hue slider value."""
        return self.main_hue.value()

    def getStyleSheet(self):
        """Return the generated Qt stylesheet string."""
        return self.style.getStyleSheet()

    def updateHue(self, value):
        """Apply hue (and current lightness) to the stylesheet and notify listeners."""
        self.style.setHue(self.main_hue.value())
        self.style.setLightness(self.main_light.value())
        self.Updated.emit()

    def updateLight(self, value):
        """Apply lightness to both the hue slider preview and the stylesheet."""
        self.main_hue.setLightness(self.main_light.value())
        self.main_hue.update()
        self.style.setLightness(self.main_light.value())
        self.Updated.emit()

    def updateBg(self, value):
        """Apply the background slider value to the stylesheet and notify listeners."""
        self.style.setBg(self.bgColor.value())
        self.Updated.emit()
if __name__ == '__main__':
    # Standalone demo: show the editor and live-apply its stylesheet.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    a = StyleSheetEditor()

    def update():
        # Print the current background value and re-apply the updated
        # stylesheet to the whole application.
        # BUGFIX: was a Python-2 print statement ("print a.bgColor.value()"),
        # a syntax error under Python 3.
        print(a.bgColor.value())
        app.setStyleSheet(a.getStyleSheet())

    # Apply the initial stylesheet once (the original applied it twice in a row).
    app.setStyleSheet(a.getStyleSheet())
    a.Updated.connect(update)
    a.show()
    sys.exit(app.exec_())
5069c552bceb85ec98f646d7fca3218a5cca5f2d | 1,511 | gyp | Python | test/win/compiler-flags/calling-convention.gyp | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | test/win/compiler-flags/calling-convention.gyp | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
] | 1,432 | 2017-06-21T04:08:48.000Z | 2020-08-25T16:21:15.000Z | test/win/compiler-flags/calling-convention.gyp | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  # Four loadable modules compiling the same source with different MSVC
  # calling-convention defaults (VCCLCompilerTool.CallingConvention 0-3),
  # each exporting symbols through a matching .def file.
  'targets': [
    {
      # CallingConvention 0: __cdecl (/Gd), the MSVC default.
      'target_name': 'test_cdecl',
      'type': 'loadable_module',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'CallingConvention': 0,
        },
      },
      'sources': [
        'calling-convention.cc',
        'calling-convention-cdecl.def',
      ],
    },
    {
      # CallingConvention 1: __fastcall (/Gr).
      'target_name': 'test_fastcall',
      'type': 'loadable_module',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'CallingConvention': 1,
        },
      },
      'sources': [
        'calling-convention.cc',
        'calling-convention-fastcall.def',
      ],
    },
    {
      # CallingConvention 2: __stdcall (/Gz).
      'target_name': 'test_stdcall',
      'type': 'loadable_module',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'CallingConvention': 2,
        },
      },
      'sources': [
        'calling-convention.cc',
        'calling-convention-stdcall.def',
      ],
    },
  ],
  'conditions': [
    # __vectorcall (CallingConvention 3) only exists from Visual Studio 2013 on.
    ['MSVS_VERSION[0:4]>="2013"', {
      'targets': [
        {
          'target_name': 'test_vectorcall',
          'type': 'loadable_module',
          'msvs_settings': {
            'VCCLCompilerTool': {
              'CallingConvention': 3,
            },
          },
          'sources': [
            'calling-convention.cc',
            'calling-convention-vectorcall.def',
          ],
        },
      ],
    }],
  ],
}
| 22.552239 | 72 | 0.483786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 878 | 0.581072 |
506a7ef933c52555e333134521cbf1974676bc2c | 1,417 | py | Python | integrationtest/vm/virtualrouter/volume/test_add_volume.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 2 | 2016-03-23T08:45:44.000Z | 2017-06-26T02:40:46.000Z | integrationtest/vm/virtualrouter/volume/test_add_volume.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/virtualrouter/volume/test_add_volume.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | 2 | 2020-03-12T03:11:28.000Z | 2021-07-26T01:57:58.000Z | '''
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
    """Data-volume lifecycle scenario.

    Creates a VLAN VM, then creates a data volume and walks it through
    attach -> detach -> delete, running the framework checker after each
    step, and finally destroys the VM.
    """
    test_util.test_dsc('Create test vm and check')
    vm = test_stub.create_vlan_vm()
    # Track the VM so error_cleanup() can release it if anything below fails.
    test_obj_dict.add_vm(vm)

    test_util.test_dsc('Create volume and check')
    # Volume is sized from the root disk offering named in the environment.
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('rootDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)

    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.check()

    test_util.test_dsc('Attach volume and check')
    # mv vm checker later, to save some time.
    vm.check()
    volume.attach(vm)
    volume.check()

    test_util.test_dsc('Detach volume and check')
    volume.detach()
    volume.check()

    test_util.test_dsc('Delete volume and check')
    volume.delete()
    volume.check()

    # Volume is gone; stop tracking it, then tear down the VM.
    test_obj_dict.rm_volume(volume)
    vm.destroy()
    vm.check()
    test_util.test_pass('Create Data Volume for VM Test Success')
# Will be called only if exception happens in test().
def error_cleanup():
    """Release every resource tracked in test_obj_dict after a test failure."""
    test_lib.lib_error_cleanup(test_obj_dict)
| 28.918367 | 99 | 0.723359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 308 | 0.217361 |
506ad7660bd1038dfb11c4b2c1efad4b5755bce2 | 679 | py | Python | agents/base_agent.py | IanYHWu/msc_2021 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | [
"MIT"
] | null | null | null | agents/base_agent.py | IanYHWu/msc_2021 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | [
"MIT"
] | null | null | null | agents/base_agent.py | IanYHWu/msc_2021 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | [
"MIT"
] | null | null | null |
class BaseAgent(object):
    """Abstract base class for agent objects.

    Stores the shared collaborators (environment, model, rollout storage,
    device) and a step counter; concrete agents override ``predict`` and
    ``optimize``.
    """

    def __init__(self, env, actor_critic, storage, device):
        """
        env: (gym.Env) environment following the openAI Gym API
        """
        self.env = env
        self.actor_critic = actor_critic
        self.storage = storage
        self.device = device
        # step counter, starts at zero
        self.t = 0

    def predict(self, obs, hidden_state, done):
        """Predict the action for the given input (no-op in the base class)."""

    def optimize(self):
        """Train the neural network model (no-op in the base class)."""
506c907cacd0c8e8e93832785451ebe1451a8d30 | 175 | py | Python | malpi/dkwm/gym_envs/__init__.py | Bleyddyn/malpi | 9315f19366bd56da12c6dc7a84d830bbec530753 | [
"MIT"
] | 5 | 2017-03-27T22:15:54.000Z | 2022-01-19T23:46:46.000Z | malpi/dkwm/gym_envs/__init__.py | Bleyddyn/malpi | 9315f19366bd56da12c6dc7a84d830bbec530753 | [
"MIT"
] | 10 | 2017-01-19T19:22:06.000Z | 2022-02-27T21:29:50.000Z | malpi/dkwm/gym_envs/__init__.py | Bleyddyn/malpi | 9315f19366bd56da12c6dc7a84d830bbec530753 | [
"MIT"
] | null | null | null | from gym.envs.registration import register
from malpi.dkwm.gym_envs.dkwm_env import DKWMEnv
register(
id='dkwm-v0',
entry_point='malpi.dkwm.gym_envs:DKWMEnv',
)
| 19.444444 | 48 | 0.742857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.217143 |
506c9a44405c77c7273d7106b363a07835a96fae | 814 | py | Python | ProjectEuler100/Problem_003.py | shiv-1998/EulerProject | f10a7cc935a9cf9be45b639056a229b262b8709e | [
"MIT"
] | null | null | null | ProjectEuler100/Problem_003.py | shiv-1998/EulerProject | f10a7cc935a9cf9be45b639056a229b262b8709e | [
"MIT"
] | null | null | null | ProjectEuler100/Problem_003.py | shiv-1998/EulerProject | f10a7cc935a9cf9be45b639056a229b262b8709e | [
"MIT"
] | null | null | null | #!/bin/python3
import sys
import math
def isPrime(n):
    """Return True if n is prime.

    BUGFIX: the original returned True for 0 and 1 (its trial-division loop
    never ran for them) and raised ValueError on negatives via sqrt(); its
    shortcut or-chain also listed 13 twice. Numbers below 2 are not prime.
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0:
        return False
    # Trial division by odd candidates up to ceil(sqrt(n)).
    upperLimit = math.ceil(math.sqrt(n)) + 1
    for i in range(3, upperLimit, 2):
        if n % i == 0:
            return False
    return True
# For each of t queries, print the largest prime factor of n.
t = int(input().strip())
for a0 in range(t):
    n = int(input().strip())
    if isPrime(n):
        print(n)
    else:
        # Collect every divisor of n in (1, n) by trial division up to sqrt(n),
        # pairing each small divisor i with its cofactor n // i.
        factors = []
        upperLimit = math.ceil(math.sqrt(n)) + 1
        for i in range(2, upperLimit):
            if n % i == 0:
                factors.append(i)
                if i != (n // i):
                    factors.append(n // i)
        # BUGFIX: the original called sorted(factors) and discarded the
        # result; sort in place so the reverse scan below really walks the
        # divisors from largest to smallest.
        factors.sort()
        for j in range(len(factors) - 1, -1, -1):
            if isPrime(factors[j]):
                print(factors[j])
                break
506d74f909789d8ddf4cf808fbde73c70bc1832a | 9,579 | py | Python | opcua/__init__.py | minix1234/hacore_opcua | b77d1611416ebc6cc823562f81d758915672aa49 | [
"MIT"
] | 4 | 2021-01-10T21:13:30.000Z | 2021-11-13T09:22:48.000Z | opcua/__init__.py | minix1234/hacore_opcua | b77d1611416ebc6cc823562f81d758915672aa49 | [
"MIT"
] | 7 | 2021-03-06T05:14:26.000Z | 2021-12-29T04:38:46.000Z | opcua/__init__.py | minix1234/hacore_opcua | b77d1611416ebc6cc823562f81d758915672aa49 | [
"MIT"
] | null | null | null | """Support for OPCUA"""
import logging
import voluptuous as vol
from opcua import Client, ua
from homeassistant.const import (
ATTR_STATE,
CONF_URL,
CONF_NAME,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_PASSWORD,
EVENT_HOMEASSISTANT_STOP,
EVENT_HOMEASSISTANT_START,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import (
OPCUA_DOMAIN as DOMAIN,
DEFAULT_NAME,
DEFAULT_TIMEOUT,
CONF_SESSIONTIMEOUT,
CONF_SECURETIMEOUT,
CONF_SECURITYSTRING,
CONF_URI,
SERVICE_SET_VALUE,
SERVICE_SET_ATTRIBUTE,
SERVICE_READ_VALUE,
SERVICE_CONNECT,
SERVICE_CLOSE,
ATTR_HUB,
ATTR_NODEID,
ATTR_VALUE,
)
_LOGGER = logging.getLogger(__name__)
# Per-hub configuration: connection URL plus optional name, timeouts,
# credentials and security settings for one OPC UA server.
BASE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_URL): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
        vol.Optional(CONF_SESSIONTIMEOUT, default=3600000): cv.positive_int,
        vol.Optional(CONF_SECURETIMEOUT, default=600000): cv.positive_int,
        vol.Optional(CONF_USERNAME, default=None): vol.Any(None, cv.string),
        vol.Optional(CONF_PASSWORD, default=None): vol.Any(None, cv.string),
        vol.Optional(CONF_SECURITYSTRING, default=None): vol.Any(None, cv.string),
        vol.Optional(CONF_URI, default=None): vol.Any(None, cv.string),
    }
)

# The integration accepts a list of hub configurations under its domain key.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.ensure_list,
            [
                vol.Any(BASE_SCHEMA),
            ],
        ),
    },
    extra=vol.ALLOW_EXTRA,
)

# Service payload: write a value to a node id on the given hub.
SERVICE_SET_VALUE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string,
        vol.Required(ATTR_NODEID): cv.string,
        vol.Required(ATTR_VALUE): vol.Any(
            float,
            int,
            str,
            cv.byte,
            cv.boolean,
            cv.time,
        ),
    }
)

# Service payload: write a node's Value attribute (ua.DataValue path).
SERVICE_SET_ATTRIBUTE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string,
        vol.Required(ATTR_NODEID): cv.string,
        vol.Required(ATTR_VALUE): vol.Any(
            float,
            int,
            str,
            cv.byte,
            cv.boolean,
            cv.time,
        ),
    }
)

# Service payload: read a node id's value from the given hub.
SERVICE_READ_VALUE_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string,
        vol.Required(ATTR_NODEID): cv.string,
    }
)

# Service payload shared by the connect/close services: hub name only.
SERVICE_CONNECT_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_HUB, default=DEFAULT_NAME): cv.string,
    }
)
def setup(hass, config):
    """Set up the OPC UA integration: one OpcUAHub per configured server,
    lifecycle hooks on Home Assistant start/stop, and the service handlers."""
    hass.data[DOMAIN] = hub_collect = {}
    for conf_hub in config[DOMAIN]:
        # create an instance of a opcua hub connection, i.e. to a opcua server
        hub_collect[conf_hub[CONF_NAME]] = OpcUAHub(conf_hub)

    # Return boolean to indicate that initialization was successful.
    def stop_opcua(event):
        """Disconnect every hub when Home Assistant stops."""
        for client in hub_collect.values():
            client.close()

    def start_opcua(event):
        """Connect every hub once Home Assistant has started."""
        for client in hub_collect.values():
            client.connect()

    def set_value(service):
        """Service handler: write a value to an OPC UA node id."""
        hub = service.data[ATTR_HUB]
        value = service.data[ATTR_VALUE]
        nodeid = service.data[ATTR_NODEID]
        hub_collect[hub].setvalues(nodeid, value)

    def set_attribute(service):
        """Service handler: write a node's Value attribute."""
        hub = service.data[ATTR_HUB]
        value = service.data[ATTR_VALUE]
        nodeid = service.data[ATTR_NODEID]
        hub_collect[hub].setattribute(nodeid, value)

    def read_value(service):
        """Service handler: read an OPC UA node id's value."""
        hub = service.data[ATTR_HUB]
        nodeid = service.data[ATTR_NODEID]
        # Trying to determine if we can even access this data gathered
        return hub_collect[hub].readvalues(nodeid)

    def connect(service):
        """
        Service handler: reconnect a hub, e.g. after the OPC UA target
        restarts and the socket drops.
        """
        hub = service.data[ATTR_HUB]
        hub_collect[hub].connect()

    def close(service):
        """
        Service handler: close the hub's OPC UA connection gracefully.
        """
        hub = service.data[ATTR_HUB]
        hub_collect[hub].close()

    # do not wait for EVENT_HOMEASSISTANT_START, setup opcua connection properties now
    for client in hub_collect.values():
        client.setup()

    # register function to gracefully stop opcua connections
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_opcua)

    # register function to start opcua connections on HA statup
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_opcua)

    # Register service to write back values to opcua nodeids
    hass.services.register(
        DOMAIN,
        SERVICE_SET_VALUE,
        set_value,
        schema=SERVICE_SET_VALUE_SCHEMA,
    )
    # Register service to write back values to opcua nodeids via set attributes
    hass.services.register(
        DOMAIN,
        SERVICE_SET_ATTRIBUTE,
        set_attribute,
        schema=SERVICE_SET_ATTRIBUTE_SCHEMA,
    )
    # Register service to read opcua nodeids values on the fly
    hass.services.register(
        DOMAIN,
        SERVICE_READ_VALUE,
        read_value,
        schema=SERVICE_READ_VALUE_SCHEMA,
    )
    # Register services for opcua target reconnection
    hass.services.register(
        DOMAIN,
        SERVICE_CONNECT,
        connect,
        schema=SERVICE_CONNECT_SCHEMA,
    )
    # Register services for opcua connection closing
    hass.services.register(
        DOMAIN,
        SERVICE_CLOSE,
        close,
        schema=SERVICE_CONNECT_SCHEMA,
    )
    return True
class OpcUAHub:
    """Wrapper around one python-opcua Client connection to a server."""

    def __init__(self, client_config):
        """Store the per-hub configuration; the Client object is built in setup()."""
        # Set configuration variables
        self._client = None
        self._config_url = client_config[CONF_URL]
        self._config_name = client_config[CONF_NAME]
        self._config_timeout = client_config[CONF_TIMEOUT]
        self._config_sessiontimeout = client_config[CONF_SESSIONTIMEOUT]
        self._config_securetimeout = client_config[CONF_SECURETIMEOUT]
        self._config_username = client_config[CONF_USERNAME]
        self._config_password = client_config[CONF_PASSWORD]
        self._config_security = client_config[CONF_SECURITYSTRING]
        self._application_uri = client_config[CONF_URI]

    @property
    def name(self):
        """Return the name of this hub."""
        return self._config_name

    def setup(self):
        """Build the opcua Client and apply timeouts, URI, security and auth.

        Does not connect; connection happens via connect() on HA start.
        """
        _LOGGER.info('Setting up Client parameters for: ' + self._config_name)
        self._client = Client(self._config_url)
        # Setup timeouts
        self._client.timeout = self._config_timeout
        self._client.session_timeout = self._config_sessiontimeout
        self._client.secure_channel_timeout = self._config_securetimeout
        # setup URI and Security Type
        if self._application_uri is not None:
            self._client.application_uri = self._application_uri
        if self._config_security is not None:
            self._client.set_security_string(self._config_security)
        # Client Auth Setup
        if self._config_username is not None:
            self._client._username = self._config_username
        if self._config_password is not None:
            self._client._password = self._config_password
        # Attempt Device Connection
        # Wrapped in "try" due to socket critical error when OPCUA server rejects/tears down
        # the socket https://github.com/minix1234/hacore_opcua/issues/1
        # Moved initial connection attemp to start_opcua
        #try:
        #    self.connect()
        #except Exception as e:
        #    _LOGGER.error(e)

    def close(self):
        """Disconnect the client, logging (not raising) any failure."""
        try:
            self._client.disconnect()
        except Exception as e:
            _LOGGER.error(self._config_name + ': Channel Close Error: ' + str(e))

    def connect(self):
        """Connect the client, logging (not raising) any failure."""
        try:
            self._client.connect()
        except Exception as e:
            _LOGGER.error(self._config_name + ': Connection Error: ' + str(e))

    def readvalues(self, nodeid):
        """Read and return the value of *nodeid*; returns None (logged) on error."""
        try:
            return self._client.get_node(nodeid).get_value()
        except Exception as e:
            _LOGGER.error(str(nodeid) + ', Read Value Error: ' + str(e))

    def setvalues(self, nodeid, value):
        """Write *value* to *nodeid*, reusing the node's current VariantType."""
        try:
            node = self._client.get_node(nodeid)
            uatype = node.get_data_value().Value.VariantType
            node.set_value(value, uatype)
        except Exception as e:
            _LOGGER.error('Error: ' + str(e) + ' encountered when attempting to write a value of: '+str(value) +' to nodeid: '+ str(nodeid))

    def setattribute(self, nodeid, value):
        """Write *value* to *nodeid* via the Value attribute (ua.DataValue path)."""
        try:
            node = self._client.get_node(nodeid)
            uatype = node.get_data_value().Value.VariantType
            datavalue = ua.DataValue(ua.Variant(value, uatype))
            node.set_attribute(ua.AttributeIds.Value, datavalue)
        except Exception as e:
            _LOGGER.error('Error: ' + str(e) + ' encountered when attempting to write an attribute.ValueIds.Value of: '+str(value) +' to nodeid: '+ str(nodeid))
506ec8d37920dc02fb0964cd084a15f385c1f491 | 2,210 | py | Python | pysnmp-with-texts/RBT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/RBT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/RBT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module RBT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RBT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:18:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, TimeTicks, ModuleIdentity, Integer32, MibIdentifier, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, ObjectIdentity, Unsigned32, Bits, NotificationType, enterprises, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "TimeTicks", "ModuleIdentity", "Integer32", "MibIdentifier", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "ObjectIdentity", "Unsigned32", "Bits", "NotificationType", "enterprises", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
rbt = ModuleIdentity((1, 3, 6, 1, 4, 1, 17163))
rbt.setRevisions(('2009-09-23 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rbt.setRevisionsDescriptions(('Updated contact information',))
if mibBuilder.loadTexts: rbt.setLastUpdated('200909230000Z')
if mibBuilder.loadTexts: rbt.setOrganization('Riverbed Technology, Inc.')
if mibBuilder.loadTexts: rbt.setContactInfo(' Riverbed Technical Support support@riverbed.com')
if mibBuilder.loadTexts: rbt.setDescription('Riverbed Technology MIB')
products = MibIdentifier((1, 3, 6, 1, 4, 1, 17163, 1))
mibBuilder.exportSymbols("RBT-MIB", products=products, PYSNMP_MODULE_ID=rbt, rbt=rbt)
| 88.4 | 505 | 0.776923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.463348 |
506f3419eb908bd049908e74833faca71688b303 | 1,160 | py | Python | fixture/take_datetime.py | dondemonz/audit_lite | 754f4fef15fbd8db5c6c420dbfa6bd3ece39a50a | [
"Apache-2.0"
] | null | null | null | fixture/take_datetime.py | dondemonz/audit_lite | 754f4fef15fbd8db5c6c420dbfa6bd3ece39a50a | [
"Apache-2.0"
] | null | null | null | fixture/take_datetime.py | dondemonz/audit_lite | 754f4fef15fbd8db5c6c420dbfa6bd3ece39a50a | [
"Apache-2.0"
] | null | null | null | import datetime as dt
from datetime import timedelta
def take_datetimes():
    """Return five "%Y-%m-%d %H:%M:%S" strings at now+1s .. now+5s.

    All five offsets are derived from a single reference "now", so the
    returned timestamps are exactly one second apart. (The original sampled
    datetime.now() five separate times, so consecutive stamps could drift.)

    Returns:
        tuple[str, str, str, str, str]: formatted timestamps, earliest first.
    """
    base = dt.datetime.now()
    stamps = [
        (base + timedelta(seconds=offset)).strftime("%Y-%m-%d %H:%M:%S")
        for offset in range(1, 6)
    ]
    return tuple(stamps)
def take_datetime_from_db_timeto(db):
    """Format the 'time to' cell (first row, column 3) of db.records."""
    return db.records[0][3].strftime("%Y-%m-%d %H:%M:%S")
def take_datetime_from_db_timefrom(db):
    """Format the 'time from' cell (first row, column 2) of db.records."""
    return db.records[0][2].strftime("%Y-%m-%d %H:%M:%S")
def take_datetime_from_db_timefrom_second_row(db):
    """Format the 'time from' cell (second row, column 2) of db.records."""
    return db.records[1][2].strftime("%Y-%m-%d %H:%M:%S")
def take_datetime():
    """Return the current local time as a "%Y-%m-%d %H:%M:%S" string."""
    return dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
50705e4a528d8b646477227beb32216708a0b850 | 3,894 | py | Python | shelly-static-ip.py | asillye/shelly-static-ip | 2f697b081f9ea10393686fd7f243ea3de8cfca20 | [
"MIT"
] | null | null | null | shelly-static-ip.py | asillye/shelly-static-ip | 2f697b081f9ea10393686fd7f243ea3de8cfca20 | [
"MIT"
] | null | null | null | shelly-static-ip.py | asillye/shelly-static-ip | 2f697b081f9ea10393686fd7f243ea3de8cfca20 | [
"MIT"
] | null | null | null | import threading
import traceback
import logging
import requests
from json.decoder import JSONDecodeError
from ping3 import ping
logging.basicConfig(level=logging.INFO)
GATEWAY_IP = "192.168.100.1"
STATIC_IP_MIN = 200
STATIC_IP_MAX = 254
lastDot = GATEWAY_IP.rfind(".")
ipAddressBase = GATEWAY_IP[0:lastDot+1]
threadLock = threading.Lock()
availableForStaticIp = []
dchpNeedToReconfigure = []
def registerShellyFound(outputFile, ip, mac = "", type = "", ipv4_method ="", name = ""):
    """Append one discovered Shelly to the report file (thread-safe).

    DHCP-configured devices are also queued for static-IP reconfiguration.
    """
    with threadLock:
        fields = (ip, mac, type, ipv4_method, str(name))
        outputFile.write('\t'.join(fields) + '\n')
        if ipv4_method == "dhcp":
            dchpNeedToReconfigure.append(ip)
def detectDevice(ipLast):
    """Ping a non-Shelly address in the static range and, if nothing answers,
    record its last octet as a free static-IP slot.

    Args:
        ipLast: last octet of the address being probed.
    """
    ip = ipAddressBase + str(ipLast)
    # BUGFIX: the original used '&' (bitwise AND) instead of 'and'; it only
    # worked by accident because ipLast & ipLast == ipLast.
    if STATIC_IP_MIN < ipLast < STATIC_IP_MAX:
        logging.debug('No Shelly on IP %s, pinging IP to check availability...', ip)
        pingResult = ping(ip)
        # ping3 returns False for an unresolvable host and None on timeout.
        # BUGFIX: the original only checked '== False', so hosts that timed
        # out were treated as occupied (and the '%f' log below was fed None).
        if pingResult is False or pingResult is None:
            logging.debug("No device on IP %s, registering as available static IP", ip)
            availableForStaticIp.append(ipLast)
        else:
            logging.debug('Network device detected on IP %s, ping in %f sec.', ip, pingResult)
    return
def detectShelly(ipLast, outputFile):
    """Probe one host for the Shelly HTTP API and record it if found.

    Fetches http://<ip>/settings; a valid JSON reply identifies a Shelly
    device, which is logged and appended to *outputFile*. Anything else
    (non-200 reply, no HTTP listener) is handed to detectDevice() to check
    whether the address is free.
    """
    try:
        ip = ipAddressBase + str(ipLast)
        logging.debug('Checking for Shelly at IP %s...', ip)
        url = "http://" + ip + "/settings"
        response = requests.get(url, timeout=10)
        if response.status_code != 200:
            # Something answered on port 80 but it is not a Shelly settings endpoint.
            detectDevice(ipLast)
            return
        # Pull identification fields from the Shelly settings document.
        json = response.json()
        device = json["device"]
        cloud = json["cloud"]
        cloud_enabled = cloud["enabled"]
        name = json["name"]
        mac = device["mac"]
        type = device["type"]
        wifi_sta = json["wifi_sta"]
        ipv4_method = wifi_sta["ipv4_method"]
        logging.info("Found: ip=%s, mac=%s, type=%s, name=%s, cloud=%d, ipv4_method=%s", ip, mac, type, name, cloud_enabled, ipv4_method)
        registerShellyFound(outputFile, ip, mac, type, ipv4_method, name)
    except JSONDecodeError:
        # 200 reply that is not JSON -> not a Shelly.
        return
    except AttributeError:
        # NOTE(review): broad guard — silently drops devices whose settings
        # payload lacks an expected field; consider logging instead.
        return
    except requests.ConnectionError as error:
        # Nothing listening on port 80; fall back to a plain ping probe.
        detectDevice(ipLast)
        return
def configureStaticIp(currentIp, newIp, gatewayIp):
    """Switch a Shelly from DHCP to a static address via its HTTP settings API.

    Args:
        currentIp: address the device currently holds (DHCP lease).
        newIp: static address to assign (netmask fixed at 255.255.255.0).
        gatewayIp: default gateway to configure on the device.
    """
    try:
        # example: http://192.168.100.165/settings/sta?ipv4_method=static&ip=192.168.100.208&netmask=255.255.255.0&gateway=192.168.100.1
        logging.info("Reconfiguring Shelly with DHCP on IP %s to new IP %s with gateway %s", currentIp, newIp, gatewayIp)
        url = "http://" + currentIp + "/settings/sta?ipv4_method=static&ip=" + newIp + "&netmask=255.255.255.0&gateway=" + gatewayIp
        response = requests.get(url, timeout=5)
        if response.status_code != 200:
            logging.error("Error reconfiguring %s error code %d", currentIp, response.status_code)
            return
    except Exception as e:
        # NOTE(review): broad catch — any network error is logged and swallowed.
        logging.error(traceback.format_exc())
        return
def scanForShellys():
    """Probe hosts .2-.253 of the subnet concurrently and write the report file.

    Side effects: fills availableForStaticIp and dchpNeedToReconfigure
    (sorted afterwards) and writes shelly-ip-table.txt.
    """
    report = open("shelly-ip-table.txt", "w", encoding="utf-8")
    workers = []
    for lastOctet in range(2, 254):
        worker = threading.Thread(target=detectShelly, args=(lastOctet, report))
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    report.close()
    availableForStaticIp.sort()
    dchpNeedToReconfigure.sort()
def reconfigureDhcpShellys():
    """Give each DHCP-configured Shelly a static IP from the free-slot pool.

    Stops (with an error log) when the pool of free static addresses runs out.
    """
    for ipToReconfigure in dchpNeedToReconfigure:
        # BUGFIX: the original tested 'availableForStaticIp.count == 0',
        # which compares the bound list method to 0 and is always False, so
        # an exhausted pool crashed pop(0) with IndexError instead of stopping.
        if not availableForStaticIp:
            logging.error("No more static IP slot available for %s. Stopping.", ipToReconfigure)
            break
        staticIpLast = availableForStaticIp.pop(0)
        staticIp = ipAddressBase + str(staticIpLast)
        configureStaticIp(ipToReconfigure, staticIp, GATEWAY_IP)
scanForShellys()
reconfigureDhcpShellys() | 33.86087 | 137 | 0.642527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.204931 |
5070624cc56ad0773dad1ee66cbdce5a6d58ebcf | 743 | py | Python | src/problems/other/nearest_square.py | E1mir/PySandbox | 44b39b98a41add433f0815cd3cde4d7554629eea | [
"MIT"
] | null | null | null | src/problems/other/nearest_square.py | E1mir/PySandbox | 44b39b98a41add433f0815cd3cde4d7554629eea | [
"MIT"
] | null | null | null | src/problems/other/nearest_square.py | E1mir/PySandbox | 44b39b98a41add433f0815cd3cde4d7554629eea | [
"MIT"
def solution(num):
    """Return the integer square root of num (largest k with k*k <= num).

    Raises:
        ValueError: if num is negative (preserved from the original).
    """
    if num < 0:
        raise ValueError
    # math.isqrt computes the floor square root directly in C, replacing the
    # original O(n) linear scan over range(num // 2 + 1).
    return math.isqrt(num)
def best_solution(num):
    """Binary-search the floor square root of num.

    Raises:
        ValueError: if num is negative.
    """
    if num < 0:
        raise ValueError
    if num == 1:
        return 1
    # Invariant: lo*lo <= num < hi*hi once the loop has narrowed the range.
    lo, hi = 0, num // 2 + 1
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        sq = mid * mid
        if sq == num:
            return mid
        if sq < num:
            lo = mid
        else:
            hi = mid
    return lo
if __name__ == '__main__':
    # Smoke-test both implementations from the command line.
    a = solution(99898)
    print(a)
    a = best_solution(19)
print(a) | 15.808511 | 37 | 0.442799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.013459 |
5070e419f2d08a1bc9a7ca9e0d7ce7f4c5f2424a | 1,503 | py | Python | src/tarski/fstrips/hybrid/differential_constraints.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 29 | 2018-11-26T20:31:04.000Z | 2021-12-29T11:08:40.000Z | src/tarski/fstrips/hybrid/differential_constraints.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 101 | 2018-06-07T13:10:01.000Z | 2022-03-11T11:54:00.000Z | src/tarski/fstrips/hybrid/differential_constraints.py | phoeft670/tarski | 7d955e535fbbca012bfd1a12402b97febc6b35b9 | [
"Apache-2.0"
] | 18 | 2018-11-01T22:44:39.000Z | 2022-02-28T04:57:15.000Z |
from ...syntax import BuiltinFunctionSymbol, CompoundTerm
from . import errors as err
class DifferentialConstraint:
    """ A (possibly lifted) differential constraint: an ODE over a state variable,
    active while a condition holds.

    NOTE(review): the original docstring and the __str__ label both say
    "reaction", apparently copied from a sibling class; the __str__ output is
    left untouched here in case callers depend on its exact format.
    """

    def __init__(self, language, name, parameters, condition, variate, ode):
        self.name = name
        self.language = language
        self.parameters = parameters   # parameters the constraint is lifted over
        self.condition = condition     # formula gating when the ODE applies
        self.variate = variate         # the (compound) term being differentiated
        self.ode = ode                 # right-hand side of the ODE
        self._check_well_formed()

    def _check_well_formed(self):
        """Validate the variate: must be a compound term and not a built-in function."""
        if not isinstance(self.variate, CompoundTerm):
            raise err.InvalidDifferentialConstraintDefinition(self.variate, "Needs to be a compound term")
        if isinstance(self.variate, BuiltinFunctionSymbol):
            raise err.InvalidDifferentialConstraintDefinition(self.variate, "Cannot be a built-in function")
        # ....

    def ident(self):
        """Return a readable identifier of the form name(p1, p2, ...)."""
        params = ', '.join([str(o) for o in self.parameters])
        return '{}({})'.format(self.name, params)

    def dump(self):
        """Serialise the constraint (recursively dumping its components) to a dict."""
        return dict(name=self.name,
                    params=[par.dump() for par in self.parameters],
                    condition=self.condition.dump(),
                    variate=self.variate.dump(),
                    ode=self.ode.dump())

    def __str__(self):
        # NOTE(review): 'reaction' label looks copy-pasted from another class;
        # confirm no caller parses this text before renaming it.
        tokens = ['reaction {}:'.format(self.name),
                  'cond: ({})'.format(self.condition),
                  'variate: {}'.format(self.variate),
                  'ode: {}'.format(self.ode)]
        return '\n'.join(tokens)
| 35.785714 | 108 | 0.596806 | 1,413 | 0.94012 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.110446 |
507245b633d156f4473e06ba288a5567ea2cdb8d | 5,915 | py | Python | imix/models/encoder/lcgnencoder.py | linxi1158/iMIX | af87a17275f02c94932bb2e29f132a84db812002 | [
"Apache-2.0"
] | 23 | 2021-06-26T08:45:19.000Z | 2022-03-02T02:13:33.000Z | imix/models/encoder/lcgnencoder.py | XChuanLee/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 | [
"Apache-2.0"
] | null | null | null | imix/models/encoder/lcgnencoder.py | XChuanLee/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 | [
"Apache-2.0"
] | 9 | 2021-06-10T02:36:20.000Z | 2021-11-09T02:18:16.000Z | import numpy as np
import torch.nn.functional as F
from torch import Tensor
from typing import Tuple
from ..builder import ENCODER
import torch.nn as nn
import torch
@ENCODER.register_module()
class LCGNEncoder(nn.Module):
    """LCGN question encoder: word-embedding lookup followed by a BiLSTM.

    ``forward`` yields per-word contextual vectors plus a single summary
    vector for the whole question.
    """

    def __init__(self, WRD_EMB_INIT_FILE: str, encInputDropout: float, qDropout: float, WRD_EMB_DIM: int,
                 ENC_DIM: int, WRD_EMB_FIXED: bool) -> None:
        """Build the encoder.

        Args:
            WRD_EMB_INIT_FILE: path of an .npy file holding the initial word-embedding matrix.
            encInputDropout: the embedding-dropout layer is built as ``Dropout(1 - encInputDropout)``.
            qDropout: analogous value for the question-vector dropout layer.
            WRD_EMB_DIM: word-embedding dimensionality.
            ENC_DIM: encoder output dimensionality (split across the two LSTM directions).
            WRD_EMB_FIXED: freeze the embedding matrix during training when True.
        """
        super().__init__()
        self.WRD_EMB_INIT_FILE = WRD_EMB_INIT_FILE
        self.encInputDropout = encInputDropout
        self.qDropout = qDropout
        self.WRD_EMB_DIM = WRD_EMB_DIM
        self.ENC_DIM = ENC_DIM
        self.WRD_EMB_FIXED = WRD_EMB_FIXED
        # Initial embedding table loaded from disk, e.g. shape (vocab_size, WRD_EMB_DIM).
        init_table = np.load(self.WRD_EMB_INIT_FILE)
        # Trainable unless WRD_EMB_FIXED is set; moved onto the GPU up front.
        self.embeddingsVar = nn.Parameter(torch.Tensor(init_table), requires_grad=(not self.WRD_EMB_FIXED)).cuda()
        self.enc_input_drop = nn.Dropout(1 - self.encInputDropout)
        self.rnn0 = BiLSTM(self.WRD_EMB_DIM, self.ENC_DIM)
        self.question_drop = nn.Dropout(1 - self.qDropout)

    def forward(self, qIndices: Tensor, questionLengths: Tensor) -> Tuple[Tensor, Tensor]:
        """Encode a batch of tokenized questions.

        Args:
            qIndices: (batch, max_len) word indices; index 0 addresses an all-zero padding row.
            questionLengths: (batch,) true question lengths.

        Returns:
            Tuple[Tensor, Tensor]: per-word context vectors of shape
            (batch, max_len, ENC_DIM) and whole-question vectors of shape (batch, ENC_DIM).
        """
        # Row 0 is a zero vector reserved for padding; real embeddings are shifted up by one.
        pad_row = torch.zeros(1, self.WRD_EMB_DIM, device='cpu').cuda()
        lookup_table = torch.cat([pad_row, self.embeddingsVar], dim=0)
        word_vecs = self.enc_input_drop(F.embedding(qIndices, lookup_table))
        # BiLSTM over the (dropped-out) word embeddings.
        ctx_words, q_vec = self.rnn0(word_vecs, questionLengths)
        return ctx_words, self.question_drop(q_vec)
class BiLSTM(nn.Module):
    """Single-layer bidirectional LSTM with TensorFlow-style initialization."""

    def __init__(self, WRD_EMB_DIM: int, ENC_DIM: int, forget_gate_bias: float = 1.) -> None:
        """Create the BiLSTM.

        Args:
            WRD_EMB_DIM: input (word-embedding) dimensionality.
            ENC_DIM: total output dimensionality; each direction gets ENC_DIM // 2.
            forget_gate_bias: initial value written into the forget-gate bias slice.
        """
        super().__init__()
        self.WRD_EMB_DIM = WRD_EMB_DIM
        self.ENC_DIM = ENC_DIM
        self.bilstm = torch.nn.LSTM(
            input_size=self.WRD_EMB_DIM,
            hidden_size=self.ENC_DIM // 2,
            num_layers=1,
            batch_first=True,
            bidirectional=True)

        half = self.ENC_DIM // 2
        # Uniform "fan-average" weight initialization, matching the TensorFlow
        # reference implementation. The loop order mirrors the original
        # statement order so the RNG stream is consumed identically.
        fan_avg = (half * 4 + (half + self.WRD_EMB_DIM)) / 2.
        limit = np.sqrt(3. / fan_avg)
        for weight in (self.bilstm.weight_ih_l0, self.bilstm.weight_hh_l0,
                       self.bilstm.weight_ih_l0_reverse, self.bilstm.weight_hh_l0_reverse):
            nn.init.uniform_(weight, -limit, limit)

        # Forget-gate bias starts at ``forget_gate_bias``; everything else at 0.
        # Only the input-hidden bias trains (TensorFlow's LSTM has one bias), so
        # the hidden-hidden bias is zeroed and frozen.
        for ih_bias, hh_bias in ((self.bilstm.bias_ih_l0, self.bilstm.bias_hh_l0),
                                 (self.bilstm.bias_ih_l0_reverse, self.bilstm.bias_hh_l0_reverse)):
            ih_bias.data[...] = 0.
            ih_bias.data[half:2 * half] = forget_gate_bias
            hh_bias.data[...] = 0.
            hh_bias.requires_grad = False

    def forward(self, questions: Tensor, questionLengths: Tensor) -> Tuple[Tensor, Tensor]:
        """Run the BiLSTM over a padded batch of embedded questions.

        Args:
            questions: (batch, max_len, WRD_EMB_DIM) embedded questions.
            questionLengths: (batch,) true lengths of each question.

        Returns:
            Tuple[Tensor, Tensor]: padded per-step outputs of shape
            (batch, max_len, ENC_DIM) and final hidden states of shape (batch, ENC_DIM).
        """
        # pack_padded_sequence needs lengths in descending order, so sort the
        # batch and remember the permutation needed to undo it afterwards.
        lengths_desc, perm = torch.sort(questionLengths, descending=True)
        _, inverse_perm = torch.sort(perm, descending=False)
        lengths_desc = lengths_desc.to(device='cpu', dtype=torch.int64)
        packed = nn.utils.rnn.pack_padded_sequence(questions[perm], lengths_desc, batch_first=True)
        packed_out, (h_n_sorted, _) = self.bilstm(packed)
        out_sorted, _ = nn.utils.rnn.pad_packed_sequence(
            packed_out, batch_first=True, total_length=questions.size(1))
        # Concatenate both directions' final states into one vector per sample.
        h_n_sorted = torch.transpose(h_n_sorted, 1, 0).reshape(questions.size(0), -1)
        # Restore the caller's original sample order.
        return out_sorted[inverse_perm], h_n_sorted[inverse_perm]
| 43.175182 | 119 | 0.669653 | 5,716 | 0.966357 | 0 | 0 | 2,549 | 0.430938 | 0 | 0 | 2,063 | 0.348774 |
5072a03141c6bb10b8a98835580689e3de8cd922 | 8,947 | py | Python | USB/python/test-usb2020.py | wjasper/Linix_Drivers | 9c5443f3c9d249f341b6b8580929f8cdbdba4079 | [
"JasPer-2.0"
] | 100 | 2016-11-08T15:41:43.000Z | 2022-02-20T19:37:32.000Z | USB/python/test-usb2020.py | wjasper/Linix_Drivers | 9c5443f3c9d249f341b6b8580929f8cdbdba4079 | [
"JasPer-2.0"
] | 37 | 2017-01-05T17:48:14.000Z | 2022-01-06T17:43:27.000Z | USB/python/test-usb2020.py | wjasper/Linix_Drivers | 9c5443f3c9d249f341b6b8580929f8cdbdba4079 | [
"JasPer-2.0"
] | 65 | 2016-12-20T07:03:44.000Z | 2022-03-14T21:48:35.000Z | #! /usr/bin/python3
#
# Copyright (c) 2020 Warren J. Jasper <wjasper@ncsu.edu>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import time
import sys
import fcntl
import os
import math
from usb_2020 import *
def toContinue():
    """Prompt the user; return True iff the reply is 'y' or 'Y'."""
    reply = input('Continue [yY]? ')
    return reply in ('y', 'Y')
def _gain_from_choice(usb2020, choice):
    """Map the menu's numeric gain choice (1-4) to the device gain constant.

    Returns None when the choice is not recognized. Centralizes the mapping
    that was previously duplicated in the 'i', 'I' and 'B' menu branches.
    """
    return {1: usb2020.BP_10V,
            2: usb2020.BP_5V,
            3: usb2020.BP_2V,
            4: usb2020.BP_1V}.get(choice)


def main():
    """Interactive menu-driven exerciser for the MCC USB-2020 DAQ device."""
    # initalize the class
    try:
        usb2020 = usb_2020()
        print("USB-2020 device found.")
    except Exception:
        print("No USB-2020 device found.")
        return

    # print out the calibration tables
    print('\nCalibration Analog Input Table:')
    for channel in range(usb2020.NCHAN):
        for gain in range(usb2020.NGAIN):
            print(' Channel =', channel, ' Range = ', gain,
                  'Slope =', format(usb2020.table_AIn[channel][gain].slope, '.5f'),
                  'Intercept =', format(usb2020.table_AIn[channel][gain].intercept, '5f'))

    # print last known calibration date:
    mdate = usb2020.CalDate()
    print('\nMFG Calibration date: ', mdate)
    print("wMaxPacketSize = ", usb2020.wMaxPacketSize)

    while True:
        print("\nUSB-2020 Testing")
        print("------------------")
        print("Hit 'b' to blink LED.")
        print("Hit 'B' to test BURSTIO.")
        print("Hit 'C' to test continous sampling")
        print("Hit 'd' to read/write digital port.")
        print("Hit 'e' to exit.")
        print("Hit 'i' to test analog input. (differential)")
        print("Hit 'I' to test analog input scan.")
        print("Hit 'M' for information.")
        print("Hit 'T' to get temperature")
        print("Hit 'r' to reset the device.")
        print("Hit 'S' to get status")
        print("Hit 's' to get serial number.")
        print("Hit 'v' to get version numbers")
        ch = input('\n')

        if ch == 'b':
            count = int(input('Enter number of times to blink: '))
            usb2020.BlinkLED(count)
        elif ch == 'e':
            # NOTE: a second, unreachable 'e' branch later in the original
            # if/elif chain has been removed (this branch always wins).
            usb2020.udev.close()
            exit(0)
        elif ch == 'd':
            print("Testing Digital I/O ...")
            print("connect pins DIO[0-3] <--> DIO[4-7]")
            usb2020.DTristateW(0xf0)
            print("Digital Port Tristate Register = ", hex(usb2020.DTristateR()))
            while True:
                value = int(input('Enter a byte number [0-0xf]: '), 16) & 0xf
                usb2020.DLatchW(value)
                value2 = usb2020.DLatchR()
                value3 = usb2020.DPort() >> 4
                print("The number you entered: ", hex(value3), " Latched value: ", hex(value2))
                if not toContinue():
                    break
        elif ch == 'i':
            # Single-value analog input on one channel.
            channel = int(input('Input channel [0-1]: '))
            choice = int(input('Enter gain. 1 = +/-10V 2 = +/- 5V 3 = +/- 2V 4 = +/- 1V: '))
            gain = _gain_from_choice(usb2020, choice)
            if gain is None:
                print('Unknown gain choice.')
                break  # matches the original: leave the menu on a bad choice
            usb2020.AInConfigW(0, channel, gain, True)
            while True:
                try:
                    value = usb2020.AIn(channel, gain)
                except ValueError as e:
                    print(e)
                    break
                print("AIn: %#x volt = %f" % (value, usb2020.volts(gain, value)))
                if not toContinue():
                    break
        elif ch == 'I':
            print('Testing USB-2020 Analog Input Scan.')
            usb2020.AInScanStop()
            usb2020.AInScanClearFIFO()
            count = int(input('Enter total number of scans: '))
            nRepeat = int(input('Enter number of repeats: '))
            choice = int(input('Enter gain. 1 = +/-10V 2 = +/- 5V 3 = +/- 2V 4 = +/- 1V: '))
            frequency = float(input('Enter sampling frequency [Hz]: '))
            nChan = int(input('Enter number of channels [1-2]: '))
            # BUGFIX: the original converted the 1-4 gain choice inside the
            # per-channel loop, so with 2 channels the already-converted gain
            # constant was fed back through the 1-4 lookup on the second pass,
            # silently selecting the wrong range. Convert exactly once.
            gain = _gain_from_choice(usb2020, choice)
            if gain is None:
                print('Unknown gain choice.')
                continue  # return to the menu instead of scanning with a bogus gain
            for channel in range(nChan):
                usb2020.AInConfigW(channel, channel, gain)
            usb2020.AInConfigW(nChan-1, nChan-1, gain, True)
            for repeat in range(nRepeat):
                print('\n\n---------------------------------------')
                print('repeat: %d' % (repeat))
                mode = 0
                options = 0
                usb2020.AInScanStart(count, 0, frequency, options, mode)
                data = usb2020.AInScanRead()
                print('Number of samples read = %d (should be %d)' % (len(data), count*nChan))
                for i in range(count):
                    print("%6d" % (i), end='')
                    for j in range(nChan):
                        k = i*nChan + j
                        if mode & usb2020.VOLTAGE:  # data returned in volts
                            print(", %8.4lf V" % data[k], end='')
                        else:
                            # Raw counts: flag saturation, otherwise apply the
                            # per-channel calibration before converting to volts.
                            if data[k] >= 0xffd:
                                print("DAC is saturated at +FS")
                            elif data[k] <= 0x30:
                                print("DAC is saturated at -FS")
                            else:
                                data[k] = int(round(data[k]*usb2020.table_AIn[j][gain].slope + usb2020.table_AIn[j][gain].intercept))
                                print(", %8.4lf V" % usb2020.volts(gain, data[k]), end='')
                    print("")
                print("\n---------------------------------------\n")
            usb2020.AInScanStop()
            usb2020.AInScanClearFIFO()
        elif ch == 'C':
            print("Testing USB-2020 Analog Input Scan in continuous mode 2 channels")
            print("Hit any key to exit")
            frequency = float(input("Enter desired sampling frequency (greater than 1000): "))
            usb2020.AInScanStop()
            nScans = 0  # 0 scans ==> continuous mode
            nChan = 2   # sample both channels
            gain = usb2020.BP_10V
            for channel in range(nChan):
                usb2020.AInConfigW(channel, channel, gain)
            usb2020.AInConfigW(nChan-1, nChan-1, gain, lastElement=True)
            time.sleep(1)
            mode = usb2020.CONTINUOUS_READOUT
            options = 0
            usb2020.AInScanStart(nScans, 0, frequency, options, mode)
            # Put stdin into non-blocking mode so any keypress ends the scan.
            flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL)
            fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag | os.O_NONBLOCK)
            i = 0
            while True:
                raw_data = usb2020.AInScanRead()
                if i % 100 == 0:
                    print('Scan =', i, 'samples returned =', len(raw_data))
                i += 1
                c = sys.stdin.readlines()
                if len(c) != 0:
                    break
            fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag)  # restore blocking stdin
            usb2020.AInScanStop()
            usb2020.AInScanClearFIFO()
        elif ch == 'B':
            print('Testing USB-2020 Analog Input Scan BURSTIO mode')
            usb2020.AInScanStop()
            usb2020.AInScanClearFIFO()
            nSamples = int(input('Enter number of samples (greater than or equal to 256, less than 64 MB and a multiple of 256): '))
            channel = int(input('Input channel [0-1]: '))
            frequency = float(input("Enter desired sampling frequency (greater than 1000): "))
            choice = int(input('Enter gain. 1 = +/-10V 2 = +/- 5V 3 = +/- 2V 4 = +/- 1V: '))
            gain = _gain_from_choice(usb2020, choice)
            if gain is None:
                print('Unknown gain choice.')
                break  # matches the original: leave the menu on a bad choice
            usb2020.AInConfigW(0, channel, gain, lastElement=True)
            options = usb2020.DDR_RAM  # burst the samples through onboard DDR RAM
            print('options = ', options)
            mode = 0x0
            usb2020.AInScanStart(nSamples, 0, frequency, options, mode)
            data = usb2020.AInScanRead()
            print('Number of samples read = %d (should be %d)' % (len(data), nSamples))
            usb2020.AInScanStop()
            usb2020.AInScanClearFIFO()
        elif ch == 'M':
            print("Manufacturer: %s" % usb2020.getManufacturer())
            print("Product: %s" % usb2020.getProduct())
            print("Serial No: %s" % usb2020.getSerialNumber())
        elif ch == 'r':
            usb2020.Reset()
        elif ch == 'S':
            print('Status =', hex(usb2020.Status()))
            usb2020.printStatus()
        elif ch == 's':
            print("Serial No: %s" % usb2020.getSerialNumber())
        elif ch == 'T':
            print("Internal temperature = %.2f deg C or %.2f deg " % (usb2020.Temperature(), usb2020.Temperature()*9./5. + 32.))
        elif ch == 'v':
            print("FPGA version %s" % (usb2020.FPGAVersion()))
# Run the interactive test menu only when executed as a script.
if __name__ == "__main__":
    main()
| 36.222672 | 126 | 0.579077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,182 | 0.35565 |
50732e512e8aa60eef305eb3f7a68cdf0138b721 | 1,186 | py | Python | archive/lym_project/treeano-master/benchmarks/fractional_max_pooling.py | peterdonnelly1/u24_lymphocyte | dff7ceed404c38582feb81aa9b8a55d80ada0f77 | [
"BSD-3-Clause"
] | 45 | 2015-04-26T04:45:51.000Z | 2022-01-24T15:03:55.000Z | archive/lym_project/treeano-master/benchmarks/fractional_max_pooling.py | peterdonnelly1/u24_lymphocyte | dff7ceed404c38582feb81aa9b8a55d80ada0f77 | [
"BSD-3-Clause"
] | 8 | 2018-07-20T20:54:51.000Z | 2020-06-12T05:36:04.000Z | archive/lym_project/treeano-master/benchmarks/fractional_max_pooling.py | peterdonnelly1/u24_lymphocyte | dff7ceed404c38582feb81aa9b8a55d80ada0f77 | [
"BSD-3-Clause"
] | 22 | 2018-05-21T23:57:20.000Z | 2022-02-21T00:48:32.000Z | import numpy as np
import theano
import theano.tensor as T
import treeano.nodes as tn
from treeano.sandbox.nodes import fmp
fX = theano.config.floatX
# TODO change me
node = "fmp2"
compute_grad = True
if node == "mp":
n = tn.MaxPool2DNode("mp", pool_size=(2, 2))
elif node == "fmp":
n = fmp.DisjointPseudorandomFractionalMaxPool2DNode("fmp1",
fmp_alpha=1.414,
fmp_u=0.5)
elif node == "fmp2":
n = fmp.OverlappingRandomFractionalMaxPool2DNode("fmp2",
pool_size=(1.414, 1.414))
else:
assert False
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=(1, 1, 32, 32)),
n]
).network()
if compute_grad:
i = network["i"].get_vw("default").variable
s = network["s"].get_vw("default").variable
fn = network.function(["i"], [T.grad(s.sum(), i)])
else:
fn = network.function(["i"], ["s"])
x = np.random.randn(1, 1, 32, 32).astype(fX)
"""
20150924 results:
%timeit fn(x)
no grad:
mp: 33.7 us
fmp: 77.6 us
fmp2: 1.91 ms
with grad:
mp: 67.1 us
fmp: 162 us
fmp2: 2.66 ms
"""
| 21.178571 | 78 | 0.559022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 231 | 0.194772 |
507377bdb80d620ea7bb76b4c3004a6064e282e7 | 1,271 | py | Python | warranty.py | TannerFilip/dell-warranty | 4118154c13b52f66a0a8e37f89abe2ca793f50ba | [
"Unlicense"
] | null | null | null | warranty.py | TannerFilip/dell-warranty | 4118154c13b52f66a0a8e37f89abe2ca793f50ba | [
"Unlicense"
] | null | null | null | warranty.py | TannerFilip/dell-warranty | 4118154c13b52f66a0a8e37f89abe2ca793f50ba | [
"Unlicense"
] | null | null | null | #!/bin/python3
'''
USAGE:
$ python warranty.py list.txt
1. Set "apikey" to the API key obtained from Dell TechDirect.
2. Create file with serial numbers, one per line, no line endings
'''
import time
import requests
import fileinput
import sys
# Serial-number list, one serial per line, given as the first CLI argument.
fileName = sys.argv[1]

api_url = 'https://sandbox.api.dell.com/support/assetinfo/v4/getassetwarranty'
# Replace "apikey" with your key obtained from Dell TechDirect.
headers = {"Content-Type": "application/x-www-form-urlencoded",
           "apikey": "aaaaaaaaaaaaaaaaaaaaaaaaaa",
           "accept": "application/json"}

with open(fileName, 'r') as serial_file:
    for line in serial_file:
        # BUGFIX: the original used the raw line, so the trailing newline was
        # sent to the API as part of the ID and broke the printed output.
        serial = line.strip()
        if not serial:
            continue  # skip blank lines
        payload = {"ID": serial}
        try:
            r = requests.post(api_url, headers=headers, data=payload).json()
            print('Serial:', serial, 'Expires',
                  r['AssetWarrantyResponse'][0]['AssetEntitlementData'][0]['EndDate'])
        # Narrowed from a bare except: network failures, non-JSON bodies, and
        # responses missing the expected warranty structure.
        except (requests.RequestException, ValueError, LookupError, TypeError):
            print('Invalid ID:', serial)
        # Too lazy to make it actually output a csv, this is good enough
        time.sleep(1)  # Wait a sec before doing it again, so to not hit the API too quickly
| 29.55814 | 132 | 0.576711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 644 | 0.506688 |
507554c410c6294385578ca1eb9a80f4ffa123e6 | 683 | py | Python | tests/telemetry/decorators_test.py | trevorgrayson/telemetry | efeca1b2062f40a2557f2c030dc687f546f3c60b | [
"MIT"
] | 3 | 2020-03-20T20:04:37.000Z | 2021-08-23T20:11:10.000Z | tests/telemetry/decorators_test.py | trevorgrayson/telemetry | efeca1b2062f40a2557f2c030dc687f546f3c60b | [
"MIT"
] | 2 | 2019-06-10T08:02:05.000Z | 2019-06-10T08:02:22.000Z | tests/telemetry/decorators_test.py | trevorgrayson/telemetry | efeca1b2062f40a2557f2c030dc687f546f3c60b | [
"MIT"
] | null | null | null | from pytest import raises
import telemetry
# Metric key used by the runtime-measuring decorator below.
REPORT_NAME = 'some.key'

# Module-level telemeter named after this module; provides the decorators under test.
meter = telemetry.get_telemeter(__name__)
@meter.catch('some_report')
def exception_prone(ii):
    """Return the reciprocal of ``ii``; raises ZeroDivisionError for ii == 0."""
    reciprocal = 1 / ii
    return reciprocal
class TestsExcept:
    """Exercises ``meter.catch``: values pass through and exceptions propagate."""

    def test_catch_pass(self):
        # The decorated function still returns its normal value.
        result = exception_prone(2)
        assert result == 0.5

    def test_catch_throws(self):
        # The underlying exception is not swallowed by the catch decorator.
        with raises(ZeroDivisionError):
            exception_prone(0)
@meter.runtime(REPORT_NAME)
def a_slow_function(a, b):
    """Trivially add ``a`` and ``b``; wrapped by the runtime-measuring decorator."""
    total = a + b
    return total
class TestRuntime:
    """Checks that ``meter.runtime`` passes the wrapped function's result through."""

    def test_constants(self):
        x, y = 1, 2
        # The decorator must not alter the return value.
        assert a_slow_function(x, y) == x + y
        # assert PROBE.name == REPORT_NAME
        # assert PROBE.value != 0
| 18.459459 | 42 | 0.645681 | 419 | 0.61347 | 0 | 0 | 139 | 0.203514 | 0 | 0 | 82 | 0.120059 |