max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
util_func.py | Wagnerd6/secondarydata | 0 | 12757751 | def txt_category_to_dict(category_str):
    """
    Parameters
    ----------
    category_str: str of nominal values from dataset meta information
    Returns
    -------
    dict of the nominal values and their one letter encoding
    Example
    -------
    "bell=b, convex=x" -> {"bell": "b", "convex": "x"}
    """
    # split on whitespace; each token looks like "name=x," (trailing comma kept)
    string_as_words = category_str.split()
    result_dict = {}
    for word in string_as_words:
        seperator_pos = word.find("=")
        # everything before "=" is the category name
        key = word[: seperator_pos]
        # only the first character after "=" is the encoding (drops trailing comma)
        val = word[seperator_pos + 1 :][0]
        result_dict[key] = val
    return result_dict
def replace_comma_in_text(text):
    """
    Parameters
    ----------
    text: str of nominal values for a single mushroom species from primary_data_edited.csv
    Returns
    -------
    str where commas outside of angular brackets are replaced with semicolons
    (commas inside brackets are kept as-is)
    Example
    -------
    text = "[a, b], [c, d]"
    return: "[a, b]; [c, d]"
    """
    pieces = []
    outside_brackets = True
    for char in text:
        # track whether we are currently inside a [...] group
        if char == '[':
            outside_brackets = False
        elif char == ']':
            outside_brackets = True
        pieces.append(';' if char == ',' and outside_brackets else char)
    return "".join(pieces)
def generate_str_of_list_elements_with_indices(list_name, list_size):
    """
    Parameters
    ----------
    list_name: str, name of the list
    list_size: int, number of list elements
    Returns
    -------
    str of list elements with angular bracket indexation separated with commas
    Example
    -------
    list_name = "l"
    list_size = 3
    return = "l[0], l[1], l[2]"
    """
    # joining avoids the trailing-separator trim of the append-in-a-loop version
    return ", ".join("{}[{}]".format(list_name, index) for index in range(list_size))
# checks if a str is a number that could be interpreted as a float
def is_number(val):
    """
    Parameters
    ----------
    val: str, arbitrary input
    Returns
    -------
    bool, True if val is interpretable as a float and False else
    """
    # EAFP: let float() decide instead of pattern-matching the string ourselves
    try:
        float(val)
    except ValueError:
        return False
    return True
# Manual smoke test: prints the parsed dict for a sample category string.
if __name__ == "__main__":
    print(txt_category_to_dict("cobwebby=c, evanescent=e, flaring=r, grooved=g"))
| 3.734375 | 4 |
utils.py | dreamer121121/artnet-pytorch | 3 | 12757752 | import torch
import cv2
import os
import numpy as np
from torch.utils.data import DataLoader, SubsetRandomSampler
def one_hot_encode(index, num):
    """Return a 1-D float tensor of length `num` with 1 at `index`, 0 elsewhere."""
    encoding = [0] * num
    encoding[index] = 1
    return torch.Tensor(encoding)
def extract_frames(video_path, save_path, fps=5):
    """Extract frames from a video at roughly `fps` frames per second.

    Parameters
    ----------
    video_path: path to the source video file ('/'-separated)
    save_path: directory under which a folder named after the video is created
    fps: target number of extracted frames per second of video

    Returns
    -------
    str: path of the folder the frames were written to.

    Raises
    ------
    IOError: if the target folder already exists or the video cannot be read.
    """
    video_name = video_path.split('/')[-1].split('.')[0]
    extracted_path = os.path.join(save_path, video_name)
    if not os.path.isdir(extracted_path):
        os.mkdir(extracted_path)
    else:
        raise IOError(f'Folder {extracted_path} already exists')
    cap = cv2.VideoCapture(video_path)
    # Bug fix: validate the capture BEFORE querying its properties (the
    # original read CAP_PROP_FPS first, which is meaningless on a bad file).
    if not cap.isOpened():
        raise IOError(f'Cannot read {video_path}. The file is an invalid video or does not exist.')
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    # Bug fix: clamp to >= 1 -- if the requested fps exceeds the video fps the
    # floor division yields 0 and `count % 0` crashes.
    frame_interval = max(video_fps // fps, 1)
    count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            count += 1
            if count % frame_interval == 0:
                cv2.imwrite(os.path.join(extracted_path, f'{video_name}{count:003}.jpg'), frame)
    finally:
        # Bug fix: release the capture handle (resource leak in the original).
        cap.release()
    return extracted_path
| 2.578125 | 3 |
web_fragments/__init__.py | Jawayria/web-fragments | 0 | 12757753 | <gh_stars>0
"""
Web fragments.
"""
# Package version, consumed by packaging tooling and importers.
__version__ = '0.3.2'
# Django <2.0-style app-config hook (newer Django auto-discovers AppConfig).
default_app_config = 'web_fragments.apps.WebFragmentsConfig'  # pylint: disable=invalid-name
| 1.023438 | 1 |
app/models/__init__.py | abhisuri97/mhealth | 0 | 12757754 | """
These imports enable us to make all defined models members of the models
module (as opposed to just their python files)
"""
from .user import * # noqa
from .miscellaneous import * # noqa
from .exercise import * # noqa
from .resource import * # noqa
from .medication import * # noqa
from .nutrition import * # noqa
from .plan import * # noqa
| 1.460938 | 1 |
bldr/config/env.py | bldr-cmd/bldr-cmd | 0 | 12757755 | # This is used by Environment to populate its env
# Due to circular dependencies it cannot reference other parts of bldr
import toml
import os
import platform
import shutil
from pathlib import Path
def default(dotbldr_path: str) -> dict:
    """
    Load the config by merging the local config on top of included deps config
    The load order - Last in wins
    * .bldr/brick/*/config/config.toml
    * .bldr/config/config.toml
    * .bldr/brick/*/config/{BLDR_ENV}.toml
    * .bldr/config/{BLDR_ENV}.toml

    Returns the merged configuration dict.
    NOTE(review): dict.update merges only at the top level -- a dep config
    that defines a 'bldr' table would clobber the 'cmd' entry set below;
    confirm that is acceptable.
    """
    bldr_env = os.getenv('BLDR_ENV')
    full_config = {
        'bldr': {
        }
    }
    # Record the path of the bldr executable (if found) so builds can re-invoke it.
    bldr_path = shutil.which('bldr')
    if bldr_path is not None:  # idiom fix: was `!= None`
        if platform.system() == 'Windows':
            # Build tooling expects MinGW/MSYS-style paths on Windows.
            bldr_path = to_mingw_path(bldr_path)
        full_config['bldr']['cmd'] = bldr_path
    # 1) Per-brick base configs (lowest precedence).
    deps_config_files = Path(dotbldr_path).glob("./brick/*/config/config.toml")
    for dep_config_file in deps_config_files:
        dep_env = load_if_exists(dep_config_file)
        full_config.update(dep_env)
    # 2) Local base config overrides brick configs.
    local_config = load_if_exists(f"{dotbldr_path}/config/config.toml")
    full_config.update(local_config)
    # 3/4) Environment-specific configs win over everything; local env last.
    if bldr_env is not None:  # idiom fix: was `!= None`
        e_deps_config_files = Path(dotbldr_path).glob(f"./brick/*/config/{bldr_env}.toml")
        for e_dep_config_file in e_deps_config_files:
            e_dep_env = load_if_exists(e_dep_config_file)
            full_config.update(e_dep_env)
        e_env = load_if_exists(f"{dotbldr_path}/config/{bldr_env}.toml")
        full_config.update(e_env)
    return full_config
def load_if_exists(path_str: str) -> dict:
    """Parse the TOML file at `path_str`; return an empty dict when it is absent."""
    config_path = Path(path_str)
    if not config_path.exists():
        return {}
    return toml.load(config_path)
def to_mingw_path(win_path: str):
    # Convert a Windows path to MinGW/MSYS form:
    # c:\some\nested\path -> /c/some/nested/path
    # 012345
    # (slice [0] is the drive letter, [3:] skips the ":/" after it)
    win_path = win_path.replace('\\','/')
    return f"/{win_path[0].lower()}/{win_path[3:]}" | 2.203125 | 2 |
actiereg/newapp.py | albertvisser/actiereg | 0 | 12757756 | <reponame>albertvisser/actiereg<filename>actiereg/newapp.py
"""doel: opvoeren van een nieuw project in de "probleemregistratie"
gebruik: python newapp.py <name> [copy|activate|loaddata|undo] [xml-file]
zonder tweede argument maakt dit een platte kopie van de basisapplicatie
(m.a.w. de opties copy + activate uit het onderstaande)
met een * als eerste argument voert dit het bovengenoemde uit voor alle
nog niet geactiveerde applicaties in apps.dat
om een aangepaste kopie te maken kun je als tweede argument opgeven:
'copy': kopieer programmatuur en templates (om aan te passen voorafgaand
aan "activate")
'activate': tabellen toevoegen aan de database en de applicatie gereed melden
zodat deze op het startscherm verschijnt
'loaddata': tabellen (settings en data) initieel vullen vanuit opgegeven
xml-file (niet meer mogelijk na activeren)
'undo': als er iets niet naar wens kun je een en ander ongedaan maken
door dit als tweede argument op te geven
"""
import os
import sys
import pathlib
import subprocess
import shutil
BASE = pathlib.Path(__file__).parent.resolve()
sys.path.append(str(BASE.parent))
APPS = BASE / "apps.dat"
USAGE = __doc__
ROOT_FILES = ('__init__.py', 'models.py', 'views.py', 'urls.py', 'admin.py',
'initial_data.json')
SYMLINK = (ROOT_FILES[1], ROOT_FILES[3])
TEMPLATE_FILES = ('index', 'actie', 'tekst', 'voortgang', 'select', 'order',
'settings')
class NewProj:
"""applicatiefiles kopieren en aanpassen
"""
def __init__(self, *args):
self.root = self.action = self.load_from = ''
self.actiondict = {'copy': self.do_copy,
'activate': self.activate,
'undo': self.undo,
'all': self.do_all}
if args:
self.msg = self.parse_args(*args)
def __str__(self):
name = str(self.__class__).split()[1][1:-2]
return "{}('{}', '{}', '{}')".format(
name, self.root, self.action, self.load_from)
    def do_stuff(self):
        """perform actions

        Prints the parse error (if any) and bails out; otherwise dispatches
        to the handler registered for self.action in self.actiondict.
        """
        if self.msg:
            print(self.msg)
            return
        print('performing actions for project "{}":'.format(self.root))
        self.actiondict[self.action]()
        print("ready.")
        if self.action in ("activate", "all", "undo"):
            # these actions change the deployed app set -> server must reload
            print("\nRestart the server to see the changes.")
    def parse_args(self, *args):
        """parse arguments: project name, action to be taken, name of data file
        (only for loaddata)

        Returns an error message (str) on failure, or None on success;
        messages ending in '*' signal "also print the usage text".
        Side effects: sets self.root, self.action, self.load_from, self.app.
        """
        self.root = args[0]
        self.action = args[1] if len(args) > 1 else "all"
        if self.action == "loaddata":
            if len(args) != 3:
                return "foute argumenten voor loaddata*"
            else:
                self.load_from = args[2]
        elif self.action not in ("copy", "activate", "undo", "all"):
            return "foutief tweede argument (actie)*"
        elif len(args) > 2:
            return "teveel argumenten*"
        found = False
        # apps.dat lines: 'X;<root>;...' = already activated, '_;<root>;...' = not yet
        with APPS.open() as oldfile:
            for line in oldfile:
                if 'X;{};'.format(self.root) in line:
                    found = True
                    if self.action not in ("loaddata", "undo"):
                        return "dit project is al geactiveerd"
                if "_;{};".format(self.root) in line:
                    found = True
                    if self.action == "undo":
                        return "dit project is nog niet geactiveerd"
                if found:
                    break
            else:  # if not found:
                return "project niet gevonden"
        # ok, rt, self.app, oms = line.strip().split(";")
        # third field of the matched line is the app name
        self.app = line.strip().split(';')[2]
    def do_all(self):
        """perform all-in-one

        copy + activate, then loaddata only when an input file was given.
        """
        self.do_copy()
        self.activate()
        if self.load_from:
            self.loaddata()
    def do_copy(self):
        """copy programs and templates

        Creates BASE/<root> with copies of the _basic app files, plus minimal
        templates that just extend the corresponding basic/ templates.
        """
        print("creating and populating app root...")
        (BASE / self.root).mkdir()
        for name in ROOT_FILES:
            self.copyover(name)
        if self.root != "actiereg":  # why?
            # NOTE(review): guard presumably protects the base project's own
            # templates from being overwritten -- confirm intent.
            print("creating templates...")
            newdir = BASE / "templates" / self.root
            newdir.mkdir()
            for name in TEMPLATE_FILES:
                fname = newdir / "{}.html".format(name)
                fname.write_text('{{% extends "basic/{}.html" %}}\n'.format(name))
    def activate(self):
        """database aanpassen en initiele settings data opvoeren
        (adjust the database and load initial settings data)

        Registers the app in settings.py, syncs the Django database, loads
        the fixture and creates <root>_admin / <root>_user permission groups.
        """
        self.update_settings()
        # self.update_urlconf()
        sys.path.append(BASE)
        os.environ["DJANGO_SETTINGS_MODULE"] = 'actiereg.settings'
        ## import settings
        # imported here: Django models need DJANGO_SETTINGS_MODULE set first
        from django.contrib.auth.models import Group, Permission
        print("modifying database...")
        self.call_manage(["syncdb"])
        print("loading inital data...")
        self.call_manage(['loaddata', '{}/initial_data.json'.format(self.root)])
        print("setting up authorisation groups...")
        # admin group: every permission defined for this app
        group = Group.objects.create(name='{}_admin'.format(self.root))
        for permit in Permission.objects.filter(
                content_type__app_label="{}".format(self.root)):
            group.permissions.add(permit)
        # user group: only permissions on the day-to-day models
        group = Group.objects.create(name='{}_user'.format(self.root))
        for permit in Permission.objects.filter(
                content_type__app_label="{}".format(self.root)).filter(
                content_type__model__in=['actie', 'event', 'sortorder',
                                         'selection']):
            group.permissions.add(permit)
        self.update_appreg()
    def loaddata(self):
        """load data from probreg (if present)

        Rewrites loaddata.py with '_basic' replaced by this project's root,
        then imports it to load settings and data from the given XML file.
        NOTE(review): the rewritten module is saved as load_data.py but the
        ORIGINAL loaddata module is imported afterwards -- confirm intent.
        """
        print("getting probreg data")
        with open("loaddata.py") as oldfile:
            with open("load_data.py", "w") as newfile:
                for line in oldfile:
                    newfile.write(line.replace("_basic", self.root))
        import loaddata as ld
        print("loading settings...", end=', ')
        ld.loadsett(self.load_from)
        print("ready.")
        print("loading data...", end=', ')
        ld.loaddata(self.load_from, self.root)
    def undo(self):
        """reverse updates

        Removes the copied app directory and templates and rolls back the
        settings.py and apps.dat registrations.
        """
        print("removing app root...")
        shutil.rmtree(str(BASE / self.root))
        if self.root != "actiereg":
            print("removing templates...")
            shutil.rmtree(str(BASE / "templates" / self.root))
        self.update_settings()
        # self.update_urlconf()
        self.update_appreg()
    def update_settings(self):
        """toevoegen aan settings.py (INSTALLED_APPS)
        (add to / remove from INSTALLED_APPS in settings.py)

        For non-undo actions the app entry is inserted just before the
        closing ')' of INSTALLED_APPS; for undo the existing entry is dropped.
        """
        print("updating settings...")
        old, new = self.backup(BASE / "settings.py")
        schrijf = False  # "write": True while inside the INSTALLED_APPS block
        with old.open() as oldfile:
            with new.open("w") as newfile:
                new_line = " 'actiereg.{}',\n".format(self.root)
                for line in oldfile:
                    if line.strip() == "INSTALLED_APPS = (":
                        schrijf = True
                    if schrijf and line.strip() == ")" and self.action != "undo":
                        newfile.write(new_line)
                        schrijf = False
                    if line == new_line and self.action == "undo":
                        # skip (i.e. delete) this app's entry
                        schrijf = False
                    else:
                        newfile.write(line)
def update_urlconf(self):
"""toevoegen aan urls.py (urlpatterns)
"""
print("updating urlconfs...")
old, new = backup(BASE / "urls.py")
schrijf = False
with old.open() as oldfile:
with new.open("w") as newfile:
new_line = " url(r'^{0}/', include('actiereg.{0}.urls')),\n".format(
self.root)
for line in oldfile:
if line.strip().startswith('urlpatterns'):
schrijf = True
if schrijf and line.strip() == "" and self.action != "undo":
newfile.write(new_line)
schrijf = False
if line == new_line and self.action == "undo":
schrijf = False
else:
newfile.write(line)
    def update_appreg(self):
        """update apps registration

        Flips this project's status flag in apps.dat: '_' (inactive) -> 'X'
        (active), or back again for undo.
        """
        print("updating apps registration...")
        old, new = self.backup(APPS)
        with old.open() as _in:
            with new.open("w") as _out:
                for app in _in:
                    # each line: <status>;<root>;<name>;<description>
                    ok, test_root, test_name, desc = app.split(";")
                    if test_root == self.root:
                        if self.action == "undo":
                            _out.write(app.replace("X;", "_;"))
                        else:
                            _out.write(app.replace("_;", "X;"))
                    else:
                        _out.write(app)
    def copyover(self, name):
        """copy components for project
        arguments: project name, file name, apps file as a Path object

        models.py and urls.py are symlinked to the _basic versions; all other
        files are copied with the '_basic'/'basic'/'demo' markers rewritten
        for this project.
        """
        copyfrom = BASE / "_basic" / name
        copyto = BASE / self.root / name
        if name in SYMLINK:  # make symlink instead of real copy
            copyto.symlink_to(copyfrom)
            return
        with copyfrom.open() as oldfile:
            with copyto.open("w") as newfile:
                for line in oldfile:
                    if "basic" in line:
                        line = line.replace("_basic", self.root)
                    if line == 'ROOT = "basic"\n':
                        newfile.write('ROOT = "{}"\n'.format(self.root))
                    elif line == 'NAME = "demo"\n':
                        newfile.write('NAME = "{}"\n'.format(str(self.app)))
                    else:
                        newfile.write(line)
    @staticmethod
    def backup(fn):
        """remove current file as it is to be (re)written, saving a backup if necessary
        input is a Path object
        return file and backup as Path objects

        NOTE: the original file is renamed to '<name>~' and returned FIRST
        (to read from); the second element is the original path (to write to).
        """
        new = pathlib.Path(str(fn) + "~")
        fn.replace(new)
        return new, fn
    @staticmethod
    def call_manage(command):
        """call django command manager

        command: list of arguments appended to `python manage.py`.
        NOTE(review): the return code is not checked, so failures go unnoticed.
        """
        subprocess.run(["python", "manage.py"] + command)
def allnew():
    """create all new projects

    Runs the full 'all' action for every project in apps.dat that is still
    marked inactive ('_' status). Returns an error message, or '' on success.
    """
    # Bug fix: `result` was only assigned on error, so a fully successful run
    # raised UnboundLocalError at the return; initialise it (the original also
    # had an unused `ret` variable, now removed).
    result = ''
    with open(APPS) as oldfile:
        newapps = [line.split(";")[1] for line in oldfile if line.startswith('_')]
    for app in newapps:
        build = NewProj(app, 'all')
        if build.msg:
            result = ': '.join((app, build.msg))
            break
        build.do_stuff()
    return result
# Script entry point: '*' processes every inactive project; otherwise the
# arguments describe a single project/action (see module docstring).
if __name__ == "__main__":
    if len(sys.argv) == 1:
        result = 'insufficient arguments*'
    elif sys.argv[1] == "*":
        result = allnew()
    else:
        # Bug fix: the original assigned the NewProj INSTANCE to `result` and
        # never ran it, then crashed on `result.endswith`; run the action and
        # report its (string) message instead.
        builder = NewProj(*sys.argv[1:])
        result = builder.msg or ''
        if not result:
            builder.do_stuff()
    if result:
        if result.endswith('*'):
            # trailing '*' means: also print the usage text (fixes the ']n' typo)
            result = '\n\n'.join((result[:-1], USAGE))
        print(result)
| 2.296875 | 2 |
playstore_review_crawler/crawler/management/commands/save_app_reviews.py | abel-castro/playstore_review_crawler | 2 | 12757757 | from django.core.management.base import BaseCommand
from playstore_review_crawler.crawler.crawler import Crawler
from config.settings.base import (
APP_ID,
AMOUNT_REVIEWS_TO_SAVE,
REVIEWS_LANGUAGE,
REVIEWS_COUNTRY,
)
class Command(BaseCommand):
    help = "Stores app reviews in the database."

    def handle(self, *args, **options):
        """Crawl the configured Play Store app and persist its reviews."""
        crawler = Crawler(app_id=APP_ID)
        # all crawl parameters come from config.settings.base
        crawler.save_reviews(
            amount=AMOUNT_REVIEWS_TO_SAVE,
            language=REVIEWS_LANGUAGE,
            country=REVIEWS_COUNTRY,
        )
| 2.015625 | 2 |
authlib/auth.py | jmrafael/Streamlit-Authentication | 0 | 12757758 | # see: https://discuss.streamlit.io/t/authentication-script/14111
from os import environ as osenv
import time
from functools import wraps
import logging
import streamlit as st
from . import const, aes256cbcExtended, CookieManager
# ------------------------------------------------------------------------------
# Globals
# Encryption parameters and storage backend come from the environment.
# (Reconstructs the sanitised `<PASSWORD>` placeholder: the original clearly
# read the value from the environment like its siblings below.)
ENC_PASSWORD = osenv.get('ENC_PASSWORD')
ENC_NONCE = osenv.get('ENC_NONCE')
STORAGE = osenv.get('STORAGE', 'SQLITE')
COOKIE_NAME = osenv.get('COOKIE_NAME')

store = None  # storage provider, lazily created in _auth()/admin()
cookie_manager = CookieManager()
# ------------------------------------------------------------------------------
# Wrapping session state in a function ensures that 'user' (or any attribute really) is
# in the session state and, in my opinion, works better with Streamlit's execution model,
# e.g. if state is deleted from cache, it'll be auto-initialized when the function is called
def auth_state():
    """Return st.session_state, guaranteeing the 'user' key exists (None = logged out)."""
    if 'user' not in st.session_state:
        st.session_state.user = None
    return st.session_state
auth_message = st.empty()
def set_auth_message(msg, type=const.INFO, delay=0.5, show_msgs=True):
    """Show a transient auth status message with the widget matching `type`.

    The global `auth_message` is pointed at the chosen Streamlit widget,
    optionally invoked, and finally reset to an empty placeholder.
    """
    global auth_message
    widget_by_type = {
        const.WARNING: st.warning,
        const.SUCCESS: st.success,
        const.ERROR: st.error,
    }
    # default type == const.INFO (and any unknown type) -> st.info
    auth_message = widget_by_type.get(type, st.info)
    if show_msgs:
        auth_message(msg)
    if delay:
        time.sleep(delay)
    auth_message = st.empty()
# Easy inteceptor for auth
def requires_auth(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if auth_state().user != None:
return fn(*args, **kwargs)
else:
set_auth_message(f'{fn.__name__} requires authentication!')
return wrapper
@requires_auth
def logout():
    """Clear the session user and drop the remember-me cookie."""
    auth_state().user = None
    cookie_manager.delete(COOKIE_NAME)
def authenticated():
    """Return True when a user is currently logged in."""
    # idiom fix: identity comparison with None (was `!= None`)
    return auth_state().user is not None
# ------------------------------------------------------------------------------
# Main auth service
def _auth(sidebar=True, show_msgs=True):
    """Render the login/logout UI and return the logged-in username (or None).

    sidebar: render the widgets in the sidebar instead of the main page.
    show_msgs: surface transient status messages via set_auth_message.
    """
    global store
    # Lazily create the storage provider once per process; never create the
    # DB here (admin() is responsible for creation).
    if store == None:
        try:
            from authlib.repo.storage_factory import StorageFactory
            # get the store
            store = StorageFactory().get_provider(STORAGE, allow_db_create=False, if_table_exists='ignore')
            # check the table
            ctx = {'fields': "*", 'modifier': "LIMIT 1"}
            store.query(context=ctx)
        except Exception as ex:
            logging.warning(f">>> Storage exception <<<\n`{str(ex)}`")
            store = None
            set_auth_message(
                "Auth DB Not Found. Consider running admin script in standalone mode to generate it.",
                type=const.WARNING,
                show_msgs=True
            )
    # Choose the widget factories once so the same code renders either location.
    header_widget = st.sidebar.subheader if sidebar else st.subheader
    username_widget = st.sidebar.text_input if sidebar else st.text_input
    password_widget = st.sidebar.text_input if sidebar else st.text_input
    remember_me_widget = st.sidebar.checkbox if sidebar else st.checkbox
    su_widget = st.sidebar.checkbox if sidebar else st.checkbox
    logout_widget = st.sidebar.button if sidebar else st.button
    header_widget('Authentication')
    if auth_state().user == None:
        # Try cookie-based ("remember me") login first.
        cookie_manager.get_all()
        user_in_cookie = cookie_manager.get(cookie=COOKIE_NAME)
        if user_in_cookie:
            ctx={'fields': "*", 'conds': f"username=\"{user_in_cookie[const.USERNAME]}\""}
            data = store.query(context=ctx)
            user = data[0] if data else None
            # After checking for the presence of a user name, encrypted passwords are compared with each other.
            if user and user[const.PASSWORD] == user_in_cookie[const.PASSWORD]:
                auth_state().user = user
                set_auth_message('Logging in...', type=const.SUCCESS, show_msgs=show_msgs)
                st.experimental_rerun()
        # Fall back to the interactive username/password form.
        set_auth_message('Please log in', delay=None, show_msgs=True)
        username = username_widget("Enter username", value='')
        ctx={'fields': "*", 'conds': f"username=\"{username}\""}
        data = store.query(context=ctx)
        user = data[0] if data else None
        if user:
            # stored password is encrypted; decrypt to compare with the input
            decrypted_password = aes256cbcExtended(ENC_PASSWORD, ENC_NONCE).decrypt(user[const.PASSWORD])
            password = password_widget("Enter password", type="password")
            if password == decrypted_password:
                # TODO: set active state and other fields then update DB
                # Update user state, password is encrypted so secure
                auth_state().user = user
                set_auth_message('Logging in...', type=const.SUCCESS, show_msgs=show_msgs)
                st.experimental_rerun()
    if auth_state().user != None:
        set_auth_message('Logged in', delay=None, show_msgs=True)
        if logout_widget('Logout'):
            logout()
            set_auth_message('Logging out...', type=const.WARNING, show_msgs=show_msgs)
            st.experimental_rerun()
        if auth_state().user[const.SU] == 1:
            if su_widget(f"Super users can edit user DB"):
                _superuser_mode()
        # Keep the remember-me cookie in sync with the checkbox.
        if cookie_manager.get(cookie=COOKIE_NAME):
            if not remember_me_widget("Remember me", value=True):
                cookie_manager.delete(COOKIE_NAME)
        else:
            if remember_me_widget("Remember me", value=False):
                cookie_manager.set(COOKIE_NAME, auth_state().user)
    return auth_state().user[const.USERNAME] if auth_state().user != None else None
def auth(*args, **kwargs):
    """Public entry point: render _auth inside an expander on the page."""
    with st.expander('Authentication', expanded=True):
        return _auth(*args, **kwargs)
# ------------------------------------------------------------------------------
# Helpers
@requires_auth
def _list_users():
    """Show all users (username, stored password, su flag) in a table."""
    st.subheader('List users')
    ctx = {'fields': "username, password, su"}
    data = store.query(context=ctx)
    if data:
        st.table(data)
    else:
        st.write("`No entries in authentication database`")
@requires_auth
def _create_user(name=const.BLANK, pwd=const.BLANK, is_su=False, mode='create'):
    """Create a new user or (mode='edit') update an existing one.

    name/pwd: prefilled username and (already encrypted) password.
    In edit mode an empty password field means "keep the stored password";
    in create mode a password is mandatory.
    (Reconstructs the sanitised `<PASSWORD>` placeholders: encryption uses
    aes256cbcExtended with ENC_PASSWORD/ENC_NONCE, mirroring the decrypt
    call in _auth.)
    """
    st.subheader('Create user')
    username = st.text_input("Enter Username (required)", value=name)
    if mode == 'create':
        password = st.text_input("Enter Password (required)", value=pwd, type='password')
    elif mode == 'edit':
        # Do not display password as DB stores them encrypted
        # Passwords will always be created anew in edit mode
        password = st.text_input("Enter Replacement Password (required)", value=const.BLANK)
    su = 1 if st.checkbox("Is this a superuser?", value=is_su) else 0
    if st.button("Update Database") and username:
        if password:  # new password given: encrypt before storing
            encrypted_password = aes256cbcExtended(ENC_PASSWORD, ENC_NONCE).encrypt(password)
        elif mode == 'edit':  # reuse the stored (already encrypted) one
            encrypted_password = pwd
        elif mode == 'create':  # Must have a password
            st.write("`Database NOT Updated` (enter a password)")
            return
        # TODO: user_id, password, logged_in, expires_at, logins_count, last_login, created_at, updated_at, su
        ctx = {'data': {"username": f"{username}", "password": f"{encrypted_password}", "su": su}}
        store.upsert(context=ctx)
        st.write("`Database Updated`")
@requires_auth
def _edit_user():
    """Pick an existing user and reuse _create_user in 'edit' mode."""
    st.subheader('Edit user')
    ctx = {'fields': "username"}
    userlist = [row[const.USERNAME] for row in store.query(context=ctx)]
    # leading blank entry => nothing selected by default
    userlist.insert(0, "")
    username = st.selectbox("Select user", options=userlist)
    if username:
        ctx = {'fields': "username, password, su", 'conds': f"username=\"{username}\""}
        user_data = store.query(context=ctx)
        _create_user(
            name=user_data[0][const.USERNAME],
            pwd=user_data[0][const.PASSWORD],
            is_su=user_data[0][const.SU],
            mode='edit'
        )
@requires_auth
def _delete_user():
    """Pick an existing user and remove it from the store."""
    st.subheader('Delete user')
    ctx = {'fields': "username"}
    userlist = [row[const.USERNAME] for row in store.query(context=ctx)]
    # leading blank entry => nothing selected by default
    userlist.insert(0, "")
    username = st.selectbox("Select user", options=userlist)
    if username:
        if st.button(f"Remove {username}"):
            ctx = {'conds': f"username=\"{username}\""}
            store.delete(context=ctx)
            st.write(f"`User {username} deleted`")
@requires_auth
def _superuser_mode():
    """Admin console: dispatch to the view/create/edit/delete sub-pages."""
    st.header(f'Super user mode (store = {STORAGE})')
    modes = {
        "View": _list_users,
        "Create": _create_user,
        "Edit": _edit_user,
        "Delete": _delete_user,
    }
    mode = st.radio("Select mode", modes.keys())
    modes[mode]()
# ------------------------------------------------------------------------------
# Allows storage provider to be overriden programmatically
def override_env_storage_provider(provider):
    """Programmatically override the STORAGE provider chosen via the environment.

    provider: one of 'SQLITE' or 'AIRTABLE'.
    Raises ValueError for anything else.
    """
    # Bug fix: the original wrapped an `assert` in a bare `except`. `assert`
    # disappears under `python -O` (no validation at all), and the bare except
    # also swallowed unrelated errors; use an explicit membership check.
    if provider not in ('SQLITE', 'AIRTABLE'):
        raise ValueError(f'Unkown provider `{provider}`')
    global STORAGE
    STORAGE = provider
# ------------------------------------------------------------------------------
# Service run from standalone admin app - allows (SQLite) DB to be created
def admin():
    """Standalone superuser entry point; may CREATE the auth DB if missing."""
    st.warning("Warning, superuser mode")
    if st.checkbox("I accept responsibility and understand this mode can be used to initialise and make changes to the authentication database"):
        from authlib.repo.storage_factory import StorageFactory
        global store
        # unlike _auth(), allow_db_create=True here
        store = StorageFactory().get_provider(STORAGE, allow_db_create=True, if_table_exists='ignore')
        # Fake the admin user token to enable superuser mode (password field isn't required)
        auth_state().user = {'username': 'admin', 'su': 1}
        _superuser_mode()
| 2.734375 | 3 |
eval.py | Nicolik/SimpleCNNClassifier | 11 | 12757759 | <gh_stars>10-100
############################################
# <NAME> (2020)
#
# This is the script which contains the code for evaluating the CNN.
# You have to train your CNN before. See train.py
############################################
import torch
import torchvision
from sklearn.metrics import confusion_matrix, classification_report
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from config import *
from net import Net
from utils import make_pred_on_dataloader, get_classes, subsample_dataset
#%% Create Train Dataloaders
# NOTE: both loaders use the *test* transform (no augmentation) because this
# script only evaluates an already-trained model; shuffle=False keeps the
# prediction order deterministic.
print("Creating training dataset from ", train_folder)
train_dataset = torchvision.datasets.ImageFolder(
    root=train_folder,
    transform=transform_test
)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                               shuffle=False, num_workers=num_workers)
val_dataset = torchvision.datasets.ImageFolder(
    root=val_folder,
    transform=transform_test
)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size,
                                             shuffle=False, num_workers=num_workers)
# Build parallel lists of class names and label indices for sklearn's reports.
classes, classes_dict = get_classes()
labels_idxs = []
target_names = []
for el in classes_dict:
    target_names.append(el)
    labels_idxs.append(classes_dict[el])
#%% Load the net and use eval mode
logs_dir = './logs'
net = Net(in_channels=CHANNELS, out_features=NUM_CLASSES)
PATH = os.path.join(logs_dir, 'dog_vs_cat.pth')
net.load_state_dict(torch.load(PATH))
# Move the net on CUDA
cuda = torch.cuda.is_available()
if cuda:
    net = net.cuda()
net = net.eval()
#%% Make prediction on train set
y_true_train, y_pred_train = make_pred_on_dataloader(net, train_dataloader)
#%% Compute metrics on train set
cf_train = confusion_matrix(y_true_train, y_pred_train, labels=labels_idxs)
cr_train = classification_report(y_true_train, y_pred_train, target_names=target_names, output_dict=True)
print(classification_report(y_true_train, y_pred_train, target_names=target_names, output_dict=False))
#%% Make prediction on val set
y_true_test, y_pred_test = make_pred_on_dataloader(net, val_dataloader)
#%% Compute metrics on val set
cf_test = confusion_matrix(y_true_test, y_pred_test, labels=labels_idxs)
cr_test = classification_report(y_true_test, y_pred_test, target_names=target_names, output_dict=True)
print(classification_report(y_true_test, y_pred_test, target_names=target_names, output_dict=False)) | 2.609375 | 3 |
scripts/sorno_amazon_wishlist_scrape.py | hermantai/sorno-py-scripts | 0 | 12757760 | #!/usr/bin/env python
"""A script to scrape items from an Amazon wishlist. The script only works for
wishlists which are "Public". You can change the settings by following the
instruction in:
http://www.amazon.com/gp/help/customer/display.html?nodeId=501094
Copyright 2014 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
from collections import namedtuple
import logging
import os
import sys
import urlparse
import requests
from lxml import html
from sorno import loggingutil
from sorno import consoleutil
_LOG = logging.getLogger(__name__)
_PLAIN_LOGGER = None # will be created in main()
_PLAIN_ERROR_LOGGER = None # will be created in main()
Item = namedtuple('Item', 'id title url')
class App(object):
WISHLIST_PAGE_TEMPLATE = (
"https://www.amazon.com/gp/registry/wishlist"
+ "/{wishlist_id}/?page={page_number}"
)
HEADERS = {
'user-agent': 'Mozilla/5.0 ;Windows NT 6.1; WOW64; Trident/7.0; rv:11.0; like Gecko',
}
    def __init__(self, wishlist_id):
        # wishlist_id: the id segment copied from the Amazon wishlist URL
        self.wishlist_id = wishlist_id
    def run(self):
        """Walk the wishlist pages, printing each unseen item as a numbered row."""
        # stores the id's of items
        seen_items = set()
        page_number = 1
        item_number = 1
        num_of_empty_page_reached = 0
        while True:
            items = self.get_items_from_page_num(page_number)
            rows = []
            for item in items:
                if item.id in seen_items:
                    _LOG.debug("Seen title %s, skip it", item.title)
                else:
                    seen_items.add(item.id)
                    rows.append(
                        {
                            'no.': str(item_number),
                            'title': item.title,
                            'url': item.url if item.url else "",
                        }
                    )
                    item_number += 1
                    num_of_empty_page_reached = 0
            if not rows:
                num_of_empty_page_reached += 1
                if num_of_empty_page_reached >= 3:
                    # All items are seen in the fetch, so we are done
                    # Sometimes amazon returns 0 items even we havn't reached
                    # to the end, so give it a few trials
                    break
                else:
                    # NOTE: page_number is deliberately NOT advanced here, so
                    # the same page is retried on the next iteration.
                    continue
            data_printer = consoleutil.DataPrinter(
                rows,
                headers=('no.', 'title', 'url'),
                delimiter='\t',
                print_func=_PLAIN_LOGGER.info,
            )
            data_printer.print_result(
                style=consoleutil.DataPrinter.PRINT_STYLE_PLAIN
            )
            page_number += 1
    def get_items_from_page_num(self, num):
        """Fetch wishlist page `num` and parse it into a list of Item tuples."""
        url = self.WISHLIST_PAGE_TEMPLATE.format(
            wishlist_id=self.wishlist_id,
            page_number=num,
        )
        _LOG.debug("Fetch from: %s", url)
        wishlist_page = requests.get(url)
        wishlist_page_html = wishlist_page.text
        _PLAIN_ERROR_LOGGER.debug(wishlist_page_html)
        tree = html.fromstring(wishlist_page_html)
        # each wishlist entry ("Idea" or Amazon product) lives in one of these h5's
        all_h5_nodes = tree.xpath("//div[@class='a-row a-size-small']/h5")
        items = []
        for h5_node in all_h5_nodes:
            try:
                item = self._get_item_from_idea_h5_node(h5_node)
                if not item:
                    item = self._get_item_from_amazon_item_h5_node(h5_node)
                if item:
                    items.append(item)
                else:
                    # unrecognised markup: keep going, but dump the snippet
                    _LOG.warn("Fail to retrieve an item for snippet")
                    _PLAIN_ERROR_LOGGER.warn("===== Start of snippet =====")
                    _PLAIN_ERROR_LOGGER.warn(html.tostring(h5_node))
                    _PLAIN_ERROR_LOGGER.warn("===== End of snippet =====")
            except ValueError as ex:
                _LOG.exception("Fail to retrieve an item: %s", ex)
                _PLAIN_ERROR_LOGGER.warn("===== Start of snippet =====")
                _PLAIN_ERROR_LOGGER.warn(html.tostring(h5_node))
                _PLAIN_ERROR_LOGGER.warn("===== End of snippet =====")
        return items
    def _get_item_from_idea_h5_node(self, h5_node):
        """
        Gets the item in a H5 html node that contains an Idea. Returns
        None if an Idea cannot be found.
        The H5 html node supposes to be like the following, "{param}" denotes
        the parameters of the item:
        <h5>
          ...
          <span id="itemName_{item id}">{item title}</span>
          ...
        </h5>
        """
        span_nodes = h5_node.xpath(
            ".//span[contains(@id, 'itemName_')]"
        )
        if not span_nodes:
            return None
        span_node = span_nodes[0]
        item_title = self.get_text_from_element(span_node)
        # the id attribute is "itemName_<id>"; Ideas carry no product URL
        item_id = span_node.attrib['id'].split('itemName_')[1]
        return Item(id=item_id, title=item_title, url=None)
    def _get_item_from_amazon_item_h5_node(self, h5_node):
        """
        Gets the item in a H5 html node that contains an Amazon item. Returns
        None if an Amazon item cannot be found. An Amazon item is an item in
        wishlish that is sold in Amazon.
        The H5 html node supposes to be like the following, "{param}" denotes
        the parameters of the item:
        <h5>
          ...
          <a id="itemName_{item id}" href="{item url}">{item title}</a>
          ...
        </h5>
        """
        anchor_nodes = h5_node.xpath(".//a[contains(@id, 'itemName_')]")
        if anchor_nodes:
            # This is an Amazon item node
            anchor_node = anchor_nodes[0]
            # hrefs are site-relative; prepend the host for an absolute URL
            item_url = "http://www.amazon.com" + anchor_node.attrib['href']
            item_title = self.get_text_from_element(anchor_node).strip()
            item_id = anchor_node.attrib['id'].split('itemName_')[1]
            return Item(id=item_id, title=item_title, url=item_url)
        return None
    def same_item_lists(self, prev_items, items):
        """Return True when both anchor-node lists point at the same items.

        Compares only the query string of each href, pairwise and in order.
        NOTE(review): relies on the Python 2 style `urlparse` module import.
        """
        if prev_items is None or len(prev_items) != len(items):
            return False
        for prev, cur in zip(prev_items, items):
            prev_query = urlparse.urlparse(prev.attrib['href']).query
            cur_query = urlparse.urlparse(cur.attrib['href']).query
            if prev_query != cur_query:
                return False
        return True
def get_text_from_element(self, node):
"""
Return a plain text representation of an html node.
"""
text_segments = []
self._collect_text_from_element(node, text_segments)
return "".join(text_segments)
def _collect_text_from_element(self, node, text_segments):
"""
Collect text from node and all its children recursively and put into
text_segments as a list of strings.
"""
if node.tag.lower() == "br":
text_segments.append(os.linesep)
if node.text:
text_segments.append(node.text)
for child in node:
self._collect_text_from_element(child, text_segments)
if node.tail:
text_segments.append(node.tail)
def parse_args(cmd_args):
    """Build the argparse parser and parse `cmd_args` (argv minus program name)."""
    description = """
    A script to scrape items from an Amazon wishlist. The script only
    works for wishlists which are "Public". You can change the settings by
    following the instruction in:
    http://www.amazon.com/gp/help/customer/display.html?nodeId=501094
    """
    parser = argparse.ArgumentParser(
        description=description,
    )
    parser.add_argument(
        "--debug",
        action="store_true",
    )
    parser.add_argument(
        "wishlist_id",
        help="When you look at the URL of your wishlist, it's something like"
        + " https://www.amazon.com/gp/registry/wishlist/<wishlist id>/ref=cm_wl_list_o_0?"
        + ", so just copy the wishlist id for this argument",
    )
    args = parser.parse_args(cmd_args)
    return args
def main():
    """Entry point: configure logging, then scrape the given wishlist."""
    # Plain loggers are module-level so other helpers can write to them.
    global _PLAIN_LOGGER, _PLAIN_ERROR_LOGGER
    args = parse_args(sys.argv[1:])
    # Configure the module logger first so subsequent setup is captured.
    loggingutil.setup_logger(_LOG, debug=args.debug)
    _PLAIN_LOGGER = loggingutil.create_plain_logger("PLAIN")
    _PLAIN_ERROR_LOGGER = loggingutil.create_plain_logger(
        "PLAIN_ERROR",
        stdout=False,
    )
    app = App(args.wishlist_id)
    app.run()
# Run the scraper only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 3.109375 | 3 |
hard/03_postal_codes.py | UltiRequiem/hacker-rank-python | 4 | 12757761 | <reponame>UltiRequiem/hacker-rank-python<filename>hard/03_postal_codes.py
from re import match, findall

# Matches a 6-digit integer with no leading zero (range 100000-999999).
regex_integer_in_range = r"^[1-9][\d]{5}$"
# Matches a digit that reappears two positions later; the lookahead keeps
# overlapping matches (e.g. both pairs in "121212").
regex_alternating_repetitive_digit_pair = r"(\d)(?=\d\1)"

if __name__ == "__main__":
    # A valid postal code is a 6-digit number with fewer than two
    # alternating repetitive digit pairs. The original left the two regex
    # variables as "_________" placeholders and duplicated the patterns
    # inline; they are now defined once and reused.
    string = input()
    print(bool(match(regex_integer_in_range, string)
               and len(findall(regex_alternating_repetitive_digit_pair, string)) < 2))
discordbot/commands/minigames/flood_cmd.py | EnderXVIII/MiniGamesBot | 20 | 12757762 | <reponame>EnderXVIII/MiniGamesBot
from discordbot.categories.minigames import Minigames
from discordbot.commands.command import Command
from discordbot.discordminigames.singleplayergames.flood_dc import FloodDiscord
from discordbot.user.singleplayersession import SinglePlayerSession
class FloodCommand(Command):
    """Bot command that starts a single-player 'flood' minigame session."""
    bot = None  # injected by the framework before commands are invoked
    name = "flood"
    help = "Get the grid to turn into one color by iteratively flooding it, check out the rules with the rules command."
    brief = "Get the grid to turn into one color by iteratively flooding it."
    args = ""
    category = Minigames

    @classmethod
    async def invoke(cls, context):
        """Announce the game in the channel and register a new session."""
        message = await context.send("Starting **flood** minigame")
        session = SinglePlayerSession(message, "flood", FloodDiscord, context.author)
        await cls.bot.game_manager.start_session(session)
| 2.5625 | 3 |
Space-Combat-Sim/SpaceSim/Components/Component.py | tannervoas742/Simulations | 0 | 12757763 | import cupy as cp
from SpaceSim.BackEndSources.DataStructures import MathList, TypeCounter
from SpaceSim.BackEndSources.Utils import TableToText
class _Component:
def __init__(self, *Modules):
self.Name = 'NONE'
self.Types = []
self.Stats = {}
self.Define('Armor', 0)
self.Define('Endurance', 0)
self.Define('Shielding', MathList(0, 0))
self.Define('Energy', 0)
self.Define('Power', 0)
self.Define('Tick', 0)
self.Define('Tock', 0)
self.Define('Energy Storage', 0)
self.Define('Cargo Storage', 0)
self.Define('Bandwidth', 0)
self.Define('Mass', 0)
self.Define('Thrust', 0)
self.Define('Ammunition', TypeCounter())
ModuleCounter = {}
for Item in Modules:
self = self + Item
if Item.Name not in ModuleCounter:
ModuleCounter[Item.Name] = 0
ModuleCounter[Item.Name] += 1
if len(Modules) > 0:
self.Name = '{} ({})'.format(self.Name, ', '.join(list(map(lambda Key: '{}x{}'.format(ModuleCounter[Key], Key))))).replace('(, ', '(')
def Define(self, Stat, Value=None):
if Value != None:
self.Stats[Stat] = Value
def Finish(self):
for Ammo in self.Stats['Ammunition']:
self.Stats['Mass'] += Ammo().Mass * self.Stats['Ammunition'][Ammo]
def __add__(self, Other):
for Key in self.Stats:
if type(self.Stats[Key]) == dict:
for SubKey in Other.Stats[Key]:
if SubKey in self.Stats[Key]:
self.Stats[Key][SubKey] += Other.Stats[Key][SubKey]
else:
self.Stats[Key][SubKey] = Other.Stats[Key][SubKey]
else:
self.Stats[Key] = self.Stats[Key] + Other.Stats[Key]
return self
def __repr__(self):
Output = ''
Output += '{}\n'.format(self.Name)
Output += TableToText(self.Stats) + '\n'
for Ammo in self.Stats['Ammunition']:
Output += str(Ammo()) + '\n'
return Output.rstrip()
| 2.265625 | 2 |
pywechat/main.py | OctavianLee/pywechat | 3 | 12757764 | # -*- coding: utf-8 -*-
from pywechat.services.wechat_shake import ShakeService
from pywechat.services.wechat_card import CardService
from pywechat.excepts import CodeBuildError
class WechatService(object):
    """This class is a role of factory.

    Attributes:
        app_id: the app id of a wechat account.
        app_secret: the app secret of a wechat account.
    """

    def __init__(self, app_id, app_secret):
        """Initializes the class."""
        self.__app_id = app_id
        self.__app_secret = app_secret

    def init_service(self, service_name):
        """Init the service of wechat by service_name.

        Args:
            service_name: the name of wechat's service
                (currently 'Shake' or 'Card').

        Returns:
            the service of wechat

        Raises:
            CodeBuildError: if service_name is unknown.
        """
        services = {
            'Shake': ShakeService,
            'Card': CardService
        }
        # dict.has_key() was removed in Python 3; the `in` operator works
        # on both Python 2 and Python 3.
        if service_name not in services:
            raise CodeBuildError('Service name wrong')
        return services[service_name](self.__app_id, self.__app_secret)
| 2.8125 | 3 |
disco/extensions/pydss_simulation/time_series_impact_analysis_configuration.py | daniel-thom/disco | 2 | 12757765 |
import copy
import logging
from disco.extensions.pydss_simulation.pydss_configuration import \
PyDssConfiguration
from disco.extensions.pydss_simulation.pydss_inputs import PyDssInputs
from disco.pydss.common import ConfigType
from jade.utils.utils import load_data
logger = logging.getLogger(__name__)
def auto_config(inputs, **kwargs):
    """Create a configuration from all available inputs.

    ``inputs`` may be a path string (wrapped into PyDssInputs) or an
    already-constructed PyDssInputs object; ``kwargs`` are forwarded to
    the PyDssConfiguration constructor. Returns the configuration with
    one job added per available input job.
    """
    if isinstance(inputs, str):
        inputs = PyDssInputs(inputs)
    config = PyDssConfiguration(inputs, **kwargs)
    for job in config.inputs.iter_jobs():
        config.add_job(job)
    # NOTE(review): exports configuration is currently disabled — confirm
    # whether it should be re-enabled before relying on EXPORTS settings.
    #exports = load_data(exports_filename)
    #config.set_pydss_config(ConfigType.EXPORTS, exports)
    return config
| 2 | 2 |
applications/SwimmingDEMApplication/test_examples/swimming_DEM_run_all_benchmarks.py | lcirrott/Kratos | 2 | 12757766 | from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import os
import sys
import platform
kratos_benchmarking_path = '../../../benchmarking'
sys.path.append(kratos_benchmarking_path)
swimming_dem_scripts_path = 'hydrodynamic_forces'
sys.path.append(swimming_dem_scripts_path)
import benchmarking
os.chdir(swimming_dem_scripts_path)
def Run():
    """Run the swimming_DEM benchmark and return its report text.

    Executes hydrodynamic_forces.py with the interpreter appropriate for
    the platform / Python major version, then collects the report written
    to hydrodynamic_forces.txt.
    """
    print("\nStarting swimming_DEM Benchmarks..............\n")
    Text = ""
    # Pick the interpreter invocation per platform and Python version.
    if platform.system() == "Windows":
        os.system("python hydrodynamic_forces.py " + " > BenchTemp.txt")
    else:
        if sys.version_info >= (3, 0):
            os.system("python3 hydrodynamic_forces.py " + " > BenchTemp.txt")
        else:
            os.system("python -3 hydrodynamic_forces.py " + " > BenchTemp.txt")
    os.remove("BenchTemp.txt")
    # Context manager ensures the report file is closed even on error
    # (the original leaked the handle on a read failure).
    with open("hydrodynamic_forces.txt") as f:
        file_contents = f.read()
    Text += file_contents.rstrip("\n")
    Text += "\n\n\n"
    return Text
# Print the benchmark report when executed directly.
if __name__ == '__main__':
    print(Run())
| 2.390625 | 2 |
Algos/egyptianFraction.py | Lin0818/py-study-notebook | 1 | 12757767 | <filename>Algos/egyptianFraction.py
"""
Greedy Algorithm for Egyptian Fraction
Every positive fraction can be represented as sum of unique unit fractions. A fraction is unit fraction
if numerator is 1 and denominator is a positive integer, for example 1/3 is a unit fraction. Such a
representation is called Egyptian Fraction as it was used by ancient Egyptians.
Following are few examples:
Egyptian Fraction Representation of 2/3 is 1/2 + 1/6
Egyptian Fraction Representation of 6/14 is 1/3 + 1/11 + 1/231
Egyptian Fraction Representation of 12/13 is 1/2 + 1/3 + 1/12 + 1/156
We can generate Egyptian Fractions using Greedy Algorithm. For a given number of the form ‘nr/dr’ where dr > nr,
first find the greatest possible unit fraction, then recur for the remaining part. For example, consider 6/14,
we first find ceiling of 14/6, i.e., 3. So the first unit fraction becomes 1/3, then recur for (6/14 – 1/3) i.e.,
4/42.
"""
import math
def egyptianFraction(nr, dr):
    """Print the Egyptian fraction expansion of nr/dr and return the
    list of unit-fraction denominators.

    Greedy algorithm: repeatedly take the largest unit fraction
    1/ceil(dr/nr) and recur on the remainder.

    params
        nr  numerator (non-negative integer)
        dr  denominator (positive integer, dr > nr)
    """
    res = []
    while nr != 0:
        # Exact integer ceiling of dr/nr. math.ceil(dr/nr) goes through a
        # float and can be wrong for large operands.
        x = -(-dr // nr)
        res.append(x)
        # Remainder: nr/dr - 1/x == (nr*x - dr) / (dr*x)
        nr = nr * x - dr
        dr = dr * x
    if res:
        # Identical output to the original per-term print loop.
        print(' + '.join('1/{}'.format(d) for d in res))
    return res
# Demo: print the expansion for the sample fractions from the docstring.
if __name__ == '__main__':
    egyptianFraction(6, 14)
    egyptianFraction(12, 13)
    egyptianFraction(2, 3)
    egyptianFraction(5, 6)
| 4.0625 | 4 |
Process.py | caetera/RawMetaData | 0 | 12757768 | <gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
from os import path, listdir, chdir
from FileRecord import record_from_file, SQLABase, FileRecord
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from datetime import datetime
def recursiveListing(filepath, process_file):
    """
    Walk *filepath* depth-first and call *process_file* with the full
    path of every file whose name ends in "raw" (case-insensitive).
    """
    for entry in listdir(filepath):
        fullpath = path.join(filepath, entry)
        if path.isdir(fullpath):
            print("Entering folder: {}".format(fullpath))
            recursiveListing(fullpath, process_file)
        elif entry.lower().endswith("raw"):
            process_file(fullpath)
def build_path(dirpath, outputfile):
    """Scan *dirpath* recursively and record every RAW file in a new
    SQLite database at *outputfile*.

    Returns 1 on argument/IO errors, None on success.
    """
    # Validate the input directory before touching the database.
    if not path.exists(dirpath):
        print("Path does not exist!")
        return 1
    if not path.isdir(dirpath):
        print("Path is not folder")
        return 1
    # for now SQLite
    try:
        output = create_engine("sqlite:///{}".format(outputfile))
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        print("Cannot create output file")
        return 1
    SQLABase.metadata.create_all(output)
    session = sessionmaker(bind=output)()

    def record_wrapper(filename):
        # Parse one RAW file and persist its metadata record.
        record = record_from_file(filename)
        session.add(record)
        session.commit()

    chdir(dirpath)
    recursiveListing(".", record_wrapper)
def update_path(dirpath, outputfile):
    """Scan *dirpath* recursively and add records for RAW files that are
    new, or replace records that previously ended in an error, in an
    existing SQLite database at *outputfile*.

    Returns 1 on argument/IO errors, None on success.
    """
    if not path.exists(dirpath):
        print("Path does not exist!")
        return 1
    if not path.isdir(dirpath):
        print("Path is not folder")
        return 1
    # for now SQLite
    try:
        output = create_engine("sqlite:///{}".format(outputfile))
    except Exception:
        # Narrowed from a bare `except:` (see build_path).
        print("Cannot open output file")
        return 1
    session = sessionmaker(bind=output)()
    # Paths already recorded successfully ...
    known_files = set(record.VDSPath for record in
                      session.query(FileRecord.VDSPath).filter(FileRecord.Error != True))
    # ... and paths whose previous processing ended in an error.
    error_files = set(record.VDSPath for record in
                      session.query(FileRecord.VDSPath).filter(FileRecord.Error == True))

    def record_wrapper(filename):
        if filename not in known_files:
            record = record_from_file(filename)
            if filename in error_files:
                # Replace the stale error record with the fresh one.
                print("{} is error".format(filename))
                session.query(FileRecord).filter(FileRecord.VDSPath == filename).delete()
            else:
                print("{} is new".format(filename))
            session.add(record)
            session.commit()
        else:
            print("{} is known".format(filename))

    chdir(dirpath)
    recursiveListing(".", record_wrapper)
if __name__ == "__main__":
if len(sys.argv) > 3:
start = datetime.now()
if sys.argv[1].lower() == "build":
build_path(sys.argv[2], sys.argv[3])
elif sys.argv[1].lower() == "update":
update_path(sys.argv[2], sys.argv[3])
else:
print("Unknown subcommand: {}".format(sys.argv[1]))
end = datetime.now()
print("Start: {}; End: {}; Duration: {}".format(start, end, end - start))
else:
print("Usage: Process.py [subcommand] [Path to process] [SQLite database file]") | 2.640625 | 3 |
wiki/web/user.py | CrispyCabot/440Project | 0 | 12757769 | """
User classes & helpers
~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import json
import binascii
import hashlib
import sqlite3
from functools import wraps
from flask import current_app
from flask_login import current_user
from config import USER_DIR
class UserManager(object):
    """A very simple user Manager, that saves it's data as json.

    Flags (active/roles/authenticated) live in users.json; credentials
    live in the SQLite database Users.sqlite.
    """

    def __init__(self, path):
        # NOTE(review): `path` is ignored; both stores come from USER_DIR.
        self.file = os.path.join(USER_DIR, 'users.json')
        self.dbConnection = sqlite3.connect(USER_DIR + '/Users.sqlite')

    def read(self):
        """Return the users dict from the JSON file ({} if missing)."""
        if not os.path.exists(self.file):
            return {}
        with open(self.file) as f:
            data = json.loads(f.read())
        return data

    def write(self, data):
        """Persist the users dict to the JSON file."""
        with open(self.file, 'w') as f:
            f.write(json.dumps(data, indent=2))

    def add_user(self, name, password,
                 active=True, roles=None, authentication_method=None):
        """Create a user in the JSON store and the SQLite database.

        Returns a User on success, or None if the database insert fails
        (e.g. because the username already exists).
        """
        # BUG FIX: a mutable default (roles=[]) is shared across calls.
        if roles is None:
            roles = []
        users = self.read()
        if authentication_method is None:
            authentication_method = get_default_authentication_method()
        # Only these flags are stored in users.json; the password is only
        # in the database.
        new_user = {
            'active': active,
            'roles': roles,
            'authenticated': False
        }
        users[name] = new_user
        self.write(users)
        # Insert the new user into the database; the insert is skipped
        # (None returned) when someone has the same username.
        try:
            dbCur = self.dbConnection.cursor()
            dbCur.execute("""
            INSERT INTO users (username,password)
            VALUES( (?) , (?));
            """, (name, password))
            self.dbConnection.commit()
            dbCur.close()
            dbCur = self.dbConnection.cursor()
            # BUG FIX: parameters must be a sequence — the original passed
            # the bare string `name`, which sqlite3 treats as a sequence
            # of single characters. Also reuse self.dbConnection instead
            # of opening (and leaking) a second connection.
            dbCur.execute("SELECT username FROM users WHERE username = ?",
                          (name,))
            userdata = dbCur.fetchone()
            dbCur.close()
            return User(self, name, userdata)
        except Exception:
            # Preserve the original best-effort contract: signal failure
            # with None instead of propagating the error.
            return None

    def get_user(self, name):
        """Return the named User from the JSON store, or None."""
        users = self.read()
        userdata = users.get(name)
        if not userdata:
            return None
        return User(self, name, userdata)

    def delete_user(self, name):
        """Remove the user's row from the database."""
        dbCur = self.dbConnection.cursor()
        dbCur.execute("""
        DELETE FROM users
        WHERE username = ?
        """, (name,))
        self.dbConnection.commit()
        dbCur.close()

    def update(self, name, userdata):
        """Replace the stored JSON data for *name* with *userdata*."""
        data = self.read()
        data[name] = userdata
        self.write(data)
class User(object):
    """A single wiki user backed by a UserManager's JSON store."""

    def __init__(self, manager, name, data):
        self.manager = manager
        self.name = name
        self.data = data

    def get(self, option):
        """Return the stored value for *option* (None if absent)."""
        return self.data.get(option)

    def set(self, option, value):
        """Store *option* -> *value* and persist immediately."""
        self.data[option] = value
        self.save()

    def save(self):
        """Write this user's data back through the manager."""
        self.manager.update(self.name, self.data)

    def is_authenticated(self):
        return self.data.get('authenticated')

    def is_active(self):
        return self.data.get('active')

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.name

    # Not used.
    def check_password(self, password):
        """Return True, return False, or raise NotImplementedError if the
        authentication_method is missing or unknown."""
        method = self.data.get('authentication_method', None)
        if method is None:
            method = get_default_authentication_method()
        # See comment in UserManager.add_user about authentication_method.
        if method == 'hash':
            return check_hashed_password(password, self.get('hash'))
        if method == 'cleartext':
            return self.get('password') == password
        raise NotImplementedError(method)
def get_default_authentication_method():
    # Falls back to 'cleartext' when the Flask config does not set one.
    return current_app.config.get('DEFAULT_AUTHENTICATION_METHOD', 'cleartext')
def make_salted_hash(password, salt=None):
    """Return hex(salt) + sha512(salt[:32] + password + salt[32:]).

    The first 128 hex characters of the result encode the 64-byte salt,
    the remaining 128 the digest (see check_hashed_password).
    """
    if not salt:
        salt = os.urandom(64)
    # BUG FIX (Python 3): hashlib requires bytes, and the original
    # concatenated bytes (hexlify) with str (hexdigest) — a TypeError.
    if isinstance(password, str):
        password = password.encode('utf-8')
    d = hashlib.sha512()
    d.update(salt[:32])
    d.update(password)
    d.update(salt[32:])
    return binascii.hexlify(salt).decode('ascii') + d.hexdigest()
def check_hashed_password(password, salted_hash):
    # Recover the salt from the first 128 hex chars, re-hash the candidate
    # password with it, and compare against the stored value.
    salt = binascii.unhexlify(salted_hash[:128])
    return make_salted_hash(password, salt) == salted_hash
def protect(f):
    """View decorator: require login when the wiki is configured PRIVATE."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if current_app.config.get('PRIVATE') and not current_user.is_authenticated:
            return current_app.login_manager.unauthorized()
        return f(*args, **kwargs)
    return wrapper
| 3.109375 | 3 |
tests/basics/generator1.py | geowor01/micropython | 7 | 12757770 | def f(x):
print('a')
y = x
print('b')
while y > 0:
print('c')
y -= 1
print('d')
yield y
print('e')
print('f')
return None
# Driving the generator with a for loop consumes all yielded values.
for val in f(3):
    print(val)

#gen = f(3)
#print(gen)
#print(gen.__next__())
#print(gen.__next__())
#print(gen.__next__())
#print(gen.__next__())

# test printing, but only the first chars that match CPython
print(repr(f(0))[0:17])
print("PASS")
mmtbx/command_line/mp_geo.py | rimmartin/cctbx_project | 0 | 12757771 |
from __future__ import division
import sys
from mmtbx.validation.molprobity import mp_geo
if __name__ == "__main__":
mp_geo.run(sys.argv[1:])
| 1.140625 | 1 |
TopCompiler/RandomGen.py | CodeClubLux/TopCompiler | 4 | 12757772 | <filename>TopCompiler/RandomGen.py<gh_stars>1-10
file = open("test/TopCompiler/"+input("filename: "), mode= "w")
sizeOfFunc = input("size of func: ")
lines = input("lines of code: ")
out = []
for i in range(int(int(lines) / int(sizeOfFunc)+2)):
out.append("def func"+str(i)+"() =\n")
for c in range(int(sizeOfFunc)):
out.append(' println "hello world" \n')
file.write("".join(out))
file.close() | 3.21875 | 3 |
app.py | Gornak40/algolimp | 0 | 12757773 | <gh_stars>0
from flask import Flask, render_template, url_for, request, session, redirect, send_from_directory, send_file
from cfApi import *
from random import randint
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
# from flask_ngrok import run_with_ngrok
interfaces = [
'name',
'other',
'description',
'urls',
'paste',
'user'
]
app = Flask(__name__)
# run_with_ngrok(app)
app.config['SECRET_KEY'] = generate_password_hash('<NAME>')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////home/gornak40/code/yandex/algolimp/data/algo.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Algo(db.Model):
    """ORM model for one algorithm entry published by a user."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True, nullable=False)
    other = db.Column(db.String, nullable=False)
    description = db.Column(db.Text, nullable=False)
    urls = db.Column(db.Text, nullable=False)
    paste = db.Column(db.String, unique=True, nullable=False)
    # Codeforces handle of the author (set from session['user']['handle']).
    user = db.Column(db.String, nullable=False)
def updateDB():
    """Create a new Algo row from the draft in session['algo'] and commit."""
    A = Algo()
    A.name = session['algo']['name']
    A.other = session['algo']['other']
    A.description = session['algo']['description']
    A.urls = session['algo']['urls']
    A.paste = session['algo']['paste']
    A.user = session['algo']['user']
    db.session.add(A)
    db.session.commit()
def changeDB(note):
    """Merge a modified Algo row into the session and commit."""
    db.session.merge(note)
    db.session.commit()
def deleteDB(note):
    """Delete an Algo row and commit."""
    db.session.delete(note)
    db.session.commit()
@app.route('/')
def index():
    """Render the landing page."""
    context = {'title': 'Home', 'content_title': 'Home'}
    return render_template('index.html', **context, **session)
@app.route('/algo/adding/', methods=['post', 'get'])
def adding():
    """Form for adding a new algorithm entry.

    The partially filled form is kept as a draft in session['algo'];
    the entry is written to the database only once all fields listed in
    `interfaces` are present.
    """
    kwargs = dict()
    kwargs['title'] = 'Adding'
    kwargs['content_title'] = 'Adding'
    # Anonymous visitors are sent to the message page asking to log in.
    if not session.get('user'):
        session['message'] = 'Уважаемый аноним, залогиньтесь!'
        return redirect('/message')
    if request.method == 'POST':
        session['algo'] = session.get('algo', dict())
        name = request.form.get('name')
        other = request.form.get('other')
        description = request.form.get('description')
        urls = request.form.get('urls')
        paste = request.form.get('paste')
        user = session.get('user')
        # Only overwrite draft fields the user actually submitted.
        if name:
            session['algo']['name'] = name
        if other:
            session['algo']['other'] = other
        if description:
            session['algo']['description'] = description
        if urls:
            session['algo']['urls'] = urls
        if paste:
            session['algo']['paste'] = paste
        session['algo']['user'] = user['handle']
        if (all([session['algo'].get(i) for i in interfaces])):
            # Draft complete: persist it to the database.
            updateDB()
            session.pop('algo')
            return redirect('/algo')
    return render_template('adding.html', **kwargs, **session)
@app.route('/algo/', methods=['post', 'get'])
def algo():
    """List all entries, newest first, with optional search filtering.

    A search query starting with '@' filters by author handle; anything
    else is matched via searchFunc.
    """
    kwargs = dict()
    kwargs['title'] = 'Algo'
    kwargs['content_title'] = 'Algo'
    # Discard any stale adding/editing draft.
    session.pop('algo') if session.get('algo') else None
    if request.method == 'POST' and session.get('auth'):
        return redirect('/algo/adding')
    data = reversed(Algo.query.all())
    flt = request.args.get('search', '')
    if not flt or flt[0] != '@':
        data = filter(lambda x: searchFunc(x, flt), data)
    else:
        data = filter(lambda x: authorFunc(x, flt[1:]), data)
    return render_template('algo.html', **kwargs, **session, data=data)
@app.route('/login/', methods=['post', 'get'])
def login():
    """Two-step login: submit a Codeforces handle, then the 4-digit code
    that was generated (session['secret']) to prove ownership."""
    kwargs = dict()
    kwargs['title'] = 'Login'
    kwargs['content_title'] = 'Login'
    kwargs['sendMail'] = sendMail
    if request.method == 'POST':
        handle = request.form.get('handle')
        code = request.form.get('code')
        if handle:
            # Step 1: look up the handle and issue a fresh secret code.
            session['handle'] = handle
            session['user'] = userInfo(handle)
            session['color'] = getColor(session['user'])
            session['email'] = session['user'].get('email') if session['user'] else None
            session['secret'] = str(randint(1000, 9999))
            session['auth'] = False
        session['code'] = code if code else str()
        # Step 2: the submitted code must match the issued secret.
        if session['code'] and session['secret'] and session['code'] == session['secret']:
            session['auth'] = True
    return render_template('login.html', **kwargs, **session)
@app.route('/logout/')
def logout():
    """Clear the whole session (handle, auth flag, codes) and show the
    logout page."""
    kwargs = dict()
    kwargs['title'] = 'Logout'
    kwargs['content_title'] = 'Logout'
    # session.clear() supersedes the old field-by-field pops; the dead
    # commented-out code has been removed.
    session.clear()
    return render_template('logout.html', **kwargs, **session)
@app.route('/reading/<id>/')
def reading(id):
    """Show one algorithm entry, looked up by its numeric id."""
    kwargs = dict()
    ans = Algo.query.filter(Algo.id == int(id))[0]
    kwargs['title'] = ans.name
    kwargs['content_title'] = ans.name
    kwargs['getTitle'] = getTitle
    return render_template('reading.html', **kwargs, **session, note=ans)
@app.route('/edit/<id>/', methods=['post', 'get'])
def edit(id):
    """Edit an existing entry; only its author (while logged in) may save.

    Reuses the adding form, pre-filled from the stored row; on a complete
    POST the row is updated and committed via changeDB.
    """
    kwargs = dict()
    kwargs['title'] = 'Edit'
    kwargs['content_title'] = 'Edit'
    note = Algo.query.filter(Algo.id == int(id))[0]
    # Pre-fill the form with the current values of the entry.
    kwargs['algo'] = dict()
    kwargs['algo']['name'] = note.name
    kwargs['algo']['other'] = note.other
    kwargs['algo']['description'] = note.description
    kwargs['algo']['urls'] = note.urls
    kwargs['algo']['paste'] = note.paste
    if not session.get('auth'):
        session['message'] = 'Уважаемый аноним, залогиньтесь!'
        return redirect('/message')
    if not session.get('user') or session['user']['handle'] != note.user:
        session['message'] = 'Вы не являетесь автором этой записи!'
        return redirect('/message')
    if request.method == 'POST':
        session['algo'] = session.get('algo', dict())
        name = request.form.get('name')
        other = request.form.get('other')
        description = request.form.get('description')
        urls = request.form.get('urls')
        paste = request.form.get('paste')
        user = session.get('user')
        # Only overwrite draft fields the user actually submitted.
        if name:
            session['algo']['name'] = name
        if other:
            session['algo']['other'] = other
        if description:
            session['algo']['description'] = description
        if urls:
            session['algo']['urls'] = urls
        if paste:
            session['algo']['paste'] = paste
        session['algo']['user'] = user['handle']
        if (all([session['algo'].get(i) for i in interfaces])):
            # Draft complete: copy the values onto the row and commit.
            session['algo']['name'] = note.name = name
            session['algo']['other'] = note.other = other
            session['algo']['description'] = note.description = description
            session['algo']['urls'] = note.urls = urls
            session['algo']['paste'] = note.paste = paste
            changeDB(note)
            session.pop('algo')
            return redirect('/algo')
    kwargs.pop('algo') if session.get('algo') else None
    return render_template('adding.html', **kwargs, **session, note=note)
@app.route('/delete/<id>/')
def delete(id):
    """Delete an entry; only its author (while logged in) may do so."""
    note = Algo.query.filter(Algo.id == int(id))[0]
    if not session.get('auth'):
        session['message'] = 'Уважаемый аноним, залогиньтесь!'
        return redirect('/message')
    if session.get('user')['handle'] == note.user:
        deleteDB(note)
        return redirect('/algo')
    session['message'] = 'Вы не являетесь автором этой записи!'
    return redirect('/message')
@app.route('/message/')
def message():
    """Render the generic message page (text comes from the session)."""
    context = {'title': 'Message', 'content_title': 'Message'}
    return render_template('message.html', **context, **session)
@app.route('/sponsors/')
def sponsors():
    """Render the sponsors page."""
    context = {'title': 'Sponsors', 'content_title': 'Sponsors'}
    return render_template('sponsors.html', **context, **session)
@app.route('/downloads/')
def downloads():
    """Render the downloads page."""
    context = {'title': 'Downloads', 'content_title': 'Downloads'}
    return render_template('downloads.html', **context, **session)
@app.route('/downloads/<name>/')
def download(name):
    # NOTE(review): `name` is currently ignored — every download serves
    # make.py. Confirm whether per-name files were intended here.
    #return name
    return send_file('static/code/make.py', as_attachment=True)
# debug=True enables auto-reload and in-browser tracebacks (development only).
if __name__ == '__main__':
    app.run(debug=True)
legacy/scripts/dx_srjd_calibration.py | pvkraju80/leo | 99 | 12757774 | #
# Calibration of Square-Root Jump Diffusion (SRJD)
# model to VSTOXX call options with DX Analytics
#
# All data from www.eurexchange.com
#
# (c) Dr. <NAME>
# Listed Volatility and Variance Derivatives
#
import dx
import time
import numpy as np
import pandas as pd
import datetime as dt
import scipy.optimize as spo
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import matplotlib
matplotlib.rcParams['font.family'] = 'serif'
from copy import deepcopy
# importing the data
h5 = pd.HDFStore('../data/vstoxx_march_2014.h5', 'r')
vstoxx_index = h5['vstoxx_index']
vstoxx_futures = h5['vstoxx_futures']
vstoxx_options = h5['vstoxx_options']
h5.close()
# collecting the maturity dates
third_fridays = sorted(set(vstoxx_futures['MATURITY']))
# instantiation of market environment object with dummy pricing date
me_vstoxx = dx.market_environment('me_vstoxx', dt.datetime(2014, 1, 1))
me_vstoxx.add_constant('currency', 'EUR')
me_vstoxx.add_constant('frequency', 'W')
me_vstoxx.add_constant('paths', 5000)
# constant short rate model with somewhat arbitrary rate
csr = dx.constant_short_rate('csr', 0.01)
me_vstoxx.add_curve('discount_curve', csr)
# parameters to be calibrated later, dummies only
# SRD part
me_vstoxx.add_constant('kappa', 1.0)
me_vstoxx.add_constant('theta', 20)
me_vstoxx.add_constant('volatility', 1.0)
# jump part
me_vstoxx.add_constant('lambda', 0.5)
me_vstoxx.add_constant('mu', -0.2)
me_vstoxx.add_constant('delta', 0.1)
# payoff function for all European call options
payoff_func = 'np.maximum(maturity_value - strike, 0)'
tol = 0.2 # otm & itm moneyness tolerance
first = True # flag for first calibration
def srjd_get_option_selection(pricing_date, tol=tol):
    ''' Function to select option quotes from data set.

    Parameters
    ==========
    pricing_date: datetime object
        date for which the calibration shall be implemented
    tol: float
        moneyness tolerace for OTM and ITM options to be selected

    Returns
    =======
    option_selection: DataFrame object
        selected options quotes
    futures: DataFrame object
        futures prices at pricing_date
    '''
    option_selection = pd.DataFrame()
    # NOTE(review): only the fourth third-Friday is used despite the
    # plural variable name and the loop below — confirm intended.
    mats = [third_fridays[3],]  # list of maturity dates
    # select the relevant futures prices
    futures = vstoxx_futures[(vstoxx_futures.DATE == pricing_date)
                & (vstoxx_futures.MATURITY.apply(lambda x: x in mats))]
    # collect option data for the given option maturities
    for mat in mats:
        # futures price serves as the forward/moneyness reference
        forward = futures[futures.MATURITY == mat]['PRICE'].values[0]
        option_selection = option_selection.append(
                vstoxx_options[(vstoxx_options.DATE == pricing_date)
                             & (vstoxx_options.MATURITY == mat)
                             & (vstoxx_options.TYPE == 'C')  # only calls
                             & (vstoxx_options.STRIKE > (1 - tol) * forward)
                             & (vstoxx_options.STRIKE < (1 + tol) * forward)])
    return option_selection, futures
def srd_forward_error(p0):
    ''' Calculates the mean-squared error for the
    term structure calibration for the SRD model part.

    Parameters
    ===========
    p0: tuple/list
        tuple of kappa, theta, volatility

    Returns
    =======
    MSE: float
        mean-squared error
    '''
    # f (market forwards) and t (dates) are set by generate_shift_base.
    global initial_value, f, t
    # penalty value for non-sensible (negative) parameters
    if p0[0] < 0 or p0[1] < 0 or p0[2] < 0:
        return 100
    f_model = dx.srd_forwards(initial_value, p0, t)
    MSE = np.sum((f - f_model) ** 2) / len(f)
    return MSE
def generate_shift_base(pricing_date, futures):
    ''' Generates the values for the deterministic shift for the
    SRD model part.

    Parameters
    ==========
    pricing_date: datetime object
        date for which the calibration shall be implemented
    futures: DataFrame object
        futures prices at pricing_date

    Returns
    =======
    shift_base: ndarray object
        shift values for the SRD model part
        (column 0: dates, column 1: market-minus-model forwards)
    '''
    global initial_value, f, t
    # futures price array (spot value prepended at the pricing date)
    f = list(futures['PRICE'].values)
    f.insert(0, initial_value)
    f = np.array(f)
    # date array
    t = [_.to_pydatetime() for _ in futures['MATURITY']]
    t.insert(0, pricing_date)
    t = np.array(t)
    # calibration to the futures values (fmin prints its convergence info)
    opt = spo.fmin(srd_forward_error, (2., 15., 2.))
    # calculation of shift values as market-model forward differences
    f_model = dx.srd_forwards(initial_value, opt, t)
    shifts = f - f_model
    shift_base = np.array((t, shifts)).T
    return shift_base
def srjd_get_option_models(pricing_date, option_selection, futures):
    ''' Function to instantiate option pricing models.

    Parameters
    ==========
    pricing_date: datetime object
        date for which the calibration shall be implemented
    maturity: datetime object
        maturity date for the options to be selected
    option_selection: DataFrame object
        selected options quotes

    Returns
    =======
    vstoxx_model: dx.square_root_diffusion
        model object for VSTOXX
    option_models: dict
        dictionary of dx.valuation_mcs_european_single objects
    '''
    global initial_value
    # updating the pricing date
    me_vstoxx.pricing_date = pricing_date
    # setting the initial value for the pricing date
    initial_value = vstoxx_index['V2TX'][pricing_date]
    me_vstoxx.add_constant('initial_value', initial_value)
    # setting the final date given the maturity dates
    final_date = max(futures.MATURITY).to_pydatetime()
    me_vstoxx.add_constant('final_date', final_date)
    # adding the futures term structure
    me_vstoxx.add_curve('term_structure', futures)
    # instantiating the risk factor (VSTOXX) model
    vstoxx_model = dx.square_root_jump_diffusion_plus('vstoxx_model',
                                                     me_vstoxx)
    # generating the shift values and updating the model
    vstoxx_model.shift_base = generate_shift_base(pricing_date, futures)
    vstoxx_model.update_shift_values()
    option_models = {}  # dictionary collecting all models
    for option in option_selection.index:
        # setting the maturity date for the given option
        # NOTE(review): DataFrame.ix is deprecated in modern pandas;
        # .loc would be the replacement — confirm pandas version in use.
        maturity = option_selection['MATURITY'].ix[option]
        me_vstoxx.add_constant('maturity', maturity)
        # setting the strike for the option to be modeled
        strike = option_selection['STRIKE'].ix[option]
        me_vstoxx.add_constant('strike', strike)
        # instantiating the option model
        option_models[option] = \
            dx.valuation_mcs_european_single(
                    'eur_call_%d' % strike,
                    vstoxx_model,
                    me_vstoxx,
                    payoff_func)
    return vstoxx_model, option_models
def srjd_calculate_model_values(p0):
    ''' Returns all relevant option values.

    Parameters
    ===========
    p0: tuple/list
        tuple of kappa, theta, volatility, lamb, mu, delt

    Returns
    =======
    model_values: dict
        dictionary with model values
    '''
    kappa, theta, volatility, lamb, mu, delt = p0
    # push the candidate parameters into the risk-factor model
    vstoxx_model.update(kappa=kappa, theta=theta, volatility=volatility,
                        lamb=lamb, mu=mu, delt=delt)
    # revalue every option under the updated model; the fixed seed keeps
    # Monte Carlo estimates comparable across parameter sets
    return {name: option_models[name].present_value(fixed_seed=True)
            for name in option_models}
def srjd_mean_squared_error(p0, penalty=True):
    ''' Returns the mean-squared error given
    the model and market values.

    Parameters
    ===========
    p0: tuple/list
        tuple of kappa, theta, volatility, lamb, mu, delt
    penalty: bool
        if False, return the raw MSE without the deviation penalty

    Returns
    =======
    MSE: float
        mean-squared error (plus penalty term unless penalty is False)
    '''
    # escape with high value for non-sensible parameter values
    if (p0[0] < 0 or p0[1] < 5. or p0[2] < 0 or p0[2] > 10.
            or p0[3] < 0 or p0[4] < 0 or p0[5] < 0):
        return 1000
    # define/access global variables/objects
    global option_selection, vstoxx_model, option_models, first, last
    # calculate the model values for the option selection
    model_values = srjd_calculate_model_values(p0)
    # squared differences between model values and market quotes;
    # the explicit list also fixes np.array(dict.values()) which breaks
    # on Python 3 (dict views are not array-like)
    squared_diffs = [
        (model_values[option] - option_selection['PRICE'].loc[option]) ** 2
        for option in model_values
    ]
    MSE = np.sum(np.array(squared_diffs)) / len(squared_diffs)
    # BUG FIX: the penalty parameter used to be overwritten before it was
    # checked, so penalty=False (used for MSE reporting) had no effect.
    if penalty is False:
        return MSE
    if first is True:
        # no penalty during the very first calibration
        return MSE
    # penalize deviation from the previous optimal parameter combination
    return MSE + np.sum((p0 - last) ** 2)
def srjd_get_parameter_series(pricing_date_list):
    ''' Returns parameter series for the calibrated model over time.

    For every pricing date: selects option quotes, builds the models,
    runs a global (brute-force) then a local (Nelder-Mead) optimization
    and appends the optimal parameters plus the MSE to the result frame.

    Parameters
    ==========
    pricing_date_list: pd.DatetimeIndex
        object with relevant pricing dates

    Returns
    =======
    parameters: pd.DataFrame
        DataFrame object with parameter series
    '''
    # define/access global variables/objects
    global initial_value, futures, option_selection, vstoxx_model, \
        option_models, first, last
    parameters = pd.DataFrame()  # DataFrame object to collect parameter series
    for pricing_date in pricing_date_list:
        # setting the initial value for the VSTOXX
        initial_value = vstoxx_index['V2TX'][pricing_date]
        # select relevant option quotes
        option_selection, futures = srjd_get_option_selection(pricing_date)
        # instantiate all model given option selection
        vstoxx_model, option_models = srjd_get_option_models(pricing_date,
                                                             option_selection,
                                                             futures)
        # global optimization to start with
        opt = spo.brute(srjd_mean_squared_error,
                        ((1.25, 6.51, 0.75),  # range for kappa
                         (10., 20.1, 2.5),  # range for theta
                         (0.5, 10.51, 2.5),  # range for volatility
                         (0.1, 0.71, 0.3),  # range for lambda
                         (0.1, 0.71, 0.3),  # range for mu
                         (0.1, 0.21, 0.1)),  # range for delta
                        finish=None)
        # local optimization, started from the brute-force optimum
        opt = spo.fmin(srjd_mean_squared_error, opt,
                       maxiter=550, maxfun=650,
                       xtol=0.0000001, ftol=0.0000001);
        # calculate MSE for storage
        MSE = srjd_mean_squared_error(opt, penalty=False)
        # store main parameters and results
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
        # pd.concat is the modern replacement -- left unchanged here.
        parameters = parameters.append(
            pd.DataFrame(
                {'date' : pricing_date,
                 'initial_value' : vstoxx_model.initial_value,
                 'kappa' : opt[0],
                 'theta' : opt[1],
                 'sigma' : opt[2],
                 'lambda' : opt[3],
                 'mu' : opt[4],
                 'delta' : opt[5],
                 'MSE' : MSE},
                index=[0]), ignore_index=True)
        first = False  # set to False after first iteration
        last = opt  # store optimal parameters for reference
        print ("Pricing Date %s" % str(pricing_date)[:10]
               + " | MSE %6.5f" % MSE)
    return parameters
def srjd_plot_model_fit(parameters):
    """Plot market quotes vs. model prices (and their differences) per
    maturity for the last pricing date in *parameters*; saves the figure.
    """
    # last pricing date
    pdate = max(parameters.date)
    # optimal parameters for that date and the maturity
    opt = np.array(parameters[parameters.date == pdate][[
        'kappa', 'theta', 'sigma', 'lambda', 'mu', 'delta']])[0]
    # NOTE(review): 'tol' is not defined in this function; unless it is a
    # module-level global defined earlier in the file this raises a
    # NameError -- verify.
    option_selection, futures = srjd_get_option_selection(pdate, tol=tol)
    vstoxx_model, option_models = srjd_get_option_models(pdate,
                                                         option_selection,
                                                         futures)
    model_values = srjd_calculate_model_values(opt)
    # NOTE(review): under Python 3, dict .values()/.keys() are views;
    # this construction targets Python 2 -- wrap in list() for Python 3.
    model_values = pd.DataFrame(model_values.values(),
                                index=model_values.keys(),
                                columns=['MODEL'])
    option_selection = option_selection.join(model_values)
    mats = set(option_selection.MATURITY.values)
    mats = sorted(mats)
    # arranging the canvas for the subplots
    height = max(8, 2 * len(mats))
    if len(mats) == 1:
        # single maturity: one column with two stacked axes
        mat = mats[0]
        fig, axarr = plt.subplots(2, figsize=(10, height))
        # NOTE(review): local name 'os' shadows any imported os module
        os = option_selection[option_selection.MATURITY == mat]
        strikes = os.STRIKE.values
        axarr[0].set_ylabel('%s' % str(mat)[:10])
        axarr[0].plot(strikes, os.PRICE.values, label='Market Quotes')
        axarr[0].plot(strikes, os.MODEL.values, 'ro', label='Model Prices')
        axarr[0].legend(loc=0)
        wi = 0.3  # bar width for the difference plot
        axarr[1].bar(strikes - wi / 2, os.MODEL.values - os.PRICE.values,
                     width=wi)
        axarr[0].set_xlabel('strike')
        axarr[1].set_xlabel('strike')
    else:
        # one row of (quotes, differences) axes per maturity
        fig, axarr = plt.subplots(len(mats), 2, sharex=True, figsize=(10, height))
        for z, mat in enumerate(mats):
            os = option_selection[option_selection.MATURITY == mat]
            strikes = os.STRIKE.values
            axarr[z, 0].set_ylabel('%s' % str(mat)[:10])
            axarr[z, 0].plot(strikes, os.PRICE.values, label='Market Quotes')
            axarr[z, 0].plot(strikes, os.MODEL.values, 'ro', label='Model Prices')
            axarr[z, 0].legend(loc=0)
            wi = 0.3
            axarr[z, 1].bar(strikes - wi / 2,
                            os.MODEL.values - os.PRICE.values, width=wi)
            axarr[z, 0].set_xlabel('strike')
            axarr[z, 1].set_xlabel('strike')
    plt.savefig('../images/dx_srjd_cali_1_fit.pdf')
# Script entry point: calibrate the SRJD model over March 2014, persist
# the parameter series to HDF5 and produce diagnostic plots.
# BUGFIX: "__name__ is '__main__'" compared string *identity*, which only
# worked by accident of CPython interning; equality is the correct test.
if __name__ == '__main__':
    t0 = time.time()
    # selecting the dates for the calibration (business days only)
    pricing_date_list = pd.date_range('2014/3/1', '2014/3/31', freq='B')
    # conducting the calibration
    parameters = srjd_get_parameter_series(pricing_date_list)
    # storing the calibration results
    date = str(dt.datetime.now())[:10]
    h5 = pd.HDFStore('../data/srjd_calibration_%s_%s_%s' %
                     (me_vstoxx.get_constant('paths'),
                      me_vstoxx.get_constant('frequency'),
                      date.replace('-', '_')), 'w')
    h5['parameters'] = parameters
    h5.close()
    # plotting the parameter time series data
    fig1, ax1 = plt.subplots(1, figsize=(10, 12))
    to_plot = parameters.set_index('date')[
        ['kappa', 'theta', 'sigma',
         'lambda', 'mu', 'delta', 'MSE']]
    to_plot.plot(subplots=True, color='b', title='SRJD', ax=ax1)
    plt.savefig('../images/dx_srjd_cali_1.pdf')
    # plotting the histogram of the MSE values
    fig2, ax2 = plt.subplots()
    dat = parameters.MSE
    dat.hist(bins=30, ax=ax2)
    plt.axvline(dat.mean(), color='r', ls='dashed',
                lw=1.5, label='mean = %5.4f' % dat.mean())
    plt.legend()
    plt.savefig('../images/dx_srjd_cali_1_hist.pdf')
    # plotting the model fit at last pricing date
    srjd_plot_model_fit(parameters)
    # measuring and printing the time needed for the script execution
    # BUGFIX: Python-2-only print statement replaced by a form that is
    # valid under both Python 2 and Python 3
    print("Time in minutes %.2f" % ((time.time() - t0) / 60))
| 2.25 | 2 |
MAEnv/env_Cleaner/disjointSet.py | Abluceli/Multi-agent-Reinforcement-Learning-Algorithms | 5 | 12757775 | #! /usr/bin/env python3
'''
Disjoint Set Class that provides basic functionality.
Implemented according to the functionality provided here:
https://en.wikipedia.org/wiki/Disjoint-set_data_structure
@author: <NAME> (github.com/138paulmiller)
'''
class DisjointSet:
    '''
    Disjoint Set : Utility class that helps implement Kruskal MST algorithm.
    Allows checking whether two keys belong to the same set and unioning
    sets together. Uses union-by-rank and path compression.
    '''

    class Element:
        '''A single set member: key plus parent pointer and rank.'''

        def __init__(self, key):
            self.key = key
            self.parent = self  # root elements are their own parent
            self.rank = 0

        def __eq__(self, other):
            # elements are identified purely by their key
            return self.key == other.key

        def __ne__(self, other):
            return self.key != other.key

    def __init__(self):
        '''
        Tree = element map where each node is a (key, parent, rank).
        Sets are represented as subtrees whose root is identified with
        a self-referential parent.
        '''
        self.tree = {}

    def make_set(self, key):
        '''
        Creates a new singleton set; no-op if the key already exists.

        @params
            key : id of the element
        @return
            None
        '''
        # IMPROVED: only allocate the Element when the key is new
        # (the original built one unconditionally and then discarded it),
        # and use the idiomatic "key not in dict" membership test.
        if key not in self.tree:
            self.tree[key] = self.Element(key)

    def find(self, key):
        '''
        Finds a given element in the tree by the key, compressing the
        path to the root along the way.

        @params
            key(hashable) : id of the element
        @return
            Element : root of the set which contains the key, or None
                      if the key is unknown
        '''
        if key not in self.tree:
            return None
        element = self.tree[key]
        # root is the element with itself as parent; if not root, recurse
        # and point directly at the root (path compression)
        if element.parent != element:
            element.parent = self.find(element.parent.key)
        return element.parent

    def union(self, element_a, element_b):
        '''
        Creates a new set that contains all elements in both element_a's
        and element_b's sets. Pass into union the Elements returned by
        the find operation.

        @params
            element_a(Element) : Element of set a
            element_b(Element) : Element of set b
        @return
            None
        '''
        root_a = self.find(element_a.key)
        root_b = self.find(element_b.key)
        # if not in the same subtree (set), merge by rank
        if root_a != root_b:
            if root_a.rank < root_b.rank:
                root_a.parent = root_b
            elif root_a.rank > root_b.rank:
                root_b.parent = root_a
            else:
                # same rank: pick an arbitrary root and bump its rank
                root_b.parent = root_a
                root_a.rank += 1
| 4.03125 | 4 |
meridian/acupoints/dadun41.py | sinotradition/meridian | 5 | 12757776 | #!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
# Metadata for the acupuncture point "dadun" (LR1) on the liver channel.
SPELL=u'dàdūn'  # pinyin romanization (with tone marks)
CN=u'大敦'  # name in Chinese characters
NAME=u'dadun41'  # internal identifier used by the package
CHANNEL='liver'
CHANNEL_FULLNAME='LiverChannelofFoot-Jueyin'
SEQ='LR1'  # standard meridian sequence code
if __name__ == '__main__':
    pass
| 1.210938 | 1 |
scripts/3_postprocess_baseline_with_mw/3b_plot_tps_rt_two_mw.py | gokul-uf/asl-fall-2017 | 1 | 12757777 | <gh_stars>1-10
from __future__ import print_function
from __future__ import division
from glob import glob
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
thread_range = [8, 16, 32, 64]
def extract_n_plot(filename, label):
    """Read (num_clients, value, std) triples from *filename* (csv) and
    add them to the current figure as an errorbar series."""
    clients, means, stds = [], [], []
    with open(filename) as handle:
        for row in handle:
            # every field is numeric; keep only the first three columns
            n_clients, mean, std = [float(field)
                                    for field in row.strip().split(",")][:3]
            clients.append(n_clients)
            means.append(mean)
            stds.append(std)
    plt.errorbar(x=clients, y=means, yerr=stds, label=label, capsize=2)
# IMPROVED: the six hand-copied plotting sections (3 metrics x 2 workloads)
# are folded into one helper; titles, labels, file names and rendered
# output are unchanged.
def _plot_series(title, ylabel, csv_template, out_path=None):
    """Draw one figure with an errorbar series per worker-thread count.

    csv_template must contain one '{}' placeholder for the thread count.
    The figure is saved to *out_path* when given; otherwise it is left as
    the current figure for the caller to save.
    """
    plt.figure()
    plt.title(title)
    for num_thread in thread_range:
        extract_n_plot(filename=csv_template.format(num_thread),
                       label="{} Worker Threads".format(num_thread))
    plt.legend()
    plt.grid(linestyle="dotted")
    plt.ylabel(ylabel)
    plt.xlabel("Number of Virtual Clients")
    plt.ylim(ymin=0)
    if out_path is not None:
        plt.savefig(out_path)

_plot_series("Response Time vs. Num of Virtual Clients \n (Read-Only Two Middleware Baseline)",
             "Response Time (ms)",
             "csvs/two_mw_baseline_r-o_t_{}_rt.csv",
             "3b_two-mw_r-o_rt.png")
_plot_series("Throughput vs. Num of Virtual Clients \n (Read-Only Two Middleware Baseline)",
             "Throughput (request / second)",
             "csvs/two_mw_baseline_r-o_t_{}_tps.csv",
             "3b_two-mw_r-o_tps.png")
_plot_series("Queueing Time vs. Num of Virtual Clients \n (Read-Only Two Middleware Baseline)",
             "Queueing Time (ms)",
             "csvs/two_mw_baseline_r-o_t_{}_wt.csv",
             "3b_two-mw_r-o_wt.png")
_plot_series("Response Time vs. Num of Virtual Clients \n (Write-Only Two Middleware Baseline)",
             "Response Time (ms)",
             "csvs/two_mw_baseline_w-o_t_{}_rt.csv",
             "3b_two-mw_w-o_rt.png")
_plot_series("Throughput vs. Num of Virtual Clients \n (Write-Only Two Middleware Baseline)",
             "Throughput (request / second)",
             "csvs/two_mw_baseline_w-o_t_{}_tps.csv",
             "3b_two-mw_w-o_tps.png")
# The final figure is intentionally NOT saved here: the plt.savefig call
# that follows this section in the file writes 3b_two-mw_w-o_wt.png.
_plot_series("Queueing Time vs. Num of Virtual Clients \n (Write-Only Two Middleware Baseline)",
             "Queueing Time (ms)",
             "csvs/two_mw_baseline_w-o_t_{}_wt.csv")
plt.savefig("3b_two-mw_w-o_wt.png") | 2.5 | 2 |
lino/modlib/extjs/__init__.py | khchine5/lino | 1 | 12757778 | # -*- coding: UTF-8 -*-
# Copyright 2009-2016 <NAME>
# License: BSD (see file COPYING for details)
"""Adds the default Lino user interface based on ExtJS.
It is being automatically included by every Lino application unless
you disable it (e.g. by overriding your :meth:`get_apps_modifiers
<lino.core.site.Site.get_apps_modifiers>` or your
:meth:`get_installed_apps <lino.core.site.Site.get_installed_apps>`
method).
When your Lino application uses the ExtJS user interface, then you may
need a `commercial license from Sencha
<https://www.sencha.com/store/extjs/>`__. Summary without warranty of
correctness: you need a commercial license if (1) your application is
not available under the GPL **and** (2) your site is used by other
people than the employees of the company which is the copyright holder of
your application.
.. autosummary::
:toctree:
views
ext_renderer
"""
from __future__ import unicode_literals
from __future__ import print_function
from lino.api.ad import Plugin
from django.utils.translation import ugettext_lazy as _
# raise Exception("20160528")
class Plugin(Plugin):
    """Extends :class:`lino.core.plugin.Plugin`.

    Provides the ExtJS 3 admin user interface: the renderer, the CSS/JS
    includes and the Django URL patterns for the JSON API.
    """
    # NOTE(review): this class deliberately shadows the imported ``Plugin``
    # base class name -- presumably a lino convention; verify before renaming.

    # plugins that must be installed for this one to work
    needs_plugins = ['lino.modlib.bootstrap3']

    enter_submits_form = False
    """Whether the :kbd:`ENTER` key (or :kbd:`CTRL+ENTER` when in a
    textarea field) should submit the form.

    The default is `False`. For sites that were in production before
    October 2015, we recommend to explain to the users that a simple
    :kbd:`ENTER` no longer submits a form, and that :kbd:`Ctrl-S` is
    the preferred keyboard shortcut for submitting a
    form. Alternatively for backwards compatibility you can set it to
    `True` using something like this::

        def setup_plugins(self):
            super(Site, self).setup_plugins()
            if self.is_installed('extjs'):
                self.plugins.extjs.configure(enter_submits_form=False)

    When you set this to `True` :xfile:`linoweb.js` adds a special
    mapping for :kbd:`ENTER`.  The problem then is that the
    :kbd:`ENTER` key won't work in a plain textarea field because we
    didn't find a way to restore the default behaviour.

    """

    # label of the admin user interface shown to end users
    ui_label = _("Admin")

    use_statusbar = False
    """
    Whether to use a status bar to display certain messages to the user.
    Default is `False` since currently this is not really useful.
    """

    # URL prefix under which all patterns of this plugin are mounted
    url_prefix = "ext"

    media_name = 'ext-3.3.1'

    # media_base_url = "http://extjs-public.googlecode.com/" + \
    #                  "svn/tags/extjs-3.3.1/release/"
    """The URL from where to include the ExtJS library files.

    The default value points to the `extjs-public
    <http://code.google.com/p/extjs-public/>`_ repository and thus
    requires the clients to have an internet connection.  This
    relieves newcomers from the burden of having to specify a download
    location in their :xfile:`settings.py`.

    On a production site you'll probably want to download and serve
    these files yourself by setting this to `None` and setting
    :attr:`extjs_root` (or a symbolic link "extjs" in your
    :xfile:`media` directory) to point to the local directory where
    ExtJS 3.3.1 is installed).

    """

    autorefresh_seconds = 0
    # autorefresh_seconds = 60
    """Number of seconds to wait between two refreshes when autorefresh is
    activated. Default is 60. Set this to 0 in order to deactivate
    the autorefresh button.

    """

    media_root = None
    """
    Path to the ExtJS root directory.  Only used when
    :attr:`media_base_url` is None, and when the `media` directory has
    no symbolic link named `extjs` pointing to the ExtJS root
    directory.
    """

    # attribute name under which UI handles are stored on actors
    ui_handle_attr_name = 'extjs_handle'

    def on_ui_init(self, kernel):
        # Create the ExtJS renderer, register it on the kernel and warm
        # up the parameter-panel layout handles.
        # logger.info("20140227 extjs.Plugin.on_ui_init() a")
        from .ext_renderer import ExtRenderer
        self.renderer = ExtRenderer(self)
        kernel.extjs_renderer = self.renderer
        # added 20160329
        for fl in self.renderer.param_panels:
            fl.get_layout_handle(self)
        # logger.info("20140227 extjs.Plugin.on_ui_init() b")

    def get_row_edit_lines(self, e, panel):
        """Yield JavaScript statements to run for widget *e* when the
        current record of *panel* changes (grid refresh, htmlbox refresh,
        chooser context updates). Behaviour depends on lino internals --
        see lino.core.elems.
        """
        from lino.core.elems import (
            GridElement, HtmlBoxElement, FieldElement, form_field_name)
        from lino.core import constants
        master_field = panel.layout_handle.layout._datasource.master_field
        if isinstance(e, GridElement):
            yield "%s.on_master_changed();" % e.as_ext()
        elif isinstance(e, HtmlBoxElement):
            yield "%s.refresh();" % e.as_ext()
        elif isinstance(e, FieldElement):
            if not panel.layout_handle.layout.editable:
                return
            holder = panel.layout_handle.layout.get_chooser_holder()
            chooser = holder.get_chooser_for_field(e.field.name)
            if not chooser:
                return
            for f in chooser.context_fields:
                if master_field and master_field.name == f.name:
                    # chooser depends on the master: forward master pk/type
                    yield "var bp = this.get_base_params();"
                    yield "%s.setContextValue('%s',bp['%s']);" % (
                        e.as_ext(), constants.URL_PARAM_MASTER_PK,
                        constants.URL_PARAM_MASTER_PK)
                    yield "%s.setContextValue('%s',bp['%s']);" % (
                        e.as_ext(), constants.URL_PARAM_MASTER_TYPE,
                        constants.URL_PARAM_MASTER_TYPE)
                else:
                    yield (
                        "%s.setContextValue('%s', record ? record."
                        "data['%s'] : undefined);" % (
                            e.as_ext(), f.name, form_field_name(f)))

    def get_css_includes(self, site):
        # main ExtJS stylesheet
        yield self.build_lib_url('resources/css/ext-all.css')

    def get_js_includes(self, settings, language):
        # no extra JS includes beyond what the renderer emits itself
        return []

    def get_head_lines(self, site, request):
        # Inline <style> block mapping toolbar icon classes to static images.
        yield "<style>"
        from lino.core.constants import ICON_NAMES
        tpl = ".x-tbar-{0}{{ background-image: url({1}) !important; }}"
        for n in ICON_NAMES:
            url = site.build_static_url('images', 'mjames', n + '.png')
            yield tpl.format(n, url)
        yield """
.x-tbar-done{ background-image: url(/static/images/mjames/accept.png) !important; }
.x-tbar-parameters{ background-image: url(/static/images/mjames/database_gear.png) !important; }
"""
        yield "</style>"

    def get_used_libs(self, html=False):
        # Yield (name, version, url) triples for the "about" information.
        if html is not None:
            # version = '<script type="text/javascript">\
            # document.write(Ext.version);</script>'
            onclick = "alert('ExtJS client version is ' + Ext.version);"
            tip = "Click to see ExtJS client version"
            text = "(version)"
            version = html.a(text, href='#', onclick=onclick, title=tip)
            yield ("ExtJS", version, "http://www.sencha.com")

            yield ("Silk Icons", '1.3',
                   "http://www.famfamfam.com/lab/icons/silk/")

    def get_index_view(self):
        # entry view of the admin interface
        from . import views
        return views.AdminIndex.as_view()

    def get_patterns(self):
        """Build the URL patterns of the ExtJS interface (grid config,
        JSON api, restful api, choices and callbacks).
        """
        from django.conf import settings
        from django.conf.urls import url  # patterns
        from . import views

        self.renderer.build_site_cache()

        rx = '^'

        # NOTE(review): some continuation strings below use '\w' in
        # non-raw literals -- a SyntaxWarning on recent Python 3; the
        # escapes still reach the regex engine unchanged.
        urlpatterns = [
            # url(rx + '/?$', views.AdminIndex.as_view()),
            url(rx + '$', views.AdminIndex.as_view()),
            url(rx + r'api/main_html$', views.MainHtml.as_view()),
            # url(rx + r'auth$', views.Authenticate.as_view()),
            url(rx + r'grid_config/(?P<app_label>\w+)/(?P<actor>\w+)$',
                views.GridConfig.as_view()),
            url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)$',
                views.ApiList.as_view()),
            url(rx + r'api/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$',
                views.ApiElement.as_view()),
            url(rx + r'restful/(?P<app_label>\w+)/(?P<actor>\w+)$',
                views.Restful.as_view()),
            url(rx + r'restful/(?P<app_label>\w+)/(?P<actor>\w+)/(?P<pk>.+)$',
                views.Restful.as_view()),
            url(rx + r'choices/(?P<app_label>\w+)/(?P<rptname>\w+)$',
                views.Choices.as_view()),
            url(rx + r'choices/(?P<app_label>\w+)/(?P<rptname>\w+)/'
                '(?P<fldname>\w+)$',
                views.Choices.as_view()),
            url(rx + r'apchoices/(?P<app_label>\w+)/(?P<actor>\w+)/'
                '(?P<an>\w+)/(?P<field>\w+)$',
                views.ActionParamChoices.as_view()),
            # the thread_id can be a negative number:
            url(rx + r'callbacks/(?P<thread_id>[\-0-9a-zA-Z]+)/'
                '(?P<button_id>\w+)$',
                views.Callbacks.as_view())
        ]
        if settings.SITE.use_eid_applet:
            urlpatterns.append(
                url(rx + r'eid-applet-service$',
                    views.EidAppletService.as_view()))
        if settings.SITE.use_jasmine:
            urlpatterns.append(
                url(rx + r'run-jasmine$', views.RunJasmine.as_view()))
        return urlpatterns
| 1.75 | 2 |
tests/unit/ones_inplace.py | microsoft/torchy | 4 | 12757779 | <gh_stars>1-10
from testdriver import *
# Exercise in-place vs. out-of-place tensor ops for the test driver.
x = torch.ones(3)
y = torch.tensor(((5.,6.,1.)))
x.add_(y)  # in-place add: x becomes [6., 7., 2.]
w = torch.add(x, y)  # out-of-place: w = x + y = [11., 13., 3.]
x = None  # drop the reference; w and y must remain valid
print(w)
print(y)
| 2.015625 | 2 |
Mundo 2 - Estruturas de Controle/ex053.py | diegomcosta/Curso-em-Video-Python | 0 | 12757780 | frase = input("Digite uma frase: ").strip().upper()
frase = frase.replace(" ","")
inverso = frase[::-1]
print(f"O inverso de {frase} é {inverso}")
if (frase == inverso):
print("A frase digitada é um palíndromo!")
else:
print("A frase digitada não é um palíndromo!") | 4.03125 | 4 |
modules/module0/02_datastructures_and_geometry/cross_1a.py | tetov/ITA19 | 7 | 12757781 | <reponame>tetov/ITA19<filename>modules/module0/02_datastructures_and_geometry/cross_1a.py
from compas.geometry import cross_vectors
from compas.geometry import angle_vectors
# Unit vectors along the x- and y-axes.
u = [1.0, 0.0, 0.0]
v = [0.0, 1.0, 0.0]
# Cross product u x v is perpendicular to both inputs (the z-axis here).
uxv = cross_vectors(u, v)
# Angle between each input vector and the cross product; both should be
# a right angle (presumably returned in radians, ~1.5708 -- verify
# against the compas angle_vectors documentation).
u_uxv = angle_vectors(u, uxv)
v_uxv = angle_vectors(v, uxv)
print(u_uxv)
print(v_uxv)
| 2.296875 | 2 |
scripts/ven_prep.py | MalcolmorianVII/Nanopore-only-SNP-and-Indel-calling-Nosc- | 0 | 12757782 | import pandas as pd
import os.path
import csv
# Input directories holding per-variant-caller spreadsheets (.xlsx).
clair = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/nosc_clair/2022.01.02/clair_vcfData'
pepper = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/nosc_pepper/2022.01.02/pepper_vcfData'
gatk = '/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/nosc_gatk/2022.01.05/gatk_vcfData'
# Output directory for the venn-diagram text files (must already exist).
venned ='/Users/malcolmorian/Documents/Bioinformatics/Projects2021/Guppy3Guppy5/NOSC/vennTextFiles'
# os.mkdir(venned)
def write(data_df, toWrite):
    """Write *data_df* (the #CHROM/POS frame) to *toWrite* as a
    space-separated file with no header and no index.

    BUGFIX: the original opened the target with ``open(toWrite, 'w')``
    but never used the handle and passed the *path* to ``to_csv``
    anyway, so the file was opened (and truncated) twice; the redundant
    open and the dead commented-out code are removed.
    """
    data_df.to_csv(toWrite, sep=' ', index=False, header=False)
def read_file(directory):
    """For every .xlsx file in *directory*, extract the (#CHROM, POS)
    columns and write them as a space-separated csv into *venned*."""
    entries = os.listdir(directory)
    print(entries)
    for entry in entries:
        # guard clause: skip anything that is not an Excel sheet
        if not entry.endswith('.xlsx'):
            print('NOT THIS ONE!!!!!!!!!!!')
            continue
        source = f'{directory}/{entry}'
        print(source)
        frame = pd.read_excel(source, engine='openpyxl')
        frame = frame[['#CHROM','POS']]
        write(frame, f'{venned}/{entry}.csv')
# Convert each caller's spreadsheets into venn-ready csv files.
read_file(clair)
read_file(gatk)
read_file(pepper)
| 3.140625 | 3 |
src/sound.py | ytyaru/Python.Pyxel.Tetris.20200424070000 | 0 | 12757783 | <filename>src/sound.py
#!/usr/bin/env python3
# coding: utf8
import enum, numpy, random, pyxel
from abc import ABCMeta, abstractmethod
class Sound:
    """Wrapper around pyxel's sound API that plays one tone of the
    current Ionian scale per mino id, with a global mute switch."""

    def __init__(self):
        self.scale = IonianScale()
        self.is_mute = False

    def sound(self, mino_id):
        """Play the scale degree addressed by *mino_id* unless muted.

        mino_id indexes into the 7 degrees returned by ``scale.Keys``.
        """
        if self.is_mute: return
        # map the mino id onto one absolute note of the current scale
        notes = Note.get(self.scale.Keys[mino_id])
        tone = 'p'
        volume = '6'
        effect = 'f'
        speed = 30
        sound_bank = 0
        pyxel.sound(sound_bank).set(
            notes,
            tone,
            volume,
            effect,
            speed,
        )
        channel = 0
        pyxel.play(channel, sound_bank)

    def increment_key(self):
        """Transpose the scale up one semitone."""
        self.scale.increment_key()

    def decrement_key(self):
        """Transpose the scale down one semitone."""
        self.scale.decrement_key()

    def toggle_mute(self):
        # IMPROVED: single idiomatic negation instead of an if/else ladder
        self.is_mute = not self.is_mute

    @property
    def IsMute(self): return self.is_mute
class Key:
    """Chromatic key constants: 12 semitones per octave."""
    Min = 0
    Max = 11
    Len = 12
    Names = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')

class IonianScale: # https://ja.wikipedia.org/wiki/%E9%9F%B3%E9%9A%8E
    """Ionian (major) scale rooted at a chromatic key in [0, 11]."""

    def __init__(self):
        self.key = 0
        # semitone offsets of the seven scale degrees from the root
        self.intervals = (0, 2, 4, 5, 7, 9, 11)

    @property
    def Key(self): return self.key

    @Key.setter
    def Key(self, value):
        # silently ignore out-of-range assignments
        if Key.Min <= value <= Key.Max: self.key = value

    @property
    def Keys(self):
        """Absolute note numbers of the scale degrees for the current key."""
        return [i + self.key for i in self.intervals]

    def increment_key(self):
        """Transpose up one semitone, wrapping from B back to C.

        BUGFIX: the original only checked the wrap bound and never
        changed ``self.key``, so incrementing was a no-op.
        """
        self.key += 1
        if Key.Max < self.key: self.key = Key.Min

    def decrement_key(self):
        """Transpose down one semitone, wrapping from C up to B.

        BUGFIX: same as increment_key -- the key is now actually moved.
        """
        self.key -= 1
        if self.key < Key.Min: self.key = Key.Max

class Note:
    """Maps absolute note numbers to pyxel note strings like 'C1'."""

    @staticmethod
    def get(note):
        """Return the pyxel note string (name + octave) for *note*."""
        key = note % 12
        pitch = 1 + (note // 12)
        return Key.Names[key] + str(pitch)
if __name__ == '__main__':
    # Minimal manual test: open a 10x10 pyxel window and play the
    # scale's root tone once (mino_id 0).
    class App:
        def __init__(self):
            pyxel.init(10,10)
            Sound().sound(0)
            pyxel.run(self.update, self.draw)
        def draw(self): pyxel.cls(0)
        def update(self): pass
    App()
| 3.09375 | 3 |
utils/data_utils.py | mehtadhruv1/comparatively-finetuning-bert | 74 | 12757784 | """
Script containing various utilities related to data processing and cleaning. Includes tokenization,
text cleaning, feature extractor (token type IDs & attention masks) for BERT, and IMDBDataset.
"""
import logging
import torch
from torch.utils.data import Dataset
import os
import pickle
import re
import numpy as np
from tqdm import trange
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# Setup stopwords list & word (noun, adjective, and verb) lemmatizer
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
def clean_text(text):
"""Function to clean text using RegEx operations, removal of stopwords, and lemmatization."""
text = re.sub(r'[^\w\s]', '', text, re.UNICODE)
text = text.lower()
text = [lemmatizer.lemmatize(token) for token in text.split(' ')]
text = [lemmatizer.lemmatize(token, 'v') for token in text]
text = [word for word in text if word not in stop_words]
text = ' '.join(text)
text = text.lstrip().rstrip()
text = re.sub(' +', ' ', text)
return text
def tokenize_and_encode(text, tokenizer, apply_cleaning=False, max_tokenization_length=512,
truncation_method='head-only', split_head_density=0.5):
"""
Function to tokenize & encode a given text.
@param (str) text: a sequence of words to be tokenized in raw string format
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (bool) apply_cleaning: whether or not to perform common cleaning operations on texts;
note that enabling only makes sense if language of the task is English (default: False)
@param (int) max_tokenization_length: maximum number of positional embeddings, or the sequence
length of an example that will be fed to BERT model (default: 512)
@param (str) truncation_method: method that will be applied in case the text exceeds
@max_tokenization_length; currently implemented methods include 'head-only', 'tail-only',
and 'head+tail' (default: 'head-only')
@param (float) split_head_density: weight on head when splitting between head and tail, only
applicable if @truncation_method='head+tail' (default: 0.5)
@return (list) input_ids: the encoded integer indexes of the given text; note that
get_data_iterators() function converts this to a Tensor under the hood
"""
if apply_cleaning:
text = clean_text(text=text)
# Tokenize and encode
tokenized_text = tokenizer.tokenize(text)
input_ids = tokenizer.convert_tokens_to_ids(tokenized_text)
# Subtract 2 ([CLS] and[SEP] tokens) to get the actual text tokenization length
text_tokenization_length = max_tokenization_length - 2
# Truncate sequences with the specified approach
if len(input_ids) > text_tokenization_length:
# i) Head-Only Approach: Keep the first N tokens
if truncation_method == 'head-only':
input_ids = input_ids[:text_tokenization_length]
# ii) Tail-Only Approach: Keep the last N tokens
elif truncation_method == 'tail-only':
input_ids = input_ids[-text_tokenization_length:]
# iii) Head+Tail Approach: Keep the first F tokens and last L tokens where F + L = N
elif truncation_method == 'head+tail':
head_tokenization_length = int(text_tokenization_length * split_head_density)
tail_tokenization_length = text_tokenization_length - head_tokenization_length
input_head_ids = input_ids[:head_tokenization_length]
input_tail_ids = input_ids[-tail_tokenization_length:]
input_ids = input_head_ids + input_tail_ids
# Plug in CLS & SEP special tokens for identification of start & end points of sequences
cls_id = tokenizer.convert_tokens_to_ids('[CLS]')
sep_id = tokenizer.convert_tokens_to_ids('[SEP]')
input_ids = [cls_id] + input_ids + [sep_id]
# Pad sequences & corresponding masks and features
pad_id = tokenizer.convert_tokens_to_ids('[PAD]')
if len(input_ids) < max_tokenization_length:
padding_length = max_tokenization_length - len(input_ids)
input_ids = input_ids + ([pad_id] * padding_length)
# Check if input is in correct length
# assert len(input_ids) == max_tokenization_length
return input_ids
def get_features(input_ids, tokenizer, device):
"""
Function to get BERT-related features, and helps to build the total input representation.
@param (Tensor) input_ids: the encoded integer indexes of a batch, with shape: (B, P)
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (torch.device) device: 'cpu' or 'gpu', decides where to store the outputted tensors
@return (Tensor, Tensor) token_type_ids, attention_mask: features describe token type with
a 0 for the first sentence and a 1 for the pair sentence; enable attention on a
particular token with a 1 or disable it with a 0
"""
token_type_ids, attention_mask = [], []
# Iterate over batch
for input_ids_example in input_ids:
# Convert tensor to a 1D list
input_ids_example = input_ids_example.squeeze().tolist()
# Set example to whole input when batch size is 1
if input_ids.shape[0] == 1:
input_ids_example = input_ids.squeeze().tolist()
# Get padding information
padding_token_id = tokenizer.convert_tokens_to_ids('[PAD]')
padding_length = input_ids_example.count(padding_token_id)
text_length = len(input_ids_example) - padding_length
# Get segment IDs -> all 0s for one sentence, which is the case for sequence classification
token_type_ids_example = [0] * len(input_ids_example)
# Get input mask -> 1 for real tokens, 0 for padding tokens
attention_mask_example = ([1] * text_length) + ([0] * padding_length)
# Check if features are in correct length
assert len(token_type_ids_example) == len(input_ids_example)
assert len(attention_mask_example) == len(input_ids_example)
token_type_ids.append(token_type_ids_example)
attention_mask.append(attention_mask_example)
# Convert lists to tensors
token_type_ids = torch.tensor(data=token_type_ids, device=device)
attention_mask = torch.tensor(data=attention_mask, device=device)
return token_type_ids, attention_mask
class IMDBDataset(Dataset):
"""
IMDB Dataset for easily iterating over and performing common operations.
@param (str) input_directory: path of directory where the desired data exists
@param (pytorch_transformers.BertTokenizer) tokenizer: tokenizer with pre-figured mappings
@param (bool) apply_cleaning: whether or not to perform common cleaning operations on texts;
note that enabling only makes sense if language of the task is English
@param (int) max_tokenization_length: maximum number of positional embeddings, or the sequence
length of an example that will be fed to BERT model (default: 512)
@param (str) truncation_method: method that will be applied in case the text exceeds
@max_tokenization_length; currently implemented methods include 'head-only', 'tail-only',
and 'head+tail' (default: 'head-only')
@param (float) split_head_density: weight on head when splitting between head and tail, only
applicable if @truncation_method='head+tail' (default: 0.5)
@param (torch.device) device: 'cpu' or 'gpu', decides where to store the data tensors
"""
def __init__(self, input_directory, tokenizer, apply_cleaning, max_tokenization_length,
truncation_method='head-only', split_head_density=0.5, device='cpu'):
super(IMDBDataset).__init__()
self.positive_path = os.path.join(input_directory, 'pos')
self.positive_files = [f for f in os.listdir(self.positive_path)
if os.path.isfile(os.path.join(self.positive_path, f))]
self.num_positive_examples = len(self.positive_files)
self.positive_label = 1
self.negative_path = os.path.join(input_directory, 'neg')
self.negative_files = [f for f in os.listdir(self.negative_path)
if os.path.isfile(os.path.join(self.negative_path, f))]
self.num_negative_examples = len(self.negative_files)
self.negative_label = 0
self.tokenizer = tokenizer
self.apply_cleaning = apply_cleaning
self.max_tokenization_length = max_tokenization_length
self.truncation_method = truncation_method
self.split_head_density = split_head_density
self.device = device
# Pre-tokenize & encode examples
self.pre_tokenize_and_encode_examples()
def pre_tokenize_and_encode_examples(self):
"""
Function to tokenize & encode examples and save the tokenized versions to a separate folder.
This way, we won't have to perform the same tokenization and encoding ops every epoch.
"""
if not os.path.exists(os.path.join(self.positive_path, 'tokenized_and_encoded')):
os.mkdir(os.path.join(self.positive_path, 'tokenized_and_encoded'))
# Clean & tokenize positive reviews
for i in trange(len(self.positive_files), desc='Tokenizing & Encoding Positive Reviews',
leave=True):
file = self.positive_files[i]
with open(os.path.join(self.positive_path, file), mode='r', encoding='utf8') as f:
example = f.read()
example = re.sub(r'<br />', '', example)
example = example.lstrip().rstrip()
example = re.sub(' +', ' ', example)
example = tokenize_and_encode(text=example,
tokenizer=self.tokenizer,
apply_cleaning=self.apply_cleaning,
max_tokenization_length=self.max_tokenization_length,
truncation_method=self.truncation_method,
split_head_density=self.split_head_density)
with open(os.path.join(self.positive_path, 'tokenized_and_encoded', file), mode='wb') as f:
pickle.dump(obj=example, file=f)
else:
logging.warning('Tokenized positive reviews directory already exists!')
if not os.path.exists(os.path.join(self.negative_path, 'tokenized_and_encoded')):
os.mkdir(os.path.join(self.negative_path, 'tokenized_and_encoded'))
# Clean & tokenize negative reviews
for i in trange(len(self.negative_files), desc='Tokenizing & Encoding Negative Reviews',
leave=True):
file = self.negative_files[i]
with open(os.path.join(self.negative_path, file), mode='r', encoding='utf8') as f:
example = f.read()
example = re.sub(r'<br />', '', example)
example = example.lstrip().rstrip()
example = re.sub(' +', ' ', example)
example = tokenize_and_encode(text=example,
tokenizer=self.tokenizer,
apply_cleaning=self.apply_cleaning,
max_tokenization_length=self.max_tokenization_length,
truncation_method=self.truncation_method,
split_head_density=self.split_head_density)
with open(os.path.join(self.negative_path, 'tokenized_and_encoded', file), mode='wb') as f:
pickle.dump(obj=example, file=f)
else:
logging.warning('Tokenized negative reviews directory already exists!')
    def __len__(self):
        # Dataset size is the total of positive and negative review files.
        return len(self.positive_files) + len(self.negative_files)
def __getitem__(self, index):
if index < self.num_positive_examples:
file = self.positive_files[index]
label = torch.tensor(data=self.positive_label, dtype=torch.long).to(self.device)
with open(os.path.join(self.positive_path, 'tokenized_and_encoded', file), mode='rb') as f:
example = pickle.load(file=f)
elif index >= self.num_positive_examples:
file = self.negative_files[index-self.num_positive_examples]
label = torch.tensor(data=self.negative_label, dtype=torch.long).to(self.device)
with open(os.path.join(self.negative_path, 'tokenized_and_encoded', file), mode='rb') as f:
example = pickle.load(file=f)
else:
raise ValueError('Out of range index while accessing dataset')
return torch.from_numpy(np.array(example)).long().to(self.device), label
# | 2.796875 | 3
# falmer/commercial/wagtail_hooks.py (from sussexstudent/services-api)
from wagtail.contrib.modeladmin.options import ModelAdmin, ModelAdminGroup, modeladmin_register
from .models import Offer, OfferCategory
class OfferCategoryAdmin(ModelAdmin):
    """Wagtail modeladmin for browsing and editing OfferCategory records."""
    model = OfferCategory
    menu_icon = 'site'  # icon shown next to the menu entry in the Wagtail admin
    menu_order = 250  # position within the parent menu group
    add_to_settings_menu = False
    exclude_from_explorer = False
    list_display = ('name', )
    search_fields = ('name', )
class OfferAdmin(ModelAdmin):
    """Wagtail modeladmin for browsing and editing Offer records."""
    model = Offer
    menu_icon = 'site'  # icon shown next to the menu entry in the Wagtail admin
    menu_order = 250  # position within the parent menu group
    add_to_settings_menu = False
    exclude_from_explorer = False
    list_display = ('deal_tag', 'company_name')
    search_fields = ('deal_tag', 'company_name')
class CommercialAdminGroup(ModelAdminGroup):
    """Groups the commercial model admins under one 'Commercial' sidebar menu."""
    menu_label = 'Commercial'
    menu_icon = 'folder-open-inverse'
    menu_order = 260  # position of the group in the Wagtail sidebar
    items = (OfferAdmin, OfferCategoryAdmin, )
# Register the group (and its child admins) with Wagtail's modeladmin hooks.
modeladmin_register(CommercialAdminGroup)
# | 1.742188 | 2
# mazesolver/src/pathfinding.py (from LonelyDriver/Mazesolver)
import json
import logging
import sys
from mazesolver.src.exceptions import MazeparsingError
# Application logger: DEBUG and above to stdout, ERROR and above to a file.
logger = logging.getLogger("App")
s_handler = logging.StreamHandler(sys.stdout)
f_handler = logging.FileHandler("pathfinding.log")
s_handler.setLevel(logging.DEBUG)
f_handler.setLevel(logging.ERROR)
# Both handlers use the same format, so build the Formatter once instead of
# duplicating the identical format string for each handler.
shared_format = logging.Formatter(
    "%(asctime)s.%(msecs)03d Function: "
    "%(funcName)s %(levelname)s: %(message)s",
    "%d.%m.%Y %H:%M:%S")
s_handler.setFormatter(shared_format)
f_handler.setFormatter(shared_format)
logger.addHandler(s_handler)
logger.addHandler(f_handler)
logger.setLevel(logging.DEBUG)
class Node:
    '''
    This class represents a node. Every node has n neighbours
    which can be reached from the current node.
    Every node has a unique id.
    '''
    def __init__(self, id: int, pos: tuple, obstacle: bool):
        """
        param id Unique node id
        param pos X and Y position on map
        param obstacle True when this cell cannot be entered
        """
        self._id = id
        self._pos = pos
        self._obstacle = obstacle
        self._neighbours = None
    @property
    def Neighbours(self) -> list:
        # List of reachable neighbour nodes (None until assigned).
        return self._neighbours
    @Neighbours.setter
    def Neighbours(self, value: list):
        self._neighbours = value
    @property
    def Id(self) -> int:
        return self._id
    @Id.setter
    def Id(self, value: int):
        # BUG FIX: previously assigned the builtin ``id`` function instead of
        # the given value.
        self._id = value
    @property
    def Pos(self) -> tuple:
        # (x, y) position on the map.
        return self._pos
    @Pos.setter
    def Pos(self, pos: tuple):
        self._pos = pos
    @property
    def Obstacle(self) -> bool:
        return self._obstacle
    @Obstacle.setter
    def Obstacle(self, obstacle: bool):
        # BUG FIX: the setter was misspelled "Obstace", which registered a
        # stray property and left ``Obstacle`` read-only.
        self._obstacle = obstacle
class MazeDto:
    """Data-transfer object bundling a parsed maze: the raw map rows, the
    generated Node graph, and the start/end nodes."""
    def __init__(self):
        self._maze = []
        self._nodes = []
        self._start_node = None
        self._end_node = None
    @property
    def Maze(self) -> list:
        # Raw maze rows exactly as read from the input JSON.
        return self._maze
    @Maze.setter
    def Maze(self, maze: str):
        self._maze = maze
    @property
    def Nodes(self) -> list:
        # Flat list of Node objects, one per maze cell, in row-major order.
        return self._nodes
    @Nodes.setter
    def Nodes(self, nodes):
        self._nodes = nodes
    @property
    def StartNode(self) -> Node:
        return self._start_node
    @StartNode.setter
    def StartNode(self, start_node: Node):
        self._start_node = start_node
    @property
    def EndNode(self) -> Node:
        return self._end_node
    @EndNode.setter
    def EndNode(self, end_node: Node):
        self._end_node = end_node
class MazeParser:
    """Parses a JSON maze description into a MazeDto holding a node graph.

    Expected JSON keys: "Map" (list of equally long row strings), "Start",
    "End" and "Obstacle" (single-character cell markers).
    """
    def __init__(self):
        self._maze = MazeDto()
        self._width = None       # number of columns per map row
        self._max_tiles = None   # total number of cells (rows * columns)
        self._start = None       # marker character of the start cell
        self._end = None         # marker character of the end cell
        self._obstacle = None    # marker character of impassable cells
        self._nodes = list()
    def GetMazeParameters(self) -> MazeDto:
        """Return the parsed maze data (map, nodes, start/end nodes)."""
        return self._maze
    def InitializeFromJson(self, map_json: str):
        """Initialize the parser from a JSON string.

        Raises MazeparsingError on empty input, malformed JSON or missing keys.
        """
        if len(map_json) < 1:
            logger.warning("Invalid map json")
            raise MazeparsingError(RuntimeError("Length of json object < 1"),
                                   self.InitializeFromJson.__name__)
        try:
            self._initializeMembersfromJson(map_json)
        except TypeError as e:
            logger.exception("TypeError: {}".format(e))
            raise MazeparsingError(e, self._initializeMembersfromJson.__name__)
        except json.JSONDecodeError as e:
            logger.exception(e.msg)
            raise MazeparsingError(e, self._initializeMembersfromJson.__name__)
        except KeyError as err:
            logger.exception("KeyError: {}".format(err))
            raise MazeparsingError(err, self._initializeMembersfromJson.__name__)
    def LoadJsonFileAndInitialize(self, filename: str):
        """Read *filename* and initialize the parser from its JSON content."""
        try:
            maze_json = self._loadFile(filename)
            self._initializeMembersfromJson(maze_json)
        except OSError as err:
            logger.exception("OS error: {}".format(err))
            raise MazeparsingError(err, self.InitializeFromJson.__name__)
        except KeyError as err:
            logger.exception("KeyError: {}".format(err))
            raise MazeparsingError(err, self.InitializeFromJson.__name__)
    def _loadFile(self, filename) -> str:
        """Return the raw text content of *filename*."""
        with open(filename, 'r') as f:
            map_json = f.read()
        return map_json
    def _initializeMembersfromJson(self, map_json: str):
        """Decode the JSON document and populate all parser members."""
        file = json.loads(map_json)
        logger.debug("File: %s", file)
        self._map = file['Map']
        self._maze.Maze = file['Map']
        logger.debug("Map: %s", self._map)
        self._start = file['Start']
        logger.debug("Start: %s", self._start)
        self._end = file['End']
        logger.debug("End: %s", self._end)
        # Width is taken from the first row; all rows are assumed equal length.
        self._width = len(self._map[0])
        logger.debug("Cols: %s", self._width)
        self._max_tiles = len(self._map) * self._width
        logger.debug("Tiles: %s", self._max_tiles)
        try:
            self._obstacle = file['Obstacle']
            logger.debug("Obstacles: %s", self._obstacle)
        except KeyError as e:
            logger.exception("KeyError: {}".format(e))
            raise MazeparsingError(e, self._initializeMembersfromJson.__name__)
    def CreateNodes(self):
        """Build the node graph for the loaded map.

        Raises MazeparsingError when no map has been loaded yet.
        """
        if len(self._maze.Maze) < 1:
            logger.error("No map loaded")
            raise MazeparsingError(RuntimeError("No map loaded"), self.CreateNodes.__name__)
        self._createNodes()
        self._findNodeNeighbours()
    def _createNodes(self):
        """Create one Node per map cell and record the start/end nodes."""
        nodes = []
        for y, row in enumerate(self._maze.Maze):
            for x, col in enumerate(row):
                # BUG FIX: the id was computed as ``y * 10 + x``, which is only
                # unique for maps up to 10 columns wide; use the actual width.
                node_id = y * self._width + x
                pos = (x, y)
                obstacle = (col == self._obstacle)
                node = Node(node_id, pos, obstacle)
                self._nodes.append(node)
                nodes.append(node)
                if col == self._start:
                    self._maze.StartNode = node
                    logger.debug("Found start node: {}".format(node.Pos))
                elif col == self._end:
                    self._maze.EndNode = node
                    logger.debug("Found end node: {}".format(node.Pos))
        self._maze.Nodes = nodes
    def _findNodeNeighbours(self):
        """Attach to every node its reachable (non-obstacle) neighbours.

        Candidates are the cells above, below, left and right; left/right
        candidates are skipped at the row borders, and candidates outside the
        map or marked as obstacles are filtered out.
        """
        try:
            for index, node in enumerate(self._maze.Nodes):
                candidates = list()
                candidates.append(index - self._width)  # cell above
                candidates.append(index + self._width)  # cell below
                if not (index % self._width == 0):
                    candidates.append(index - 1)        # cell to the left
                if not (index % self._width == self._width - 1):
                    candidates.append(index + 1)        # cell to the right
                neighbours = [self._maze.Nodes[i] for i in candidates
                              if i >= 0 and i < self._max_tiles
                              and not self._maze.Nodes[i].Obstacle]
                node.Neighbours = neighbours
        except KeyError as err:
            logger.exception("KeyError: {}".format(err))
            raise MazeparsingError(err, self._findNodeNeighbours.__name__)
# | 2.84375 | 3
# unsupervised_lensing/models/DCAE_Nets.py (from DeepLense-Unsupervised/unsupervised-lensing)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DCA(nn.Module):
    """Deep convolutional autoencoder with a 1000-dim bottleneck.

    The encoder compresses the input image through three convolutions and two
    linear layers; the decoder mirrors it with three transposed convolutions
    and a Tanh output.
    """
    def __init__(self, no_channels=1):
        super(DCA, self).__init__()
        encoder_layers = [
            nn.Conv2d(no_channels, 16, 7, stride=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, 7, stride=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, 7),
            nn.Flatten(),
            nn.Linear(5184, 1000),
            nn.BatchNorm1d(1000),
            nn.Linear(1000, 5184),
        ]
        decoder_layers = [
            nn.ConvTranspose2d(64, 32, 7),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, 7, stride=3, padding=1, output_padding=2),
            nn.ReLU(),
            nn.ConvTranspose2d(16, no_channels, 6, stride=3, padding=1, output_padding=2),
            nn.Tanh(),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        self.decoder = nn.Sequential(*decoder_layers)
    def forward(self, x):
        # Encode to a flat 5184-vector, reshape to (64, 9, 9) feature maps,
        # then decode back to image space.
        code = self.encoder(x)
        return self.decoder(code.reshape(-1, 64, 9, 9))
# | 2.71875 | 3
# projectCreation/import_images.py (from MattSkiff/aerial_wildlife_detection)
'''
Helper function that imports a set of unlabeled images into the database.
Works recursively (i.e., with images in nested folders) and different file
formats and extensions (.jpg, .JPEG, .png, etc.).
Skips images that have already been added to the database.
Using this script requires the following steps:
1. Make sure your images are of common format and readable by the web
server (i.e., convert camera RAW images first).
2. Copy your image folder into the FileServer's root file directory (i.e.,
corresponding to the path under "staticfiles_dir" in the configuration
*.ini file).
3. Call the script from the AIDE code base on the FileServer instance.
2019-21 <NAME>
'''
import os
import argparse
from psycopg2 import sql
from util.helpers import VALID_IMAGE_EXTENSIONS, listDirectory
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Import images into database.')
    parser.add_argument('--project', type=str,
                        help='Shortname of the project to insert the images into.')
    parser.add_argument('--settings_filepath', type=str, default='config/settings.ini', const=1, nargs='?',
                        help='Manual specification of the directory of the settings.ini file; only considered if environment variable unset (default: "config/settings.ini").')
    args = parser.parse_args()
    # setup
    print('Setup...')
    # The config path must be exported before the AIDE modules below are
    # imported, since they read it at import time.
    if not 'AIDE_CONFIG_PATH' in os.environ:
        os.environ['AIDE_CONFIG_PATH'] = str(args.settings_filepath)
    from tqdm import tqdm
    import datetime
    from util.configDef import Config
    from modules import Database
    currentDT = datetime.datetime.now()
    currentDT = '{}-{}-{} {}:{}:{}'.format(currentDT.year, currentDT.month, currentDT.day, currentDT.hour, currentDT.minute, currentDT.second)
    config = Config()
    dbConn = Database(config)
    if not dbConn.canConnect():
        raise Exception('Error connecting to database.')
    project = args.project
    # check if running on file server
    imgBaseDir = config.getProperty('FileServer', 'staticfiles_dir')
    if not os.path.isdir(imgBaseDir):
        raise Exception(f'"{imgBaseDir}" is not a valid directory on this machine. Are you running the script from the file server?')
    if not imgBaseDir.endswith(os.sep):
        imgBaseDir += os.sep
    # locate all images and their base names
    print('Locating image paths...')
    imgs = set()
    imgFiles = listDirectory(imgBaseDir, recursive=True) #glob.glob(os.path.join(imgBaseDir, '**'), recursive=True) #TODO: check if correct
    imgFiles = list(imgFiles)
    for i in tqdm(imgFiles):
        if os.path.isdir(i):
            continue
        _, ext = os.path.splitext(i)
        if ext.lower() not in VALID_IMAGE_EXTENSIONS:
            continue
        # Store paths relative to the file server root.
        baseName = i.replace(imgBaseDir, '')
        imgs.add(baseName)
    # ignore images that are already in database
    print('Filter images already in database...')
    imgs_existing = dbConn.execute(sql.SQL('''
        SELECT filename FROM {};
    ''').format(sql.Identifier(project, 'image')), None, 'all')
    if imgs_existing is not None:
        imgs_existing = set([i['filename'] for i in imgs_existing])
    else:
        imgs_existing = set()
    imgs = list(imgs.difference(imgs_existing))
    # Wrap each filename in a 1-tuple as required by the bulk insert below.
    imgs = [(i,) for i in imgs]
    # push image to database
    print('Adding to database...')
    dbConn.insert(sql.SQL('''
        INSERT INTO {} (filename)
        VALUES %s;
    ''').format(sql.Identifier(project, 'image')),
    imgs)
    print('Done.')
# client_example.py (from coolmian/PyWeChatSpy)
import requests
import base64
# Demo flow: open WeChat, fetch the login QR code, then log out and close.
resp = requests.get("http://localhost:5000/open_wechat").json()
print(resp)
input()  # wait for manual login
resp = requests.get(f"http://localhost:5000/get_login_qrcode/{resp['port']}").json()
print(resp)
# The QR code is returned base64-encoded; decode it to a PNG on disk.
with open("qrcode.png", "wb") as wf:
    wf.write(base64.b64decode(resp["qrcode"]))
input()
resp = requests.get("http://localhost:5000/user_logout/0").json()
print(resp)
input()
resp = requests.get("http://localhost:5000/close_wechat/0").json()
print(resp)
def a():
    """Generator that prints 111 before its first yield and 222 before its second."""
    for marker in (111, 222):
        print(marker)
        yield
def b():
    """Generator that prints 333 before its first yield and 444 before its second."""
    for marker in (333, 444):
        print(marker)
        yield
# Demonstrates interleaving two generators: each next() resumes the
# corresponding generator until its next yield.
c = a()
d = b()
next(c)  # prints 111
next(d)  # prints 333
next(c)  # prints 222
next(d)
# datahub/company/apps.py (from Staberinde/data-hub-api)
from django.apps import AppConfig
class CompanyConfig(AppConfig):
    """Configuration class for this app."""
    name = 'datahub.company'
    def ready(self):
        """Registers the signal receivers for this app.
        This is the preferred way to register signal receivers in the Django documentation.
        """
        # Imported lazily: importing the signals module registers its receivers
        # as a side effect, which must only happen once the app registry is ready.
        import datahub.company.signals  # noqa: F401
# | 1.90625 | 2
# testdroid/__init__.py (from lastverb/testdroid-api-client-python)
# -*- coding: utf-8 -*-
import os
import sys
import requests
import logging
import time
import base64
import imghdr
if sys.version_info[0] > 2:
import http.client
else:
import httplib
assert httplib
from optparse import OptionParser
from datetime import datetime
__version__ = '2.100.0'
# Plain-message log format keeps the CLI output free of timestamps/levels.
FORMAT = "%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('testdroid')
logger.setLevel(logging.INFO)
class RequestTimeout(Exception):
    """Raised when a download/upload request exceeds its time budget."""
    def __init__(self, msg):
        super().__init__(msg)
class CloudConnectionError(Exception):
    """Raised when the connection to the cloud service is lost or refused."""
    def __init__(self, msg):
        super().__init__(msg)
class RequestResponseError(Exception):
    """Raised for non-2xx API responses; keeps the HTTP status code."""
    def __init__(self, msg, status_code):
        super().__init__(f"Request Error: code {status_code}: {msg}")
        self.status_code = status_code
class APIException(Exception):
    """Raised for API-level failures; keeps the associated status code."""
    def __init__(self, msg, status_code):
        super().__init__(f"APIException: code {status_code}: {msg}")
        self.status_code = status_code
def ts_format(timestamp):
    """Render a unix timestamp as a local, human-readable string.

    Values longer than 11 digits are taken to be in milliseconds and are
    converted to seconds first.
    """
    seconds = timestamp / 1000 if len(str(timestamp)) > 11 else timestamp
    return datetime.fromtimestamp(seconds).strftime('%x %X %z')
#
# Inspiration from https://code.google.com/p/corey-projects/source/browse/trunk/python2/progress_bar.py
#
class DownloadProgressBar:
    """Console progress bar for downloads: bytes done, elapsed time, E.T.A."""
    def __init__(self):
        self.percent_done = 0
        self.started = time.time()  # wall-clock start, used for E.T.A.
        self.prog_bar = ' []'
        self.fill_char = '#'
        self.width = 40  # total character width of the bar itself
        self.pos = 0
        self.total = 0
        self.eta = 'N/A'
        self.duration = None
    def update(self, pos, total):
        """Redraw the bar for *pos* bytes downloaded out of *total*."""
        self.pos = pos
        self.total = total
        percent_done = int(round(100.0 * pos / total)) if total > 0 else 0
        all_full = self.width - 2
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = ' [' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        # Overlay the percentage text roughly centered inside the bar.
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.duration = int(round(time.time()-self.started))
        # E.T.A. is only estimated past 5% completion, to avoid wild values.
        self.eta = int(round(self.duration / (percent_done / 100.0)))-self.duration if percent_done > 5 else 'N/A'
        self.prog_bar = self.prog_bar[0:pct_place] + \
            (pct_string + self.prog_bar[pct_place + len(pct_string):])
        self.prog_bar += ' %s/%s bytes, %ss' % (self.pos, self.total, self.duration)
        if pos < total:
            self.prog_bar += ' (E.T.A.: %ss)' % self.eta
        else:
            self.prog_bar += ' '
        # Windows consoles lack the ANSI cursor-up escape used below.
        if sys.platform.lower().startswith('win'):
            print(str(self) + '\r')
        else:
            print(str(self) + chr(27) + '[A')
    def __str__(self):
        return str(self.prog_bar)
class Testdroid:
    """Client for the Bitbar (Testdroid) cloud REST API."""
    # Cloud URL (not including API path)
    url = None
    # Api Key for authentication
    api_key = None
    # Oauth access token
    access_token = None
    # Oauth refresh token
    refresh_token = None
    # Unix timestamp (seconds) when token expires
    token_expiration_time = None
    # Buffer size used for downloads
    download_buffer_size = 65536
    # polling interval when awaiting for test run completion
    polling_interval_mins = 10
    # Set of statuses allowing use of file
    __accepted_virus_scan_statuses = {'safe', 'disabled', None}
    def __init__(self, **kwargs):
        """ Constructor, defaults against cloud.bitbar.com """
        # Either an API key or username/password can be supplied; the API key
        # takes precedence when building request headers.
        self.api_key = kwargs.get('apikey')
        self.username = kwargs.get('username')
        self.password = kwargs.get('password')
        self.cloud_url = kwargs.get('url') or "https://cloud.bitbar.com"
        self.download_buffer_size = kwargs.get('download_buffer_size') or 65536
    def set_apikey(self, apikey):
        # API-key auth takes precedence over username/password when set.
        self.api_key = apikey
    def set_username(self, username):
        self.username = username
    def set_password(self, password):
        self.password = password
    def set_url(self, url):
        # Cloud base URL without the "/api/v2" path.
        self.cloud_url = url
    def set_download_buffer_size(self, download_buffer_size):
        self.download_buffer_size = download_buffer_size
    def set_polling_interval_mins(self, polling_interval_mins):
        self.polling_interval_mins = polling_interval_mins
def get_token(self):
""" Get Oauth2 token """
if not self.access_token:
# TODO: refresh
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "password",
"username": self.username,
"password": <PASSWORD>
}
res = requests.post(
url,
data=payload,
headers={"Accept": "application/json"}
)
if res.status_code not in list(range(200, 300)):
raise RequestResponseError(res.text, res.status_code)
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
elif self.token_expiration_time < time.time():
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "refresh_token",
"refresh_token": self.refresh_token
}
res = requests.post(
url,
data=payload,
headers={"Accept": "application/json"}
)
if res.status_code not in list(range(200, 300)):
print("FAILED: Unable to get a new access token using refresh token")
self.access_token = None
return self.get_token()
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
return self.access_token
def __build_headers(self):
""" Helper method for getting necessary headers to use for API calls, including authentication """
if self.api_key:
apikey = {'Authorization': 'Basic %s' % base64.b64encode((self.api_key+":")
.encode(encoding='utf_8')).decode(),
'Accept': 'application/json'}
return apikey
else:
return {'Authorization': 'Bearer %s' % self.get_token(), 'Accept': 'application/json'}
    def download(self, path=None, filename=None, payload=None, callback=None):
        """Download a file from an API resource to *filename*.

        *callback(pos, total)* is invoked per chunk for progress reporting; it
        is disabled when the server sends no Content-length header. Raises
        RequestResponseError on non-2xx responses, RequestTimeout on timeouts
        and CloudConnectionError on connection failures.
        """
        if payload is None:
            payload = {}
        url = "%s/api/v2/%s" % (self.cloud_url, path)
        try:
            res = requests.get(url, params=payload, headers=self.__build_headers(), stream=True, timeout=60.0)
            if res.status_code in range(200, 300):
                try:
                    total = res.headers['Content-length']
                    logger.info("Downloading %s (%s bytes)" % (filename, total))
                except KeyError as e:
                    # Without a known total size the progress callback would
                    # divide by an unknown total, so disable it.
                    callback = None
                pos = 0
                # Check if the system is Windows or not.
                if os.name == 'nt':
                    fd = os.open(filename, os.O_RDWR | os.O_CREAT | os.O_BINARY)
                else:
                    fd = os.open(filename, os.O_RDWR | os.O_CREAT)
                for chunk in res.iter_content(self.download_buffer_size):
                    os.write(fd, chunk)
                    if callback:
                        pos += len(chunk)
                        callback(pos, total)
                        # Throttle progress redraws.
                        time.sleep(0.1)
                os.close(fd)
            else:
                raise RequestResponseError(res.text, res.status_code)
            res.close()
        except requests.exceptions.Timeout:
            logger.info("")
            logger.info("Download has failed. Please try to restart your download")
            raise RequestTimeout("Download has failed. Please try to restart your download")
        except requests.exceptions.ConnectionError:
            logger.info("")
            logger.info("Download has failed. Please try to restart your download")
            raise CloudConnectionError("Download has failed. Please try to restart your download")
    def upload(self, path=None, filename=None):
        """Upload the local file *filename* to the given API resource path.

        Returns the decoded JSON response. Raises RequestResponseError on
        non-2xx responses.
        """
        # TODO: where's the error handling?
        with open(filename, 'rb') as f:
            url = "%s/api/v2/%s" % (self.cloud_url, path)
            files = {'file': f}
            res = requests.post(url, files=files, headers=self.__build_headers())
            if res.status_code not in list(range(200, 300)):
                raise RequestResponseError(res.text, res.status_code)
            return res.json()
    def get(self, path, payload=None, headers=None):
        """GET an API resource; returns JSON when requested, raw text otherwise.

        Raises RequestResponseError on non-2xx responses.
        """
        if payload is None:
            payload = {}
        # Normalize paths that already contain the "v2/" API prefix down to
        # the relative part, since __get_request_params re-adds "/api/v2/".
        # NOTE(review): this keeps only the text after the FIRST "v2/"; paths
        # containing "v2/" elsewhere would be truncated -- confirm intent.
        if path.find('v2/') >= 0:
            cut_path = path.split('v2/')
            path = cut_path[1]
        (url, headers) = self.__get_request_params(path, headers)
        res = requests.get(url, params=payload, headers=headers)
        if res.status_code not in list(range(200, 300)):
            raise RequestResponseError(res.text, res.status_code)
        logger.debug(res.text)
        if headers['Accept'] == 'application/json':
            return res.json()
        else:
            return res.text
    def post(self, path=None, payload=None, headers=None):
        """POST *payload* to an API resource and return the decoded JSON.

        Raises RequestResponseError on non-2xx responses.
        """
        (url, headers) = self.__get_request_params(path, headers)
        res = requests.post(url, payload, headers=headers)
        if res.status_code not in list(range(200, 300)):
            raise RequestResponseError(res.text, res.status_code)
        return res.json()
    def delete(self, path=None, headers=None):
        """DELETE an API resource.

        Returns the raw Response object (not JSON), unlike get()/post().
        Raises RequestResponseError on non-2xx responses.
        """
        (url, headers) = self.__get_request_params(path, headers)
        res = requests.delete(url, headers=headers)
        if res.status_code not in list(range(200, 300)):
            raise RequestResponseError(res.text, res.status_code)
        return res
def __get_request_params(self, path, headers):
if headers is None:
headers = {}
return ("%s/api/v2/%s" % (self.cloud_url, path),
dict(list(self.__build_headers().items()) + list(headers.items())))
    def get_me(self):
        """ Returns user details """
        return self.get("me")
    # For the paged endpoints below, limit=0 requests all items.
    def get_device_groups(self, limit=0):
        """ Returns list of device groups """
        return self.get("me/device-groups", payload={'limit': limit})
    def get_devices_from_group(self, device_group_id, limit=0):
        """ Returns list of devices from device group """
        me = self.get_me()
        path = "users/%s/device-groups/%s/devices" % (me['id'], device_group_id)
        return self.get(path, payload={'limit': limit})
    def get_frameworks(self, limit=0):
        """ Returns list of frameworks """
        return self.get("me/available-frameworks", payload={'limit': limit})
    def get_devices(self, limit=0):
        """ Returns list of devices """
        return self.get(path="devices", payload={'limit': limit})
    def print_input_files(self, limit=0):
        """ Print input files """
        for input_file in self.get_input_files(limit)['data']:
            print("id:{} name:{} size:{} type:{}".format(
                input_file['id'], input_file['name'], input_file['size'], input_file['inputType']))
    def print_device_groups(self, limit=0):
        """ Print device groups """
        for device_group in self.get_device_groups(limit)['data']:
            print("%s %s %s %s devices" %
                  (str(device_group['id']).ljust(12), device_group['displayName'].ljust(30),
                   device_group['osType'].ljust(10), device_group['deviceCount']))
    def print_available_free_android_devices(self, limit=0):
        """ Print available free Android devices """
        # Free == zero credits price; locked devices are excluded.
        print("")
        print("Available Free Android Devices")
        print("------------------------------")
        for device in self.get_devices(limit)['data']:
            if device['creditsPrice'] == 0 and not device['locked'] and device['osType'] == "ANDROID":
                print(device['displayName'])
        print("")
    def print_available_frameworks(self, limit=0):
        """ Print available frameworks """
        print("")
        print("Available frameworks")
        print("------------------------------")
        for framework in self.get_frameworks(limit)['data']:
            print("id: {}\tosType:{}\tname:{}".format(framework['id'], framework['osType'], framework['name']))
        print("")
    def print_available_free_ios_devices(self, limit=0):
        """ Print available free iOS devices """
        print("")
        print("Available Free iOS Devices")
        print("--------------------------")
        for device in self.get_devices(limit)['data']:
            if device['creditsPrice'] == 0 and not device['locked'] and device['osType'] == "IOS":
                print(device['displayName'])
        print("")
    def print_available_free_devices(self, limit=0):
        """ Print available free devices """
        self.print_available_free_android_devices(limit)
        self.print_available_free_ios_devices(limit)
    def create_project(self, project_name, project_type=None):
        """Create a project; *project_type* is kept only for backward
        compatibility and is ignored."""
        if project_type:
            print("Project type is deprecated and not used anymore")
        project = self.post(path="me/projects", payload={"name": project_name})
        logger.info("Project %s: %s created" % (project['id'], project['name']))
        return project
    def delete_project(self, project_id):
        """ Delete a project """
        # get_project raises on a missing project, so the existence check is
        # effectively performed by the lookup itself.
        project = self.get_project(project_id)
        if project:
            self.delete("me/projects/%s" % project_id)
    def get_projects(self, limit=0):
        """ Returns projects for user """
        return self.get(path="me/projects", payload={'limit': limit})
    def get_project(self, project_id):
        """ Returns a single project """
        return self.get("me/projects/%s" % project_id)
    def print_projects(self, limit=0):
        """ Print projects """
        me = self.get_me()
        print("Projects for %s %s <%s>:" % (me['firstName'], me['lastName'], me['email']))
        for project in self.get_projects(limit)['data']:
            print("%s \"%s\"" % (str(project['id']).ljust(10), project['name']))
    def get_file(self, file_id):
        """ Get file """
        return self.get("me/files/%s" % file_id)
    def upload_file(self, filename, timeout=300, skip_scan_wait=False):
        """Upload an application file and (by default) wait up to *timeout*
        seconds for its virus scan to finish before returning it."""
        me = self.get_me()
        path = "users/%s/files" % (me['id'])
        file = self.upload(path=path, filename=filename)
        if not skip_scan_wait:
            self.wait_for_virus_scan([file], timeout)
        return file
def wait_for_virus_scan(self, api_files, timeout=300):
""" Wait for virus scan of all files in a collection """
loop_end = time.time() + timeout
while time.time() < loop_end:
statuses = set()
for file in api_files:
current_status = self.__get_virus_scan_status(file)
if current_status in self.__accepted_virus_scan_statuses:
statuses.add(current_status)
else: # get status after refreshing
statuses.add(self.__get_virus_scan_status(self.get_file(file['id'])))
if 'infected' in statuses:
raise APIException(400, 'File rejected by virus scan')
if self.__accepted_virus_scan_statuses.issuperset(statuses):
return
time.sleep(1)
raise APIException(408, 'Waiting for virus scan timed out')
@staticmethod
def __get_virus_scan_status(api_file):
return next((p['value'] for p in api_file['fileProperties'] if p['key'] == 'virus_scan_status'), None)
    def validate_test_run_config(self, test_run_config):
        """ Get test run config """
        # Submits the config for server-side validation without starting a run.
        path = "me/runs/config"
        return self.post(path=path, payload=test_run_config, headers={'Content-type': 'application/json',
                                                                      'Accept': 'application/json'})
    def start_test_run_using_config(self, test_run_config):
        """ Start a test run using test run config
        e.g '{"frameworkId":12252,
              "osType": "ANDROID",
              "projectId":1234,
              "files":[{"id":9876}, {"id":5432}]
              "testRunParameters":[{"key":"xyz", "value":"abc"}],
              "deviceGroupId":6854
             }'
        client.start_test_run_using_config(json.dumps({"frameworkId":123213}))
        """
        me = self.get_me()
        path = "users/%s/runs" % (me['id'])
        test_run = self.post(path=path, payload=test_run_config, headers={'Content-type': 'application/json',
                                                                          'Accept': 'application/json'})
        return test_run
    def start_wait_test_run(self, test_run_config):
        """ Start a test run on a device group and wait for completion """
        test_run = self.start_test_run_using_config(test_run_config)
        self.wait_test_run(test_run['projectId'], test_run['id'])
        return test_run
    def start_wait_download_test_run(self, test_run_config):
        """ Start a test run on a device group, wait for completion and download results """
        test_run = self.start_wait_test_run(test_run_config)
        self.download_test_run(test_run['projectId'], test_run['id'])
    def wait_test_run(self, project_id, test_run_id):
        """ Awaits completion of the given test run """
        if test_run_id:
            print("Awaiting completion of test run with id {}. Will wait forever polling every {}.".format(
                test_run_id,
                '{} minutes'.format(self.polling_interval_mins) if self.polling_interval_mins != 1 else 'minute'))
            while True:
                time.sleep(self.polling_interval_mins * 60)
                # WORKAROUND: access token thinks it's still valid,
                # whilst the server rejects it as expired/invalid. Force a
                # fresh token on every poll when using OAuth (no API key).
                if not self.api_key:
                    self.access_token = None
                    self.get_token()  # in case it expired
                test_run_status = self.get_test_run(project_id, test_run_id)
                if test_run_status and 'state' in test_run_status:
                    if test_run_status['state'] == "FINISHED":
                        print("The test run with id: %s has FINISHED" % test_run_id)
                        break
                    elif test_run_status['state'] == "WAITING":
                        print("[%s] The test run with id: %s is awaiting to be scheduled" %
                              (time.strftime("%H:%M:%S"), test_run_id))
                        continue
                    elif test_run_status['state'] == "RUNNING":
                        print("[%s] The test run with id: %s is running" % (time.strftime("%H:%M:%S"), test_run_id))
                        continue
                # NOTE(review): exiting the whole process from library code is
                # surprising for callers -- consider raising instead.
                print("Couldn't establish the state of the test run with id: %s. Aborting" % test_run_id)
                print(test_run_status)
                sys.exit(1)
    def start_device_session(self, device_model_id):
        """ Start device sessions """
        payload = {'deviceModelId': device_model_id}
        return self.post("me/device-sessions", payload)
    def stop_device_session(self, device_session_id):
        """ Stop device session """
        return self.post("me/device-sessions/%s/release" % device_session_id)
    def get_project_test_runs(self, project_id, limit=0):
        """ Get all test runs for a project """
        return self.get(path="me/projects/%s/runs" % project_id, payload={'limit': limit})
    def print_project_test_runs(self, project_id, limit=0):
        """ Print test runs of a project to console """
        test_runs = self.get_project_test_runs(project_id, limit)['data']
        for test_run in test_runs:
            print("%s %s %s %s" % (str(test_run['id']).ljust(10), ts_format(test_run['createTime']),
                                   test_run['displayName'].ljust(30), test_run['state']))
    def get_test_run(self, project_id, test_run_id):
        """ Get a single test run """
        return self.get("me/projects/%s/runs/%s" % (project_id, test_run_id))
    def retry_test_run(self, project_id, test_run_id, device_session_ids=None):
        """ Re-run an already-existing test run. Specify individual device session IDs to only re-run those devices. """
        endpoint = "me/projects/%s/runs/%s/retry" % (project_id, test_run_id)
        if device_session_ids:
            # deviceRunIds[] is a repeated query parameter, one per session id.
            endpoint += "?deviceRunIds[]=" + "&deviceRunIds[]=".join(str(device_id) for device_id in device_session_ids)
        return self.post(endpoint)
    def abort_test_run(self, project_id, test_run_id):
        """ Abort a test run """
        return self.post("me/projects/%s/runs/%s/abort" % (project_id, test_run_id))
    def get_device_sessions(self, project_id, test_run_id, limit=0):
        """ Return device sessions for a project """
        return self.get(path="me/projects/%s/runs/%s/device-sessions" %
                             (project_id, test_run_id), payload={'limit': limit})
def get_device_runs(self, project_id, test_run_id, limit=0):
""" ***DEPRECATED***
Return device sessions for a project
use get_device_sessions() instead
"""
return self.get_device_sessions(project_id, test_run_id, limit)
def get_device_session_screenshots_list(self, project_id, test_run_id, device_session_id, limit=0):
""" Downloads screenshots list for a device session """
return self.get("me/projects/%s/runs/%s/device-sessions/%s/screenshots" %
(project_id, test_run_id, device_session_id), payload={'limit': limit})
def get_device_run_screenshots_list(self, project_id, test_run_id, device_run_id, limit=0):
""" ***DEPRECATED***
Downloads screenshots list for a device run
use get_device_run_screenshots_list() instead
"""
return self.get_device_session_screenshots_list(project_id, test_run_id, device_run_id, limit)
def get_device_session_files(self, project_id, test_run_id, device_session_id, tags=None):
""" Get list of files for device session """
if tags is None:
return self.get("me/projects/%s/runs/%s/device-sessions/%s/output-file-set/files" %
(project_id, test_run_id, device_session_id))
else:
return self.get("me/projects/%s/runs/%s/device-sessions/%s/output-file-set/files?tag[]=%s" %
(project_id, test_run_id, device_session_id, tags))
def get_device_run_files(self, project_id, test_run_id, device_session_id, tags=None):
    """ ***DEPRECATED***
    Get list of files for device run
    use get_device_session_files() instead
    """
    # Backward-compatible alias; delegates to the renamed API.
    return self.get_device_session_files(project_id, test_run_id, device_session_id, tags)
def get_input_files(self, limit=0):
    """Return the list of uploaded input files."""
    path = "me/files?limit={}&filter=s_direction_eq_INPUT".format(limit)
    return self.get(path)
def download_test_run(self, project_id, test_run_id):
    """ Downloads test run files to a directory hierarchy """
    test_run = self.get_test_run(project_id, test_run_id)
    device_sessions = self.get_device_sessions(project_id, test_run_id)
    logger.info("")
    logger.info("Test run %s: \"%s\" has %s device sessions:" %
                (test_run['id'], test_run['displayName'], len(device_sessions['data'])))
    for device_session in device_sessions['data']:
        state = device_session['state']
        logger.info("")
        logger.info("%s \"%s\" %s" % (device_session['id'], device_session['device']['displayName'], state))
        # Only sessions that have reached a terminal state have files to fetch.
        if state in ("ABORTED", "TIMEOUT", "WARNING", "SUCCEEDED", "FAILED", "EXCLUDED"):
            directory = "%s-%s/%d-%s" % (test_run_id, test_run['displayName'], device_session['id'],
                                         device_session['device']['displayName'])
            session_id = device_session['id']
            files = self.get_device_session_files(project_id, test_run_id, session_id)
            self.__download_files(files, directory)
        else:
            logger.info("Device session hasn't ended - Skipping file downloads")
    logger.info("")
def __download_files(self, files, directory):
    """Download every READY file from a file-list response into *directory*.

    Args:
        files: API response dict carrying a 'data' list of file descriptors.
        directory: target directory, created on demand.
    """
    for file in files['data']:
        if file['state'] == "READY":
            full_path = "%s/%s" % (directory, file['name'])
            if not os.path.exists(directory):
                os.makedirs(directory)
            url = "me/files/%s/file" % (file['id'])
            prog = DownloadProgressBar()
            self.download(url, full_path, callback=lambda pos, total: prog.update(int(pos), int(total)))
            print("")
        else:
            logger.info("File %s is not ready" % file['name'])
    # BUG FIX: the previous check used len(files), i.e. the number of keys in
    # the response dict, which is never zero; test the actual file list.
    if len(files['data']) == 0:
        logger.info("No files to download")
    logger.info("")
def download_test_screenshots(self, project_id, test_run_id):
    """ Downloads test run screenshots """
    test_run = self.get_test_run(project_id, test_run_id)
    device_sessions = self.get_device_sessions(project_id, test_run_id)
    logger.info("Test run %s: \"%s\" has %s device sessions:" %
                (test_run['id'], test_run['displayName'], len(device_sessions['data'])))
    for device_session in device_sessions['data']:
        logger.info("%s \"%s\" %s" %
                    (device_session['id'], device_session['device']['displayName'], device_session['state']))
    logger.info("")
    for device_session in device_sessions['data']:
        # Only sessions in a terminal state have screenshots worth fetching.
        if device_session['state'] in ["SUCCEEDED", "FAILED", "ABORTED", "WARNING", "TIMEOUT"]:
            directory = "%s-%s/%d-%s/screenshots" % (test_run['id'], test_run['displayName'],
                                                     device_session['id'], device_session['device']['displayName'])
            screenshots = self.get_device_session_screenshots_list(project_id, test_run_id, device_session['id'])
            no_screenshots = True
            for screenshot in screenshots['data']:
                no_screenshots = False
                full_path = "%s/%s" % (directory, screenshot['originalName'])
                if not os.path.exists(directory):
                    os.makedirs(directory)
                if not os.path.exists(full_path):
                    self.__download_screenshot(project_id, test_run['id'], device_session['id'], screenshot['id'],
                                               full_path)
                else:
                    # Earlier downloaded images are checked, and if needed re-downloaded.
                    # The bare 'raise' below runs with no active exception, so it
                    # raises a RuntimeError that the broad except turns into a
                    # re-download of any file imghdr cannot identify as jpeg/png.
                    try:
                        if imghdr.what(full_path) in ['jpeg', 'png']:
                            logger.info("Screenshot %s already exists - skipping download" % full_path)
                        else:
                            raise
                    except:
                        self.__download_screenshot(project_id, test_run['id'], device_session['id'],
                                                   screenshot['id'], full_path)
            if no_screenshots:
                logger.info("Device %s has no screenshots - skipping" % device_session['device']['displayName'])
        else:
            logger.info("Device %s has errored or has not finished - skipping" %
                        device_session['device']['displayName'])
def __download_screenshot(self, project_id, test_run_id, device_session_id, screenshot_id, full_path):
    # Fetch a single screenshot to full_path, reporting progress on stdout.
    url = "me/projects/%s/runs/%s/device-sessions/%s/screenshots/%s" % \
          (project_id, test_run_id, device_session_id, screenshot_id)
    prog = DownloadProgressBar()
    self.download(url, full_path, callback=lambda pos, total: prog.update(int(pos), int(total)))
    print("")
# Thin wrappers over the /me/access-groups and /me/*/share REST endpoints.
def get_access_groups(self):
    """ Get access groups """
    return self.get("me/access-groups")

def get_access_group(self, access_group_id):
    """ Get access group by id """
    return self.get("me/access-groups/{}".format(access_group_id))

def create_access_group(self, access_group_name, access_group_scope="USER"):
    """ Create access group """
    group = self.post(path="me/access-groups", payload={"name": access_group_name, "scope": access_group_scope})
    return group

def update_access_group(self, access_group_id, access_group_name, access_group_scope):
    """ Update access group """
    # TODO: what if group_name or group_scope aren't provided??
    group = self.post(path="me/access-groups/{}".format(access_group_id),
                      payload={"name": access_group_name, "scope": access_group_scope})
    return group

def delete_access_group(self, access_group_id):
    """ Delete access group """
    return self.delete(path="me/access-groups/{}".format(access_group_id))

def get_access_group_resources(self, access_group_id):
    """ Get access group resources by id """
    return self.get("me/access-groups/{}/resources".format(access_group_id))

def get_access_group_resource(self, access_group_id, resource_id):
    """ Get resource from access group """
    return self.get("me/access-groups/{}/resources/{}".format(access_group_id, resource_id))

def delete_access_group_resource(self, access_group_id, resource_id):
    """ Delete resource from access group """
    return self.delete("me/access-groups/{}/resources/{}".format(access_group_id, resource_id))

def get_access_group_users(self, access_group_id):
    """ Get access group users """
    return self.get("me/access-groups/{}/users".format(access_group_id))

def add_access_group_user(self, access_group_id, email):
    """ Add user to access group """
    return self.post("me/access-groups/{}/users".format(access_group_id), payload={"email": email})

def get_access_group_user(self, access_group_id, user_id):
    """ Get user from access group """
    return self.get("me/access-groups/{}/users/{}".format(access_group_id, user_id))

def delete_access_group_user(self, access_group_id, user_id):
    """ Delete user from access group """
    return self.delete("me/access-groups/{}/users/{}".format(access_group_id, user_id))

def share_device_group(self, device_group_id, access_group_id):
    """ Share device group with access group """
    return self.post("me/device-groups/{}/share".format(device_group_id),
                     payload={"accessGroupId": access_group_id})

def share_file_set(self, file_set_id, access_group_id):
    """ Share file set with access group """
    return self.post("me/file-sets/{}/share".format(file_set_id), payload={"accessGroupId": access_group_id})

def share_file(self, file_id, access_group_id):
    """ Share file with access group """
    return self.post("me/files/{}/share".format(file_id), payload={"accessGroupId": access_group_id})

def share_project(self, project_id, access_group_id):
    """ Share project with access group """
    return self.post("me/projects/{}/share".format(project_id), payload={"accessGroupId": access_group_id})
def get_parser(self):
    """Build the OptionParser used by the command-line interface."""
    # Subclass keeps the epilog's hand-made formatting instead of re-wrapping it.
    class MyParser(OptionParser):
        def format_epilog(self, formatter):
            return self.epilog
    usage = "usage: %prog [options] <command> [arguments...]"
    description = "Client for Bitbar Cloud API v2"
    epilog = """
Commands:
me                                      Get user details
available-free-devices                  Print list of currently available free devices
device-groups                           Get list of your device groups
create-project <name>
delete-project <id>                     Delete a project
projects                                Get projects
get-file <file-id>                      Get file details
upload-file <filename> <timeout> <skip-scan-wait>
                                        Upload file
                                        waits for virus scan unless skip-scan-wait is True (default: False)
                                        up to given timeout (default: 300s)
wait-for-virus-scan <files> <timeout>   Wait for virus scan of list of files to finish
                                        up to given timeout (default: 300s)
start-wait-download-test-run <test_run_config>
                                        Start a test run, await completion (polling) and download results
wait-test-run <project-id> <test-run-id> Await completion (polling) of the test run
test-runs <project-id>                  Get test runs for a project
test-run <project-id> <test-run-id>     Get test run details
get_device_sessions <project-id> <test-run-id>
                                        Get device sessions for a test run
device-runs <project-id> <test-run-id>  ***DEPRECATED*** Get device runs for a test run
download-test-run <project-id> <test-run-id>
                                        Download test run data. Data will be downloaded to
                                        current directory in a structure:
                                        [test-run-id]/[device-session-id]-[device-name]/files...
download-test-screenshots <project-id> <test-run-id>
                                        Download test run screenshots. Screenshots will be downloaded to
                                        current directory in a structure:
                                        [test-run-id]/[device-session-id]-[device-name]/screenshots/...
access-groups                           Get access groups
access-group <access-group-id>          Get an access group by id
access-group-create <name> <scope>      Create a new access group
access-group-update <access-group-id> <name> <scope>
                                        Update an access group
access-group-delete <access-group-id>   Delete an access group
access-group-resources <access-group-id> Get resources in an access group
access-group-resource <access-group-id> <resource-id>
                                        Get a resource in an access group by id
access-group-resource-remove <access-group-id> <resource-id>
                                        Remove a resource from an access group
access-group-users <access-group-id>    Get users in an access group
access-group-users-get <access-group-id> <user-id>
                                        Get a user in an access group
access-group-users-add <access-group-id> <user-email>
                                        Add a user to an access group
access-group-users-remove <access-group-id> <user-email>
                                        Remove a user from an access group
share-device-group <device-group-id> <access-group-id>
                                        Share a device group with an access group
share-file-set <file-set-id> <access-group-id>
                                        Share a file set with an access group
share-file <file-id> <access-group-id>  Share a file with an access group
share-project <project-id> <access-group-id>
                                        Share a project with an access group
"""
    parser = MyParser(usage=usage, description=description, epilog=epilog, version="%s %s" % ("%prog", __version__))
    parser.add_option("-k", "--apikey", dest="apikey",
                      help="API key - the API key for Bitbar Cloud. Optional. "
                           "You can use environment variable TESTDROID_APIKEY as well.")
    parser.add_option("-u", "--username", dest="username",
                      help="Username - the email address. Optional. "
                           "You can use environment variable TESTDROID_USERNAME as well.")
    parser.add_option("-p", "--password", dest="password",
                      help="Password. Required if username is used. "
                           "You can use environment variable TESTDROID_PASSWORD as well.")
    parser.add_option("-c", "--url", dest="url", default="https://cloud.bitbar.com",
                      help="Cloud endpoint. Default is https://cloud.bitbar.com. "
                           "You can use environment variable TESTDROID_URL as well.")
    parser.add_option("-i", "--interval", dest="interval",
                      help="How frequently the status of a test run should be checked (in minutes). "
                           "Can be used with the command wait-test-run.")
    parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
                      help="Quiet mode")
    parser.add_option("-d", "--debug", action="store_true", dest="debug",
                      help="Turn on debug level logging")
    return parser
def get_commands(self):
    """Return the mapping of CLI command name -> bound handler method."""
    return {
        "me": self.get_me,
        "device-groups": self.print_device_groups,
        "available-free-devices": self.print_available_free_devices,
        "available-frameworks": self.print_available_frameworks,
        "projects": self.print_projects,
        "create-project": self.create_project,
        "delete-project": self.delete_project,
        "get-file": self.get_file,
        "upload-file": self.upload_file,
        "wait-for-virus-scan": self.wait_for_virus_scan,
        "validate-test-run-config": self.validate_test_run_config,
        "start-test-run-using-config": self.start_test_run_using_config,
        "start-wait-download-test-run": self.start_wait_download_test_run,
        "wait-test-run": self.wait_test_run,
        "test-run": self.get_test_run,
        "test-runs": self.print_project_test_runs,
        "device-sessions": self.get_device_sessions,
        "device-session-files": self.get_device_session_files,
        "device-runs": self.get_device_runs,
        "device-run-files": self.get_device_run_files,
        "list-input-files": self.print_input_files,
        "download-test-run": self.download_test_run,
        "access-groups": self.get_access_groups,
        "access-group": self.get_access_group,
        "access-group-create": self.create_access_group,
        "access-group-update": self.update_access_group,
        "access-group-delete": self.delete_access_group,
        "access-group-resources": self.get_access_group_resources,
        "access-group-resource": self.get_access_group_resource,
        "access-group-resource-remove": self.delete_access_group_resource,
        "access-group-users": self.get_access_group_users,
        "access-group-users-add": self.add_access_group_user,
        "access-group-users-get": self.get_access_group_user,
        "access-group-users-remove": self.delete_access_group_user,
        "share-device-group": self.share_device_group,
        "share-file-set": self.share_file_set,
        "share-file": self.share_file,
        "share-project": self.share_project,
    }
def cli(self, parser, commands):
    """Parse command-line arguments and dispatch to the matching command.

    Args:
        parser: OptionParser built by get_parser().
        commands: mapping of command name -> bound method (get_commands()).
    """
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        sys.exit(1)
    if options.debug:
        logger.setLevel(logging.DEBUG)
        if sys.version_info[0] > 2:
            http.client.HTTPConnection.debuglevel = 1
        else:
            httplib.HTTPConnection.debuglevel = 1
        logging.getLogger().setLevel(logging.DEBUG)
        requests_log = logging.getLogger("requests.packages.urllib3")
        requests_log.setLevel(logging.DEBUG)
        requests_log.propagate = True
    if options.quiet:
        logger.setLevel(logging.WARNING)
    # CLI options take precedence over environment variables, except for the
    # URL, where the environment wins (behaviour preserved as-is).
    username = options.username or os.environ.get('TESTDROID_USERNAME')
    password = options.password or os.environ.get('TESTDROID_PASSWORD')
    apikey = options.apikey or os.environ.get('TESTDROID_APIKEY')
    url = os.environ.get('TESTDROID_URL') or options.url
    try:
        polling_interval_mins = max(int(options.interval), 1)
    except (TypeError, ValueError):
        # --interval missing or not an integer: fall back to the default.
        polling_interval_mins = 10
    self.set_username(username)
    self.set_password(password)
    self.set_apikey(apikey)
    self.set_url(url)
    self.set_polling_interval_mins(polling_interval_mins)
    # BUG FIX: commands[args[0]] raised KeyError for unknown commands before
    # the guard below could ever run; .get() lets the help text be printed.
    command = commands.get(args[0])
    if not command:
        parser.print_help()
        sys.exit(1)
    print(command(*args[1:]) or "")
def main():
    """Entry point: build the CLI parser/commands and dispatch."""
    client = Testdroid()
    client.cli(client.get_parser(), client.get_commands())
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 2.46875 | 2 |
libs/garden/garden.zbarcam/tests/test_zbarcam.py | Zer0897/keepstock | 0 | 12757792 | <filename>libs/garden/garden.zbarcam/tests/test_zbarcam.py
import os
import unittest
import mock
from kivy.base import EventLoop
from kivy.core.image import Image
from zbarcam import ZBarCam
# Directory holding the image fixtures used by the tests below.
FIXTURE_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'fixtures')
# Kivy needs a window before textures can be created, see:
# https://github.com/kivy/kivy/blob/1.10.1/doc/sources/faq.rst
EventLoop.ensure_window()
class TestZBarCam(unittest.TestCase):
    # Exercises ZBarCam._detect_qrcode_frame against image fixtures.

    def setUp(self):
        # Patch the AnchorLayout constructor so ZBarCam can be instantiated
        # without building the full Kivy widget tree.
        with mock.patch('kivy.uix.anchorlayout.AnchorLayout.__init__'):
            self.zbarcam = ZBarCam()

    def test_detect_qrcode_frame_no_qrcode(self):
        """
        Checks `_detect_qrcode_frame()` returns empty list on no qrcode.
        """
        fixture_path = os.path.join(FIXTURE_DIR, 'no_qr_code.png')
        texture = Image(fixture_path).texture
        code_types = self.zbarcam.code_types
        symbols = self.zbarcam._detect_qrcode_frame(texture, code_types)
        self.assertEqual(symbols, [])

    def test_detect_qrcode_frame_one_qrcode(self):
        """
        Checks `_detect_qrcode_frame()` can detect one qrcode.
        """
        fixture_path = os.path.join(FIXTURE_DIR, 'one_qr_code.png')
        texture = Image(fixture_path).texture
        code_types = self.zbarcam.code_types
        symbols = self.zbarcam._detect_qrcode_frame(texture, code_types)
        self.assertEqual(
            symbols,
            [ZBarCam.Symbol(type='QRCODE', data=b'zbarlight test qr code')])

    def test_detect_qrcode_frame_one_qrcode_one_ean(self):
        """
        Checks `_detect_qrcode_frame()` can detect one qrcode and one ean.
        """
        fixture_path = os.path.join(FIXTURE_DIR, 'one_qr_code_and_one_ean.png')
        texture = Image(fixture_path).texture
        code_types = self.zbarcam.code_types
        symbols = self.zbarcam._detect_qrcode_frame(texture, code_types)
        # currently detects no codes, but that's a bug
        # (the test pins today's behaviour, not the desired one)
        self.assertEqual(symbols, [])

    def test_detect_qrcode_frame_two_qrcodes(self):
        """
        Checks `_detect_qrcode_frame()` can detect two qrcodes.
        """
        fixture_path = os.path.join(FIXTURE_DIR, 'two_qr_codes.png')
        texture = Image(fixture_path).texture
        code_types = self.zbarcam.code_types
        symbols = self.zbarcam._detect_qrcode_frame(texture, code_types)
        Symbol = ZBarCam.Symbol
        self.assertEqual(
            symbols, [
                Symbol(type='QRCODE', data=b'second zbarlight test qr code'),
                Symbol(type='QRCODE', data=b'zbarlight test qr code'),
            ]
        )
| 2.25 | 2 |
label_studio_ml/examples/text_classification_backends/model_text_token_classification.py | abhinavthomas/label-studio-ml-backend | 1 | 12757793 | import json
import logging
import os
import random
import spacy
from spacy.training import Example
from tqdm.auto import tqdm
from label_studio_ml.model import LabelStudioMLBase
logging.basicConfig(level=logging.INFO)
class SimpleNER(LabelStudioMLBase):
    """Simple spaCy-based named-entity-recognition backend for Label Studio."""

    def __init__(self, **kwargs):
        # don't forget to initialize base class...
        super(SimpleNER, self).__init__(**kwargs)
        # Parsed label config contains only one output of <Labels> type.
        assert len(self.parsed_label_config) == 1
        self.from_name, self.info = list(self.parsed_label_config.items())[0]
        assert self.info['type'] == 'Labels'
        # the model has only one textual input
        assert len(self.info['to_name']) == 1
        assert len(self.info['inputs']) == 1
        assert self.info['inputs'][0]['type'] == 'Text'
        self.to_name = self.info['to_name'][0]
        self.value = self.info['inputs'][0]['value']
        if not self.train_output:
            # Cold start: blank spaCy pipeline seeded with the configured labels.
            self.reset_model()
            self.labels = self.info['labels']
            list(map(self.ner.add_label, self.labels))
            print('Initialized with from_name={from_name}, to_name={to_name}, labels={labels}'.format(
                from_name=self.from_name, to_name=self.to_name, labels=str(
                    self.labels)
            ))
        else:
            # Warm start: load the pipeline and label set from the last training.
            # NOTE(review): self.ner is only set by reset_model(), so it is
            # undefined on this branch -- confirm nothing reads it before fit().
            self.model_file = self.train_output['model_file']
            self.model = spacy.load(self.model_file)
            self.labels = self.train_output['labels']
            print('Loaded from train output with from_name={from_name}, to_name={to_name}, labels={labels}'.format(
                from_name=self.from_name, to_name=self.to_name, labels=str(
                    self.labels)
            ))

    def reset_model(self):
        """Replace the current pipeline with a blank English NER pipeline."""
        self.model = spacy.blank("en")
        self.model.add_pipe("ner")
        self.ner = self.model.get_pipe("ner")
        self.new_model = True

    def predict(self, tasks, **kwargs):
        """Run NER over each task's text and return Label Studio predictions."""
        predictions = []
        for task in tasks:
            doc = self.model(task['data'][self.value])
            # One result item per detected entity span.
            result = [{
                'from_name': self.from_name,
                'to_name': self.to_name,
                'type': 'labels',
                'value': {"start": ent.start_char, "end": ent.end_char, "text": ent.text, 'labels': [ent.label_]}
            } for ent in doc.ents]
            predictions.append({'result': result})
        print(predictions)
        return predictions

    def fit(self, completions, workdir=None, **kwargs):
        """Retrain the NER model from scratch on the given annotations.

        Returns a dict with the trained label list and saved model path.
        """
        train_data = []
        _labels = []
        self.reset_model()
        if self.new_model:
            optimizer = self.model.begin_training()
        else:
            optimizer = self.model.resume_training()
        for completion in completions:
            # Skip tasks whose first annotation was skipped/cancelled.
            if completion['annotations'][0].get('skipped') or completion['annotations'][0].get('was_cancelled'):
                continue
            # Collect (start, end, label) spans from every annotation result.
            output_labels = []
            for annotation in completion['annotations']:
                for result in annotation['result']:
                    start = result['value']['start']
                    end = result['value']['end']
                    for label in result['value']['labels']:
                        output_labels.append((start, end, label))
                        _labels.append(label)
            train_data.append((completion['data'][self.value], {
                'entities': output_labels}))
        new_labels = set(_labels)
        # BUG FIX: the previous check compared only the *sizes* of the label
        # sets, missing the case where a label was renamed (same count,
        # different labels); compare the sets themselves.
        if new_labels != set(self.labels):
            self.labels = list(sorted(new_labels))
            print('Label set has been changed:' + str(self.labels))
        # Training for 30 iterations
        for _ in tqdm(range(30)):
            random.shuffle(train_data)
            for raw_text, entities in train_data:
                doc = self.model.make_doc(raw_text)
                example = Example.from_dict(doc, entities)
                self.model.update([example], sgd=optimizer)
        # save spaCy pipeline to model file
        model_file = os.path.join(workdir, 'model')
        self.model.to_disk(model_file)
        train_output = {
            'labels': self.labels,
            'model_file': model_file
        }
        return train_output
| 2.5625 | 3 |
test/test_platform.py | SebastianDang/PyBall | 74 | 12757794 | import pytest
from pyball import PyBall
from pyball.models.config import Platform
@pytest.fixture(scope='module')
def test_platform():
    # Shared fixture: one live API call per test module.
    pyball = PyBall()
    return pyball.get_platforms()

def test_get_platform_returns_platform(test_platform):
    # Indexing [0] assumes the API returns at least one platform.
    assert isinstance(test_platform, list)
    assert isinstance(test_platform[0], Platform)
| 2.3125 | 2 |
main.py | Adoliin/bacweb-parser | 1 | 12757795 | <gh_stars>1-10
import os
import requests
import bs4
import sys
arguments = sys.argv
# -- GLOBAL VARIABLES --
# NOTE(review): some entries below contain mojibake (e.g. 'Ã\x89ducation')
# carried over from the source site's encoding; they are compared against
# scraped page text, so they are left byte-for-byte untouched.
optionList = [
    'Allemand',
    'Espagnol',
    'Russe',
    'chinois',
    'Turque',
    'Italien',
    'Ã\x89ducation Musicale',
    'Arts & Plastiques',
    'Théâtre'
]
# Section slugs indexed by menu number - 1.
sections_g = [
    'math',
    'science',
    'economie',
    'technique',
    'lettres',
    'sport',
    'info',
]
# All downloads land under ./bac
bacDir = os.path.join(os.getcwd(), 'bac')
def main():
    """Download the chosen bac section(s) from bacweb.tn."""
    choice = menu()
    subjects = getSubjectList()
    if choice == 8:
        # "ALL": iterate over every section 1..7.
        for section in range(1, 8):
            getSection(subjects, section)
    else:
        getSection(subjects, choice)
def menu():
    """Prompt until the user enters a valid menu number; return it as int."""
    print('Choose section(s) to download:')
    print('[1] Math')
    print('[2] Science')
    print('[3] Economie')
    print('[4] Technique')
    print('[5] Lettres')
    print('[6] Sport')
    print('[7] Info')
    print('[8] ALL')
    valid_choices = {'1', '2', '3', '4', '5', '6', '7', '8'}
    while True:
        ans = input('--> ')
        if ans in valid_choices:
            return int(ans)
        print('You must pick a number from the menu!')
def getSubjectList():
    """Scrape bacweb.tn and return the <tr> rows of the subjects table."""
    mainPageSource = requests.get('http://www.bacweb.tn/section.htm')
    soup = bs4.BeautifulSoup(mainPageSource.text, 'lxml')
    return soup.find_all('tbody')[0].find_all('tr')
def getProjectDir(section):
    """Create (if needed) and chdir into bac/bac-<section>; return its path."""
    target = os.path.join(bacDir, 'bac-{}'.format(section))
    if not os.path.exists(target):
        os.makedirs(target)
    os.chdir(target)
    return target
def getSection(subjectList, sectionNum):
    """Download every non-optional subject of one section."""
    sectionName = sections_g[sectionNum-1]
    # projectDir is shared with getSujet()/getYear() via the module global.
    global projectDir
    projectDir = getProjectDir(sectionName)
    print(f'\n~~~Downloading "{sectionName}" section:~~~')
    for subject in subjectList:
        sectionList = subject.find_all('td')
        try:
            subjectName = sectionList[0].text
        except:
            # Row without cells (e.g. header/spacer): skip silently.
            pass
        else:
            # Column sectionNum of the row holds this section's link cell.
            sectionSubject = sectionList[sectionNum].select('a')
            if len(sectionSubject) != 0:
                linkToSubject = 'http://www.bacweb.tn/'+sectionSubject[0]['href']
                if subjectName in optionList:
                    # Optional subjects (languages/arts) are skipped.
                    pass
                else:
                    getSubject(linkToSubject, subjectName)
def getSubject(linkToSubject, subjectName):
    """Download all exam years of one subject for the current section."""
    print(f'Downloading all of "{subjectName}" exams of current section.')
    subjectPageSource = requests.get(linkToSubject)
    soup = bs4.BeautifulSoup(subjectPageSource.text, 'lxml')
    yearsList = soup.find_all('tr')
    for year in yearsList:
        subjectsByYear = year.find_all('td')
        try:
            yearNumber = int(subjectsByYear[0].text)
        except:
            # First cell is not a year (header row etc.): skip.
            pass
        else:
            getYear(yearNumber, subjectsByYear)
def getYear(yearNumber, subjectsByYear):
    """Download the sujet/corrigé PDFs of a single exam year.

    Creates <projectDir>/<year>/{principale,controle} and fetches the four
    table columns (principale sujet/corrigé, contrôle sujet/corrigé).

    Args:
        yearNumber: exam year as an int.
        subjectsByYear: the <td> cells of that year's table row.
    """
    yearNumberDir = os.path.join(projectDir, str(yearNumber))
    # os.makedirs(exist_ok=True) replaces the repeated exists()/makedirs() pattern.
    os.makedirs(yearNumberDir, exist_ok=True)
    os.chdir(yearNumberDir)
    for promotion in ('principale', 'controle'):
        os.makedirs(os.path.join(yearNumberDir, promotion), exist_ok=True)
    # Columns 1-2 belong to the "principale" session, columns 3-4 to "controle".
    for column, promotion in ((1, 'principale'), (2, 'principale'),
                              (3, 'controle'), (4, 'controle')):
        links = subjectsByYear[column].find_all('a')
        getSujet(links, yearNumberDir, promotion)
    os.chdir(projectDir)
def getSujet(sujet, yearNumberDir, promotion):
    """Download a single sujet/corrigé PDF into the promotion sub-folder.

    Args:
        sujet: list of <a> tags (possibly empty) pointing at the PDF.
        yearNumberDir: the year directory created by getYear().
        promotion: 'principale' or 'controle'.
    """
    import subprocess
    if len(sujet) != 0:
        sujetLink = 'http://www.bacweb.tn/' + sujet[0]['href']
        p = sujetLink.rindex('/')
        sujetName = sujetLink[p + 1:]
        promotionDir = os.path.join(yearNumberDir, promotion)
        os.chdir(promotionDir)
        sujetDir = os.path.join(promotionDir, sujetName)
        if not os.path.exists(sujetDir):
            # SECURITY/PORTABILITY FIX: the URL comes from a scraped page, so
            # avoid passing it through a shell (os.system with '&>' was also a
            # bash-only redirection); invoke wget directly and silence its output.
            subprocess.run(['wget', sujetLink],
                           stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        os.chdir(projectDir)
# Run the scraper only when executed as a script.
if __name__ == '__main__':
    main()
| 2.96875 | 3 |
empower/managers/projectsmanager/appshandler.py | EstefaniaCC/empower-runtime-5g-essence-multicast | 0 | 12757796 | <filename>empower/managers/projectsmanager/appshandler.py<gh_stars>0
#!/usr/bin/env python3
#
# Copyright (c) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Exposes a RESTful interface ."""
import uuid
import empower.managers.apimanager.apimanager as apimanager
# pylint: disable=W0223
class AppAttributesHandler(apimanager.EmpowerAPIHandler):
    """Access applications' attributes."""

    URLS = [r"/api/v1/projects/([a-zA-Z0-9-]*)/apps/([a-zA-Z0-9-]*)/"
            "([a-zA-Z0-9_]*)/?"]

    @apimanager.validate(min_args=3, max_args=3)
    def get(self, *args, **kwargs):
        """Access a particular property of an application.
        Args:
            [0]: the project id (mandatory)
            [1]: the app id (mandatory)
            [2]: the attribute of the app to be accessed (mandatory)
        Example URLs:
            GET /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
                7069c865-8849-4840-9d96-e028663a5dcf/stats
            [
                {
                    "last_run": "2019-08-23 09:46:52.361966"
                }
            ]
        """
        project_id = uuid.UUID(args[0])
        project = self.service.projects[project_id]
        service_id = uuid.UUID(args[1])
        service = project.services[service_id]
        # Reject requests for attributes the service does not expose.
        if not hasattr(service, args[2]):
            raise KeyError("'%s' object has no attribute '%s'" %
                           (service.__class__.__name__, args[2]))
        return [getattr(service, args[2])]

    @apimanager.validate(returncode=204, min_args=3, max_args=3)
    def put(self, *args, **kwargs):
        """Set a particular property of an application.
        Args:
            [0]: the project id (mandatory)
            [1]: the app id (mandatory)
            [2]: the attribute of the app to be accessed (mandatory)
        Example URLs:
            PUT /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
                7069c865-8849-4840-9d96-e028663a5dcf/stats
            {
                "version": "1.0",
                "value": {
                    "last_run": "2019-08-23 09:46:52.361966"
                }
            }
        """
        project_id = uuid.UUID(args[0])
        project = self.service.projects[project_id]
        service_id = uuid.UUID(args[1])
        service = project.services[service_id]
        if not hasattr(service, args[2]):
            raise KeyError("'%s' object has no attribute '%s'" %
                           (service.__class__.__name__, args[2]))
        # setattr returns None; the decorator's 204 response carries no body.
        return setattr(service, args[2], kwargs["value"])
# pylint: disable=W0223
class AppsHandler(apimanager.EmpowerAPIHandler):
    """Applications handler."""

    URLS = [r"/api/v1/projects/([a-zA-Z0-9-]*)/apps/?",
            r"/api/v1/projects/([a-zA-Z0-9-]*)/apps/([a-zA-Z0-9-]*)/?"]

    @apimanager.validate(min_args=1, max_args=2)
    def get(self, *args, **kwargs):
        """List the apps.
        Args:
            [0]: the project id (mandatory)
            [1]: the app id (optional)
        Example URLs:
            GET /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
            [
                {
                    "counters": {},
                    "name":
                        "empower.apps.wifimobilitymanager.wifimobilitymanager",
                    "params": {
                        "every": 2000,
                        "project_id": "52313ecb-9d00-4b7d-b873-b55d3d9ada26",
                        "service_id": "7069c865-8849-4840-9d96-e028663a5dcf"
                    },
                    "stats": {
                        "last_run": "2019-08-23 09:45:20.234651"
                    }
                }
            ]
            GET /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
                7069c865-8849-4840-9d96-e028663a5dcf
            {
                "counters": {},
                "name": "empower.apps.wifimobilitymanager.wifimobilitymanager",
                "params": {
                    "every": 2000,
                    "project_id": "52313ecb-9d00-4b7d-b873-b55d3d9ada26",
                    "service_id": "7069c865-8849-4840-9d96-e028663a5dcf"
                },
                "stats": {
                    "last_run": "2019-08-23 09:47:04.361268"
                }
            }
        """
        project_id = uuid.UUID(args[0])
        project = self.service.projects[project_id]
        # With one arg return all services; with two, the requested one.
        return project.services \
            if len(args) == 1 else project.services[uuid.UUID(args[1])]

    @apimanager.validate(returncode=201, min_args=1, max_args=2)
    def post(self, *args, **kwargs):
        """Start a new app.
        Args:
            [0]: the project id (mandatory)
            [1]: the app id (optional)
        Request:
            version: protocol version (1.0)
            params: the list of parmeters to be set
        Example URLs:
            POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps
            {
                "version": "1.0",
                "name": "empower.apps.wifimobilitymanager.wifimobilitymanager",
                "params": {
                    "every": 5000
                }
            }
            POST /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
                7069c865-8849-4840-9d96-e028663a5dcf
            {
                "version": "1.0",
                "name": "empower.apps.wifimobilitymanager.wifimobilitymanager",
                "params": {
                    "every": 5000
                }
            }
        """
        project_id = uuid.UUID(args[0])
        project = self.service.projects[project_id]
        # If no app id is supplied, generate a fresh one.
        service_id = uuid.UUID(args[1]) if len(args) > 1 else uuid.uuid4()
        params = kwargs['params'] if 'params' in kwargs else {}
        service = project.register_service(service_id=service_id,
                                           name=kwargs['name'],
                                           params=params)
        self.set_header("Location", "/api/v1/projects/%s/apps/%s" %
                        (project.project_id, service.service_id))

    @apimanager.validate(returncode=204, min_args=2, max_args=2)
    def put(self, *args, **kwargs):
        """Update the configuration of an applications.
        Args:
            [0]: the project id (mandatory)
            [1]: the app id (mandatory)
        Request:
            version: protocol version (1.0)
            params: the list of parmeters to be set
        Example URLs:
            PUT /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
                7069c865-8849-4840-9d96-e028663a5dcf
            {
                "version": "1.0",
                "params": {
                    "every": 5000
                }
            }
        """
        project_id = uuid.UUID(args[0])
        project = self.service.projects[project_id]
        service_id = uuid.UUID(args[1])
        params = kwargs['params'] if 'params' in kwargs else {}
        project.reconfigure_service(service_id, params)

    @apimanager.validate(returncode=204, min_args=2, max_args=2)
    def delete(self, *args, **kwargs):
        """Stop an app.
        Args:
            [0]: the project id (mandatory)
            [1]: the app id (mandatory)
        Example URLs:
            DELETE /api/v1/projects/52313ecb-9d00-4b7d-b873-b55d3d9ada26/apps/
                7069c865-8849-4840-9d96-e028663a5dcf
        """
        project_id = uuid.UUID(args[0])
        project = self.service.projects[project_id]
        service_id = uuid.UUID(args[1])
        project.unregister_service(service_id)
| 2.015625 | 2 |
slick/utils/core.py | underscorephil/slick | 4 | 12757797 | from flask import g, session
from SoftLayer import TokenAuthentication, Client
def get_client():
    """Return a per-request SoftLayer API client bound to the session's token.

    The client is cached on flask.g for the lifetime of the request.

    Returns:
        SoftLayer.Client if the session holds credentials, otherwise None.
    """
    if not hasattr(g, 'client'):
        # BUG FIX: g.client was previously assigned only when the session held
        # credentials, so an unauthenticated request crashed with
        # AttributeError on the return below; default to None instead.
        g.client = None
        if session.get('sl_user_id'):
            auth = TokenAuthentication(session['sl_user_id'],
                                       session['sl_user_hash'])
            if auth:
                g.client = Client(auth=auth)
    return g.client
| 2.203125 | 2 |
sparkmagic/sparkmagic/tests/test_sessionmanager.py | sciserver/sparkmagic | 1,141 | 12757798 | <reponame>sciserver/sparkmagic<filename>sparkmagic/sparkmagic/tests/test_sessionmanager.py<gh_stars>1000+
import atexit
from mock import MagicMock, PropertyMock
from nose.tools import raises, assert_equals
import sparkmagic.utils.configuration as conf
from sparkmagic.livyclientlib.exceptions import SessionManagementException
from sparkmagic.livyclientlib.sessionmanager import SessionManager
# Looking up a session that was never added must raise.
@raises(SessionManagementException)
def test_get_client_throws_when_client_not_exists():
    manager = get_session_manager()
    manager.get_session("name")
# A session added under a name is returned by get_session for that name.
def test_get_client():
    client = MagicMock()
    manager = get_session_manager()
    manager.add_session("name", client)
    assert_equals(client, manager.get_session("name"))
# After deletion, looking the session up again must raise.
@raises(SessionManagementException)
def test_delete_client():
    client = MagicMock()
    manager = get_session_manager()
    manager.add_session("name", client)
    manager.delete_client("name")
    manager.get_session("name")
# Deleting a session that was never added must raise.
@raises(SessionManagementException)
def test_delete_client_throws_when_client_not_exists():
    manager = get_session_manager()
    manager.delete_client("name")
# Registering the same name twice must raise.
@raises(SessionManagementException)
def test_add_client_throws_when_client_exists():
    client = MagicMock()
    manager = get_session_manager()
    manager.add_session("name", client)
    manager.add_session("name", client)
# get_sessions_list returns all registered names (order not guaranteed,
# hence the set comparison).
def test_client_names_returned():
    client = MagicMock()
    manager = get_session_manager()
    manager.add_session("name0", client)
    manager.add_session("name1", client)
    assert_equals({"name0", "name1"}, set(manager.get_sessions_list()))
# With exactly one session registered, get_any_session returns it.
def test_get_any_client():
    client = MagicMock()
    manager = get_session_manager()
    manager.add_session("name", client)
    assert_equals(client, manager.get_any_session())
# get_any_session is only defined for exactly one session: zero raises...
@raises(SessionManagementException)
def test_get_any_client_raises_exception_with_no_client():
    manager = get_session_manager()
    manager.get_any_session()
# ...and more than one raises as well (ambiguous which to return).
@raises(SessionManagementException)
def test_get_any_client_raises_exception_with_two_clients():
    client = MagicMock()
    manager = get_session_manager()
    manager.add_session("name0", client)
    manager.add_session("name1", client)
    manager.get_any_session()
# clean_up_all must call delete() on every registered session exactly once.
def test_clean_up():
    client0 = MagicMock()
    client1 = MagicMock()
    manager = get_session_manager()
    manager.add_session("name0", client0)
    manager.add_session("name1", client1)
    manager.clean_up_all()
    client0.delete.assert_called_once_with()
    client1.delete.assert_called_once_with()
# When the cleanup_all_sessions_on_exit config flag is set, the manager
# registers an atexit hook; simulating interpreter exit must delete every
# session and announce it on the ipython display.
def test_cleanup_all_sessions_on_exit():
    conf.override(conf.cleanup_all_sessions_on_exit.__name__, True)
    client0 = MagicMock()
    client1 = MagicMock()
    manager = get_session_manager()
    manager.add_session("name0", client0)
    manager.add_session("name1", client1)
    atexit._run_exitfuncs()
    client0.delete.assert_called_once_with()
    client1.delete.assert_called_once_with()
    manager.ipython_display.writeln.assert_called_once_with(u"Cleaning up livy sessions on exit is enabled")
def test_cleanup_all_sessions_on_exit_fails():
    """
    Cleanup on exit is best effort only. When cleanup fails, exception is caught and error is logged.
    """
    conf.override(conf.cleanup_all_sessions_on_exit.__name__, True)
    client0 = MagicMock()
    client1 = MagicMock()
    client0.delete.side_effect = Exception('Mocked exception for client1.delete')
    manager = get_session_manager()
    manager.add_session("name0", client0)
    manager.add_session("name1", client1)
    atexit._run_exitfuncs()
    client0.delete.assert_called_once_with()
    # NOTE(review): the first failing delete appears to abort the loop, so
    # the second session is never cleaned up — confirm this is intended.
    client1.delete.assert_not_called()
# A registered session yields a non-None id via get_session_id_for_client.
def test_get_session_id_for_client():
    manager = get_session_manager()
    manager.get_sessions_list = MagicMock(return_value=["name"])
    manager._sessions["name"] = MagicMock()
    id = manager.get_session_id_for_client("name")
    assert id is not None
# Reverse lookup (id + endpoint -> name): returns None before the session
# exists, and the registered name afterwards.
def test_get_session_name_by_id_endpoint():
    manager = get_session_manager()
    id_to_search = "0"
    endpoint_to_search = "endpoint"
    name_to_search = "name"
    name = manager.get_session_name_by_id_endpoint(id_to_search, endpoint_to_search)
    assert_equals(None, name)
    session = MagicMock()
    type(session).id = PropertyMock(return_value=int(id_to_search))
    session.endpoint = endpoint_to_search
    manager.add_session(name_to_search, session)
    name = manager.get_session_name_by_id_endpoint(id_to_search, endpoint_to_search)
    assert_equals(name_to_search, name)
# Unknown names map to a None session id rather than raising.
def test_get_session_id_for_client_not_there():
    manager = get_session_manager()
    manager.get_sessions_list = MagicMock(return_value=[])
    id = manager.get_session_id_for_client("name")
    assert id is None
# Shared fixture: a SessionManager wired to a mock ipython display so the
# tests above can assert on its output.
def get_session_manager():
    ipython_display = MagicMock()
    return SessionManager(ipython_display)
| 2.046875 | 2 |
test/test_core.py | jlerat/pybomwater | 0 | 12757799 | import unittest
import requests
import bom_water.bom_water as bm
import os
from pathlib import Path
import shapely
from bom_water.spatial_util import spatail_utilty
class test_core(unittest.TestCase):
    """Integration tests for the BoM water-data SOS service client.

    NOTE(review): these tests hit the live BoM web service — they require
    network access and will fail offline.
    """
    # def __init__(self):
    #     super(test_core, self).__init__(self)
    #     self.setUp()
    @classmethod
    def setUpClass(self):
        # Remove any cached GetCapabilities response before the run.
        # NOTE(review): os.path.join discards Path.home() because the second
        # argument is absolute (leading '/'), so this targets
        # /bom_water/cache/... — confirm which path was intended.
        remove_file = os.path.join(Path.home(), '/bom_water/cache/waterML_GetCapabilities.json')
        if os.path.exists(remove_file):
            os.remove(remove_file)
    # def test_user_path(self):
    #     from pathlib import Path
    #     print(Path.home())
    def test_bom_service(self):
        '''Test that the service is up
        :rtype: None
        '''
        _bm = bm.BomWater()
        try:
            response = _bm.request(_bm.actions.GetCapabilities)
            if response.status_code == 200:
                assert True, "Test BoM service passed"
            else:
                assert False, f'Test BoM service failed with status_code: {response.status_code}'
        # NOTE(review): RequestException is the base class of ConnectionError
        # and Timeout, so the two later handlers are unreachable.
        except requests.exceptions.RequestException as e:
            assert False, f'Test BoM service failed with RequestException: {e}'
        except requests.exceptions.ConnectionError as ece:
            assert False, f'Test BoM service failed with ConnectionError: {ece}'
        except requests.exceptions.Timeout as et:
            assert False, f'Test BoM service failed with Timeout: {et}'
    def test_get_capabilities(self):
        '''Get Capabilities test'''
        _bm = bm.BomWater()
        response = _bm.request(_bm.actions.GetCapabilities)
        test_json = _bm.xml_to_json(response.text)#, f'test_GetCapabilities.json')
        actions = test_json['sos:Capabilities']['ows:OperationsMetadata']['ows:Operation']
        # Every advertised operation (except DescribeSensor) should map onto
        # an attribute of the client's actions object.
        for action in actions:
            for property, value in vars(_bm.actions).items():
                if not action['@name'] == 'DescribeSensor':
                    if property == action['@name']:
                        print(value)
                        assert True, f'Test GetCapabilities passed'
                        continue
            # NOTE(review): `continue` above only skips the inner loop, so
            # this failure assertion fires even after a match — verify the
            # intended control flow.
            assert False, f'Test GetCapabilities, falied to get action: expected {action}'
    def test_get_feature_of_interest(self):
        '''Get Feature of interest test'''
        _bm = bm.BomWater()
        '''Todo: Need a small bounding box with known stations contained'''
        response = _bm.request(_bm.actions.GetFeatureOfInterest,
                               "http://bom.gov.au/waterdata/services/stations/GW036501.2.2")
        test_json = _bm.xml_to_json(response.text)#, f'test_GetFeatureOfInterest.json')
        features = test_json['soap12:Envelope']['soap12:Body']['sos:GetFeatureOfInterestResponse'][
            'sos:featureMember']
        long_statioId = features['wml2:MonitoringPoint']['gml:identifier']['#text']
        if os.path.basename(long_statioId) == 'GW036501.2.2':
            assert True, "Test GetFeatureOfInterest passed"
        else:
            assert False, "Test GetFeatureOfInterest falied"
    def test_get_data_availability(self):
        '''Get Data availability test'''
        # NOTE(review): stub only — no request is made and nothing asserted.
        _bm = bm.BomWater()
    def test_get_observation(self):
        '''Get Observation test'''
        # NOTE(review): stub only — no request is made and nothing asserted.
        _bm = bm.BomWater()
    def test_create_feature_geojson_list(self):
        # Bounding box covering south-eastern Australia.
        _bom = bm.BomWater()
        response = _bom.request(_bom.actions.GetFeatureOfInterest, None, None, None, None, None, "-37.505032 140.999283", "-28.157021 153.638824" )
        response_json = _bom.xml_to_json(response.text)
        # NOTE(review): hardcoded per-user Windows path — this will fail on
        # any other machine; should use a temp dir or fixture path.
        folder = f'C:\\Users\\fre171\\Documents\\pyBOMwater_dummyData\\test_stations.json'
        _bom.create_feature_list(response_json, folder )
# Allow running this test module directly with `python test_core.py`.
if __name__ == '__main__':
    unittest.main()
_ForGitHub/Others/Data_augmentation.py | FedericoZocco/VarMemLBFGS-PyTorch | 1 | 12757800 | <gh_stars>1-10
"""
Implemented by <NAME>
Last update: 25/03/2020
Example on how to do data augmentation using Keras. Here the "TRASH" dataset is
considered. The final dataset is 21 times bigger than the original one
(i.e. 21*2527 samples/images).
"""
import random
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
# Random-transform generator: rotations, shifts, shear, zoom and horizontal
# flips, filling exposed pixels with the nearest value.
train_datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# NOTE(review): X_trash / y_trash are not defined in this file — this script
# assumes they are loaded beforehand (e.g. in a notebook session); confirm.
X_trash_augmented = X_trash
y_trash_augmented = y_trash
# Append 20 random transforms per original sample, giving a dataset 21x the
# original size as described in the module docstring.
# NOTE(review): np.append copies the whole array each iteration, so this loop
# is quadratic in memory traffic — consider collecting into a list and
# concatenating once.
for i in range(20*len(X_trash)):
    sample = random.randrange(len(X_trash))
    new_image = train_datagen.random_transform(X_trash[sample,:,:,:])
    X_trash_augmented = np.append(X_trash_augmented, np.asarray([new_image]), axis=0)
    y_trash_augmented = np.append(y_trash_augmented, np.asarray([y_trash[sample]]), axis=0)
world/weather/tests.py | tellg/arxcode | 5 | 12757801 | from __future__ import unicode_literals
from mock import Mock
from world.weather.models import WeatherType, WeatherEmit
from server.utils.test_utils import ArxCommandTest
from world.weather import weather_commands, weather_script, utils
from evennia.server.models import ServerConfig
class TestWeatherCommands(ArxCommandTest):
    """Command-level tests for the weather system.

    setUp seeds two weather types (with one emit each) and pins the current
    and target weather pattern/intensity in ServerConfig so command output
    is deterministic.
    """
    def setUp(self):
        super(TestWeatherCommands,self).setUp()
        self.weather1 = WeatherType.objects.create(name='Test', gm_notes='Test weather')
        self.emit1 = WeatherEmit.objects.create(weather=self.weather1,
                                                text='Test1 weather happens.')
        self.weather2 = WeatherType.objects.create(name='Test2', gm_notes='Test weather')
        self.emit2 = WeatherEmit.objects.create(weather=self.weather2,
                                                text='Test2 weather happens.')
        # Current pattern is weather type 1 moving towards type 2, both at
        # intensity 5 (values referenced by the expected strings below).
        ServerConfig.objects.conf('weather_type_current', value=1)
        ServerConfig.objects.conf('weather_intensity_current', value=5)
        ServerConfig.objects.conf('weather_type_target', value=2)
        ServerConfig.objects.conf('weather_intensity_target', value=5)
    def test_cmd_adminweather(self):
        # Exercise each switch of @admin_weather and assert its exact reply.
        self.setup_cmd(weather_commands.CmdAdminWeather, self.char1)
        self.call_cmd("", "Weather pattern is Test (intensity 5), moving towards Test2 (intensity 5).")
        self.call_cmd("/lock", "Weather is now locked and will not change.")
        self.call_cmd("/unlock", "Weather is now unlocked and will change again as normal.")
        self.call_cmd("/set Pigs soar through the sky.", "Custom weather emit set. "
                                                        "Remember to @admin_weather/announce if you want the "
                                                        "players to know.")
        self.call_cmd("/set", "Custom weather message cleared. Remember to @admin_weather/announce if you want the "
                              "players to see a new weather emit.")
    def test_weather_utils(self):
        # advance_weather should move intensity below the seeded value of 5.
        new_weather, new_intensity = utils.advance_weather()
        assert(new_intensity < 5)
| 2.515625 | 3 |
fabfile.py | rebkwok/openprescribing | 0 | 12757802 | from fabric.api import run, sudo
from fabric.api import prefix, warn, abort
from fabric.api import settings, task, env, shell_env
from fabric.context_managers import cd
from datetime import datetime
import json
import os
import requests
# Fabric connection settings: single production host, SSH agent forwarding
# enabled, coloured error output, remote user 'hello'.
env.hosts = ['web2.openprescribing.net']
env.forward_agent = True
env.colorize_errors = True
env.user = 'hello'
# Deploy target name -> remote application (and supervisor program) name.
environments = {
    'production': 'openprescribing',
    'staging': 'openprescribing_staging'
}
# This zone ID may change if/when our account changes
# Run `fab list_cloudflare_zones` to get a full list
ZONE_ID = "198bb61a3679d0e1545e838a8f0c25b9"
# Newrelic Apps
NEWRELIC_APPIDS = {
    'production': '45170403',
    'staging': '45937313',
    'test': '45170011'
}
def notify_slack(message):
    """Post *message* to the team's #general Slack channel.

    The incoming-webhook URL is read from the SLACK_GENERAL_POST_KEY
    environment variable; a non-200 response raises ValueError.
    """
    # Set the webhook_url to the one provided by Slack when you create
    # the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    url = os.environ['SLACK_GENERAL_POST_KEY']
    payload = {'text': message}
    resp = requests.post(url, json=payload)
    if resp.status_code == 200:
        return
    raise ValueError(
        'Request to slack returned an error %s, the response is:\n%s'
        % (resp.status_code, resp.text)
    )
def notify_newrelic(revision, url):
    """Record a deployment (git revision + changelog URL) against the
    New Relic application for the current environment.

    Requires NEWRELIC_API_KEY in the environment; raises on HTTP error.
    """
    payload = {
        "deployment": {
            "revision": revision,
            "changelog": url
        }
    }
    # env.environment is set by deploy() before this is called.
    app_id = NEWRELIC_APPIDS[env.environment]
    headers = {'X-Api-Key': os.environ['NEWRELIC_API_KEY']}
    response = requests.post(
        ("https://api.newrelic.com/v2/applications/"
         "%s/deployments.json" % app_id),
        headers=headers,
        json=payload)
    response.raise_for_status()
# First-time setup: initialise a git repo on the remote and track origin/master.
def git_init():
    run('git init . && '
        'git remote add origin '
        '<EMAIL>:ebmdatalab/openprescribing.git && '
        'git fetch origin && '
        'git branch --set-upstream master origin/master')
# First-time setup: create the remote virtualenv used by all python commands.
def venv_init():
    run('virtualenv .venv')
# Hard-reset the working copy to the deploy branch (discards local changes).
def git_pull():
    run('git fetch --all')
    run('git checkout --force origin/%s' % env.branch)
# Reinstall python dependencies, but only when a requirements file changed.
def pip_install():
    if filter(lambda x: x.startswith('requirements'),
              [x for x in env.changed_files]):
        with prefix('source .venv/bin/activate'):
            run('pip install -r requirements/production.txt')
# Ensure node/npm and the global JS toolchain are present on the remote.
def npm_install():
    installed = run("if [[ -n $(which npm) ]]; then echo 1; fi")
    if not installed:
        sudo('curl -sL https://deb.nodesource.com/setup_6.x |'
             'bash - && apt-get install -y '
             'nodejs binutils libproj-dev gdal-bin libgeoip1 libgeos-c1;',
             user=env.local_user)
    # NOTE(review): the global browserify/eslint install runs on every
    # deploy, not only on first install — confirm that is intended.
    sudo('npm install -g browserify && npm install -g eslint',
         user=env.local_user)
# Reinstall JS dependencies when package.json changed (or when forced).
def npm_install_deps(force=False):
    if force or 'openprescribing/media/js/package.json' in env.changed_files:
        run('cd openprescribing/media/js && npm install')
# Always rebuild the JS bundle.
def npm_build_js():
    run('cd openprescribing/media/js && npm run build')
# Rebuild CSS only when a stylesheet changed (or when forced).
def npm_build_css(force=False):
    if force or filter(lambda x: x.startswith('openprescribing/media/css'),
                       [x for x in env.changed_files]):
        run('cd openprescribing/media/js && npm run build-css')
def purge_urls(paths_from_git, changed_in_static):
    """Turn two lists of filenames — paths changed in git, and files changed
    under the collected static directory — into the list of site URLs that
    must be purged from the Cloudflare cache.

    Static asset URLs come first, followed by URLs of pages rendered from
    the static templates that changed.
    """
    if env.environment == 'production':
        prefix = 'https://openprescribing.net'
    else:
        prefix = 'http://staging.openprescribing.net'
    # Static templates map directly onto a site path.
    template_paths = {
        'openprescribing/templates/index.html': '',
        'openprescribing/templates/api.html': 'api/',
        'openprescribing/templates/about.html': 'about/',
        'openprescribing/templates/caution.html': 'caution/',
        'openprescribing/templates/how-to-use.html': 'how-to-use/'
    }
    static_urls = [
        "%s/%s" % (prefix, path.replace('openprescribing/static/', ''))
        for path in changed_in_static
        if path.startswith('openprescribing/static')
    ]
    template_urls = [
        "%s/%s" % (prefix, template_paths[path])
        for path in paths_from_git
        if path in template_paths
    ]
    return static_urls + template_urls
# Append a JSON line describing this deploy to deploy-log.json on the remote,
# record the deployment in New Relic, and (production only) announce on Slack.
def log_deploy():
    current_commit = run("git rev-parse --verify HEAD")
    url = ("https://github.com/ebmdatalab/openprescribing/compare/%s...%s"
           % (env.previous_commit, current_commit))
    log_line = json.dumps({'started_at': str(env.started_at),
                           'ended_at': str(datetime.utcnow()),
                           'changes_url': url})
    run("echo '%s' >> deploy-log.json" % log_line)
    notify_newrelic(current_commit, url)
    if env.environment == 'production':
        notify_slack(
            "A #deploy just happened. Changes here: %s" % url)
# Pre-flight: bootstrap git/virtualenv on first run, record the commit range
# and changed-file set for later steps, and abort when there is nothing new
# to deploy (unless force_build is set).
def checkpoint(force_build):
    env.started_at = datetime.utcnow()
    with settings(warn_only=True):
        inited = run('git status').return_code == 0
        if not inited:
            git_init()
        if run('file .venv').return_code > 0:
            venv_init()
    env.previous_commit = run('git rev-parse --verify HEAD')
    run('git fetch')
    env.next_commit = run('git rev-parse --verify origin/%s' % env.branch)
    # Changed files drive the conditional pip/npm/css steps in deploy().
    env.changed_files = set(
        run("git diff --name-only %s %s" %
            (env.previous_commit, env.next_commit), pty=False)
        .split())
    if not force_build and env.next_commit == env.previous_commit:
        abort("No changes to pull from origin!")
# Run Django collectstatic with placeholder mailgun credentials (the settings
# module requires them to be present even though they are unused here).
def deploy_static():
    bootstrap_environ = {
        'MAILGUN_WEBHOOK_USER': 'foo',
        'MAILGUN_WEBHOOK_PASS': '<PASSWORD>'}
    with shell_env(**bootstrap_environ):
        with prefix('source .venv/bin/activate'):
            run('cd openprescribing/ && '
                'python manage.py collectstatic -v0 --noinput '
                '--settings=openprescribing.settings.production')
# Apply Django migrations — production only; staging deliberately refuses.
def run_migrations():
    if env.environment == 'production':
        with prefix('source .venv/bin/activate'):
            run('cd openprescribing/ && python manage.py migrate '
                '--settings=openprescribing.settings.production')
    else:
        warn("Refusing to run migrations in staging environment")
# Send SIGHUP to the running app process (found via supervisorctl) so it
# reloads without dropping connections; abort with the shell's own error if
# the process is not running.
@task
def graceful_reload():
    result = run(r"""PID=$(sudo supervisorctl status | grep %s |
    sed -n '/RUNNING/s/.*pid \([[:digit:]]\+\).*/\1/p');
    if [[ -n "$PID" ]]; then kill -HUP $PID;
    else echo "Error: server %s not running, so could not reload";
    exit 1; fi""" % (env.app, env.app))
    if result.failed:
        # Use the error from the bash command(s) rather than rely on
        # noisy (and hard-to-interpret) output from fabric
        abort(result)
# List static files modified since the deploy started, as paths relative to
# the app root (used to decide which URLs to purge from the CDN).
def find_changed_static_files():
    changed = run(
        "find %s/openprescribing/static -type f -newermt '%s'" %
        (env.path, env.started_at.strftime('%Y-%m-%d %H:%M:%S'))).split()
    return map(lambda x: x.replace(env.path + '/', ''), [x for x in changed])
# NOTE(review): this file is Python 2 (print statements below).
# Print name/id for every Cloudflare zone on the account — used to discover
# the ZONE_ID constant at the top of this file.
@task
def list_cloudflare_zones():
    url = 'https://api.cloudflare.com/client/v4/zones'
    headers = {
        "Content-Type": "application/json",
        "X-Auth-Key": os.environ['CF_API_KEY'],
        "X-Auth-Email": os.environ['CF_API_EMAIL']
    }
    result = json.loads(
        requests.get(url, headers=headers,).text)
    zones = map(lambda x: {'name': x['name'], 'id': x['id']},
                [x for x in result["result"]])
    print json.dumps(zones, indent=2)
# Purge the entire Cloudflare cache for our zone after a deploy; a failure
# only warns (the deploy itself has already succeeded).
def clear_cloudflare():
    url = 'https://api.cloudflare.com/client/v4/zones/%s'
    headers = {
        "Content-Type": "application/json",
        "X-Auth-Key": os.environ['CF_API_KEY'],
        "X-Auth-Email": os.environ['CF_API_EMAIL']
    }
    data = {'purge_everything': True}
    print "Purging from Cloudflare:"
    print data
    result = json.loads(
        requests.delete(url % ZONE_ID + '/purge_cache',
                        headers=headers, data=json.dumps(data)).text)
    if result['success']:
        print "Cloudflare clearing succeeded: %s" % \
            json.dumps(result, indent=2)
    else:
        warn("Cloudflare clearing failed: %s" %
             json.dumps(result, indent=2))
# Install the app's crontab from the repo into /etc/cron.d.
def setup_cron():
    sudo('cp %s/deploy/crontab-%s /etc/cron.d/' % (env.path, env.app))
# Main entry point: `fab deploy:production` (optionally force_build / branch).
# Validates the environment, records per-run state on fabric's env, then runs
# the full pipeline: checkpoint -> pull -> deps -> build -> static ->
# migrations -> reload -> CDN purge -> cron -> logging.
@task
def deploy(environment, force_build=False, branch='master'):
    if 'CF_API_KEY' not in os.environ:
        abort("Expected variables (e.g. `CF_API_KEY`) not found in environment")
    if environment not in environments:
        abort("Specified environment must be one of %s" %
              ",".join(environments.keys()))
    env.app = environments[environment]
    env.environment = environment
    env.path = "/webapps/%s" % env.app
    env.branch = branch
    with cd(env.path):
        checkpoint(force_build)
        git_pull()
        pip_install()
        npm_install()
        npm_install_deps(force_build)
        npm_build_js()
        npm_build_css(force_build)
        deploy_static()
        run_migrations()
        graceful_reload()
        clear_cloudflare()
        setup_cron()
        log_deploy()
| 2.125 | 2 |
venv/lib/python3.7/site-packages/MDAnalysis/analysis/__init__.py | dtklinh/GBRDE | 2 | 12757803 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:mod:`MDAnalysis.analysis` --- Analysis code based on MDAnalysis
================================================================
The :mod:`MDAnalysis.analysis` sub-package contains various recipes and
algorithms that can be used to analyze MD trajectories.
If you use them please check if the documentation mentions any specific caveats
and also if there are any published papers associated with these algorithms.
Available analysis modules
--------------------------
:mod:`~MDAnalysis.analysis.align`
Fitting and aligning of coordinate frames, including the option to
use a sequence alignment to define equivalent atoms to fit on.
:mod:`~MDAnalysis.analysis.contacts`
Analyse the number of native contacts relative to a reference
state, also known as a "q1-q2" analysis.
:mod:`~MDAnalysis.analysis.density`
    Creating and manipulating densities such as the density of water
molecules around a protein. Makes use of the external
GridDataFormats_ package.
:mod:`~MDAnalysis.analysis.distances`
Functions to calculate distances between atoms and selections; it
contains the often-used
:func:`~MDAnalysis.analysis.distances.distance_array` function.
:mod:`~MDAnalysis.analysis.hbonds`
Analyze hydrogen bonds, including both the per frame results as well
as the dynamic properties and lifetimes.
:mod:`~MDAnalysis.analysis.helanal`
Analysis of helices with the HELANAL_ algorithm.
:mod:`~MDAnalysis.analysis.hole`
Run and process output from the :program:`HOLE` program
to analyze pores, tunnels and cavities in proteins.
:mod:`~MDAnalysis.analysis.gnm`
Gaussian normal mode analysis of MD trajectories with the
help of an elastic network.
:mod:`~MDAnalysis.analysis.leaflet`
Find lipids in the upper and lower (or inner and outer) leaflet of
a bilayer; the algorithm can deal with any deformations as long as
the two leaflets are topologically distinct.
:mod:`~MDAnalysis.analysis.nuclinfo`
Analyse the nucleic acid for the backbone dihedrals, chi, sugar
pucker, and Watson-Crick distance (minor and major groove
distances).
:mod:`~MDAnalysis.analysis.psa`
Perform Path Similarity Analysis (PSA) on a set of trajectories to measure
their mutual similarities, including the ability to perform hierarchical
clustering and generate heat map-dendrogram plots.
:mod:`~MDAnalysis.analysis.rdf`
Calculation of pair distribution functions
:mod:`~MDAnalysis.analysis.rms`
Calculation of RMSD and RMSF.
:mod:`~MDAnalysis.analysis.waterdynamics`
Analysis of water.
:mod:`~MDAnalysis.analysis.legacy.x3dna`
Analysis of helicoidal parameters driven by X3DNA_. (Note that this
module is not fully supported any more and needs to be explicitly
imported from :mod:`MDAnalysis.analysis.legacy`.)
.. _GridDataFormats: https://github.com/orbeckst/GridDataFormats
.. _HELANAL: http://www.ccrnp.ncifcrf.gov/users/kumarsan/HELANAL/helanal.html
.. _X3DNA: http://x3dna.org/
.. versionchanged:: 0.10.0
The analysis submodules are not automatically imported any more. Manually
import any submodule that you need.
.. versionchanged:: 0.16.0
:mod:`~MDAnalysis.analysis.legacy.x3dna` was moved to the
:mod:`MDAnalysis.analysis.legacy` package
"""
# Public analysis submodules; none are imported automatically (see the
# module docstring) — users must import the submodule they need.
__all__ = [
    'align',
    'base',
    'contacts',
    'density',
    'distances',
    'gnm',
    'hbonds',
    'hydrogenbonds',
    'helanal',
    'hole',
    'leaflet',
    'nuclinfo',
    'polymer',
    'psa',
    'rdf',
    'rdf_s',
    'rms',
    'waterdynamics',
]
| 1.5625 | 2 |
codejam/moist/solve.py | binkoni/algo-practice | 0 | 12757804 | <reponame>binkoni/algo-practice
#!/usr/bin/python
# https://code.google.com/codejam/contest/2933486/dashboard
# application of insertion sort


def count_unsorted(deck):
    """Return how many cards must be moved so the deck is non-decreasing.

    Walks the deck keeping ``p``, the index of the last card that stayed in
    place; any card smaller than ``deck[p]`` must be moved and is counted.
    Works for an empty deck (returns 0).
    """
    count = 0
    p = 0
    for k in range(len(deck)):
        if k > 0 and deck[k] < deck[p]:
            count += 1
        else:
            p = k
    return count


def main():
    """Read Code Jam input: T cases, each M followed by M card names."""
    cases = int(input().strip())
    for case_no in range(cases):
        size = int(input().strip())
        deck = [input().strip() for _ in range(size)]
        print(f'Case #{case_no + 1}: {count_unsorted(deck)}')


if __name__ == '__main__':
    main()
| 3.96875 | 4 |
poi_mining/biz/Judge.py | yummydeli/machine_learning | 1 | 12757805 | <reponame>yummydeli/machine_learning
#!/usr/bin/env python
# encoding:utf-8
# ##############################################################################
# The MIT License (MIT)
#
# Copyright (c) [2015] [baidu.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ##############################################################################
"""
docstring for module
"""
import biz.ngrams
class Judge(object):
    """Sentence usability judge for POI (point-of-interest) mining.

    Loads keyword dictionaries from newline-delimited files and exposes
    substring-match predicates that decide whether a sentence should be
    rejected, skipped, or accepted as a candidate POI name or address.
    """
    def __init__(self, refuse, addr_accept, name_accept, name_suffix, skip):
        # Each argument is a path to a newline-delimited UTF-8 keyword file.
        self._refuse_key = self.load_into_set(refuse)
        self._addr_accept_key = self.load_into_set(addr_accept)
        self._name_accept_key = self.load_into_set(name_accept)
        self._name_suffix = self.load_into_set(name_suffix)
        self._skip_key = self.load_into_set(skip)

    def load_into_set(self, fname):
        """Load a newline-delimited keyword file into a set of unicode keys."""
        ks = set()
        # Fix: the original opened the file without ever closing it; the
        # context manager guarantees the handle is released.
        with open(fname) as fin:
            for line in fin:
                v = line.decode('UTF-8').rstrip('\n')
                if v:
                    ks.add(v)
        return ks

    def refuse(self, ustring):
        """Return True if the sentence contains any refuse keyword."""
        return any(k in ustring for k in self._refuse_key)

    def suffix_accept(self, ustring):
        """Return (True, keyword) for a matching name-suffix (logEntropy)
        keyword, or (False, None) when nothing matches."""
        for k in self._name_suffix:
            if k in ustring:
                return True, k
        return False, None

    def name_accept(self, ustring, full=False):
        """Return True if the sentence is acceptable as a potential POI name.

        With ``full=True`` both the leading and trailing trigram must be in
        the dictionary; otherwise any matching trigram is enough.
        """
        tris = biz.ngrams.ngrams(ustring, 3)
        if full:
            return (tris[0] in self._name_accept_key
                    and tris[-1] in self._name_accept_key)
        return any(each in self._name_accept_key for each in tris)

    def name_gram(self, gram):
        """Return True if the given gram is present in the name dictionary."""
        return gram in self._name_accept_key

    def addr_accept(self, ustring):
        """Return True if the sentence is acceptable as a potential POI
        address."""
        return any(k in ustring for k in self._addr_accept_key)

    def skip(self, ustring):
        """Return True if processing of this sentence should be skipped."""
        return any(k in ustring for k in self._skip_key)
| 1.226563 | 1 |
selfdrive/car/hyundai/carcontroller.py | EasternPA/openpilot | 0 | 12757806 | <gh_stars>0
from cereal import car
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_lkas12, \
create_1191, create_1156, \
create_clu11
from selfdrive.car.hyundai.values import CAR, Buttons
from selfdrive.can.packer import CANPacker
# Steer torque limits
class SteerLimitParams:
    """Limits consumed by apply_std_steer_torque_limits for LKAS torque."""
    STEER_MAX = 255 # 409 is the max, 255 is stock
    STEER_DELTA_UP = 3    # ramp-up step per frame (presumably — see apply_std_steer_torque_limits)
    STEER_DELTA_DOWN = 7  # ramp-down step per frame
    STEER_DRIVER_ALLOWANCE = 50   # driver torque ignored below this threshold
    STEER_DRIVER_MULTIPLIER = 2   # weighting applied to driver torque
    STEER_DRIVER_FACTOR = 1
VisualAlert = car.CarControl.HUDControl.VisualAlert
def process_hud_alert(enabled, fingerprint, visual_alert, left_line,
                      right_line, left_lane_depart, right_lane_depart):
    """Translate openpilot HUD state into raw LKAS11 signal values.

    Returns a tuple (hud_alert, lane_visible, left_lane_warning,
    right_lane_warning).  Genesis models use different signal encodings for
    the steer-required alert and the lane-departure warnings.
    """
    # Fix: the original rebuilt and tested the same Genesis model list up to
    # three times; compute the membership check once.
    is_genesis = fingerprint in (CAR.GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80)

    hud_alert = 0
    if visual_alert == VisualAlert.steerRequired:
        hud_alert = 3 if is_genesis else 5

    # initialize to no line visible
    lane_visible = 1
    if left_line and right_line:
        lane_visible = 3 if enabled else 4
    elif left_line:
        lane_visible = 5
    elif right_line:
        lane_visible = 6

    # initialize to no warnings
    left_lane_warning = 0
    right_lane_warning = 0
    if left_lane_depart:
        left_lane_warning = 1 if is_genesis else 2
    if right_lane_depart:
        right_lane_warning = 1 if is_genesis else 2
    return hud_alert, lane_visible, left_lane_warning, right_lane_warning
class CarController():
  """Builds the per-frame list of CAN messages for Hyundai LKAS control.

  NOTE(review): safety-relevant code — message order and counter arithmetic
  must not change without hardware testing.
  """
  def __init__(self, dbc_name, car_fingerprint):
    self.apply_steer_last = 0
    self.car_fingerprint = car_fingerprint
    self.lkas11_cnt = 0
    self.clu11_cnt = 0
    self.last_resume_frame = 0
    self.last_lead_distance = 0
    # True when giraffe switch 2 is low and we need to replace all the camera messages
    # otherwise we forward the camera msgs and we just replace the lkas cmd signals
    self.camera_disconnected = False
    self.turning_signal_timer = 0
    self.packer = CANPacker(dbc_name)
  def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert,
             left_line, right_line, left_lane_depart, right_lane_depart):
    """Return the CAN messages to send this frame (100 Hz, presumably)."""
    # Steering is disabled while a turn signal is active, and for a further
    # 100 frames after it turns off.
    if CS.left_blinker_on or CS.right_blinker_on:
      self.turning_signal_timer = 100 # Disable for 1.0 Seconds after blinker turned off
    if self.turning_signal_timer:
      enabled = 0
    ### Steering Torque
    # Scale the [-1, 1] actuator command to raw torque units, then apply
    # rate/driver-override limits against the last commanded value.
    apply_steer = actuators.steer * SteerLimitParams.STEER_MAX
    apply_steer = apply_std_steer_torque_limits(apply_steer, self.apply_steer_last, CS.steer_torque_driver, SteerLimitParams)
    if not enabled:
      apply_steer = 0
    steer_req = 1 if enabled else 0
    self.apply_steer_last = apply_steer
    hud_alert, lane_visible, left_lane_warning, right_lane_warning =\
            process_hud_alert(enabled, self.car_fingerprint, visual_alert,
                              left_line, right_line,left_lane_depart, right_lane_depart)
    can_sends = []
    # LKAS11 uses a 4-bit rolling counter derived from the frame number.
    self.lkas11_cnt = frame % 0x10
    if self.camera_disconnected:
      # With the camera disconnected we must synthesise its periodic
      # messages ourselves, each at its own rate.
      if (frame % 10) == 0:
        can_sends.append(create_lkas12())
      if (frame % 50) == 0:
        can_sends.append(create_1191())
      if (frame % 7) == 0:
        can_sends.append(create_1156())
    can_sends.append(create_lkas11(self.packer, self.car_fingerprint, apply_steer, steer_req, self.lkas11_cnt,
                                   enabled, CS.lkas11, hud_alert, lane_visible, left_lane_depart, right_lane_depart,
                                   keep_stock=(not self.camera_disconnected)))
    #if pcm_cancel_cmd:
      #self.clu11_cnt = frame % 0x10
      #can_sends.append(create_clu11(self.packer, CS.clu11, Buttons.CANCEL, self.clu11_cnt))
    # Stop-and-go resume: once stopped, remember the radar lead distance;
    # when the lead pulls away, send bursts of 6 RES button presses until
    # the car starts moving again.
    if CS.stopped:
      # run only first time when the car stops
      if self.last_lead_distance == 0:
        # get the lead distance from the Radar
        self.last_lead_distance = CS.lead_distance
        self.clu11_cnt = 0
      # when lead car starts moving, create 6 RES msgs
      elif CS.lead_distance > self.last_lead_distance and (frame - self.last_resume_frame) > 5:
        can_sends.append(create_clu11(self.packer, CS.clu11, Buttons.RES_ACCEL, self.clu11_cnt))
        self.clu11_cnt += 1
        # interval after 6 msgs
        if self.clu11_cnt > 5:
          self.last_resume_frame = frame
          self.clu11_cnt = 0
    # reset lead distnce after the car starts moving
    elif self.last_lead_distance != 0:
      self.last_lead_distance = 0
    if self.turning_signal_timer > 0:
      self.turning_signal_timer -= 1
    return can_sends
| 2.234375 | 2 |
amazing_semantic_segmentation/builders/__init__.py | jarodhanko/Amazing-Semantic-Segmentation | 0 | 12757807 | from .model_builder import builder | 1.054688 | 1 |
xservs/tmp_getbkg.py | CTJChen/ctc_astropylib | 0 | 12757808 | <filename>xservs/tmp_getbkg.py
# NOTE(review): temp/work-in-progress file — it contains a hard syntax error
# (the bare `fcat =` below) and references names never defined here (xcat,
# magonly, corr_glob, globonly, binvalue, pd, np, u). It cannot run as-is.
def getbkg_iter(qm,rmagbin,NX,catopt,optdf,r_in = 10., r_out=50.,\
    nmagbin=15, magname = 'imag_psf', ora='ra',odec='dec'):
    '''
    Use external qm, rmagbin to draw random sample from optdf
    then use those positions to search for background sources in catopt
    '''
    # Draw a magnitude-weighted random subsample of the optical catalogue:
    # per magnitude bin, keep each source with probability nsrc/len(bin).
    bkgdf = pd.DataFrame({'ra':optdf[ora].values.copy(),'dec':optdf[odec].values.copy(),
        'mag':optdf[magname].values.copy(),'rand':np.random.uniform(0,1,len(optdf))
        })
    out=pd.cut(bkgdf['mag'].values,bins=rmagbin)
    grp=bkgdf.groupby(out)
    nsrc = qm*NX
    cnt = 0
    xra = np.array([],dtype=float)
    xdec = np.array([],dtype=float)
    imag = np.array([],dtype=float)
    for ks in grp.groups.keys():
        ns = nsrc[cnt]
        tdf = grp.get_group(ks)
        xra = np.hstack((xra,
            tdf[tdf['rand'].values <= ns/len(tdf)]['ra'].values
            ))
        xdec = np.hstack((xdec,
            tdf[tdf['rand'].values <= ns/len(tdf)]['dec'].values))
        # NOTE(review): pulls 'icmodel_mag' regardless of the magname
        # parameter — confirm whether this should use magname instead.
        imag = np.hstack((imag,
            tdf[tdf['rand'].values <= ns/len(tdf)]['icmodel_mag'].values))
        cnt += 1
    # NOTE(review): incomplete statement — presumably meant to build a
    # SkyCoord-like catalogue from xra/xdec (used below as `xcat`). Must be
    # completed before this module can even be imported.
    fcat =
    idhsc,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_in*u.arcsec)
    #Excluding each optical source with an x-ray source within r_in
    itmp=np.arange(len(catopt))
    itmp[np.unique(idhsc)]=-1
    #indicies for optical sources with **NO** X-ray counterparts within r_in
    idhsc_ext=np.where(np.equal(optdf.index.values, itmp))[0]
    #Now search for X-ray and optical matches within r_out
    idhsc_in,idxmm,d2d,d3d=xcat.search_around_sky(catopt,r_out*u.arcsec)
    idhsc_in = np.unique(idhsc_in)
    #Cross-correlated the ``no r_in list'', and the ``r_out list''
    #This will create a list of ``background optical sources''
    idhsc_bkgd=np.intersect1d(idhsc_ext,idhsc_in)
    hsc_bkgd=optdf.loc[idhsc_bkgd].copy()
    hsc_bkgd.reset_index(inplace=True)
    if magonly:
        return hsc_bkgd[magname].values
    else:
        # Background surface density per magnitude bin: counts divided by
        # the annulus area summed over all X-ray sources.
        out,rmagbin=pd.cut(hsc_bkgd[magname].values,bins=nmagbin,retbins=True)
        groups=hsc_bkgd.groupby(out)
        #number density = total number of sources divided by the area of annulus
        N_xmm=len(xcat) #number of unique XMM sources
        N_bkgd=len(hsc_bkgd)
        nm=groups[ora].count().values/(np.pi*(r_out**2-r_in**2)*N_xmm)
        if corr_glob | globonly:
            #According to Brusa et al. 2007, at faint magnitudes
            #nm is not correct and should use a global one.
            out,rmagbin_global=pd.cut(optdf[magname].values,bins=nmagbin,retbins=True)
            groups=optdf.groupby(out)
            rmag_global = binvalue(rmagbin_global)
            area = \
            (optdf[ora].max() - optdf[ora].min())*(optdf[odec].max() - optdf[odec].min())*3600**2
            nm_global = groups[ora].count().values/area
            iglobal = np.where(rmagbin > 23.)[0][:-1]
            if corr_glob:
                nm[iglobal] = nm_global[iglobal]
            elif globonly:
                return nm_global, rmagbin
        else:
            return nm,rmagbin
| 1.921875 | 2 |
tests/test_jinja_ext_tab.py | odsod/site-generator | 13 | 12757809 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
import jinja2
from aip_site.jinja.ext.tab import TabExtension
def test_tab():
    """A single {% tab %} block renders as a Markdown tab with an indented body."""
    t = jinja2.Template(textwrap.dedent("""
        {% tab proto %}
        Something something
        More more more
        {% endtabs %}
    """), extensions=[TabExtension])
    rendered = t.render()
    # "proto" expands to its human-readable tab title.
    assert '=== "Protocol buffers"' in rendered
    # Body lines are indented under the tab marker.
    assert '    Something something\n' in rendered
    assert '    More more more\n' in rendered
def test_multiple_tabs():
    """Two consecutive {% tab %} sections render as two separate tabs."""
    t = jinja2.Template(textwrap.dedent("""
        {% tab proto %}
        Something something
        {% tab oas %}
        Something else
        {% endtabs %}
    """), extensions=[TabExtension])
    rendered = t.render()
    # Each shorthand key maps to its full tab title.
    assert '=== "Protocol buffers"' in rendered
    assert '=== "OpenAPI 3.0"' in rendered
| 2 | 2 |
schafkopf/pygame_gui/GameModeWidget.py | Taschee/schafkopf | 10 | 12757810 | from typing import Union, Tuple, Callable
import pygame
from schafkopf.game_modes import *
from schafkopf.pygame_gui.Button import Button
from schafkopf.pygame_gui.colors import WHITE, BLACK, RED
class GameModeWidget(Button):
    """Button displaying one Schafkopf bidding option (game mode + optional suit).

    Renders the option's label onto three semi-transparent surfaces (normal,
    pressed, hovered) and hands them to the generic :class:`Button` base.
    Non-clickable widgets reuse the normal surface for every state.
    """

    def __init__(
            self,
            topleft: Tuple[int, int] = (0, 0),
            bidding_option: Tuple[int, Union[int, None]] = (NO_GAME, None),
            callback: Callable = None,
            font_size: int = 40,
            clickable: bool = True
    ):
        pad = 10
        label = pygame.font.Font(None, font_size).render(
            get_bidding_option_as_text(bidding_option), True, BLACK
        )
        base_width = label.get_width() + 2 * pad
        base_height = font_size

        def labelled_surface(fill, grow=0, shift=0):
            # One semi-transparent surface with the label blitted onto it.
            surf = pygame.Surface((base_width + grow, base_height + grow))
            surf.fill(fill)
            surf.set_alpha(180)
            surf.blit(label, (pad + shift, pad + shift))
            return surf

        normal_image = labelled_surface(WHITE)
        if clickable:
            pressed_image = labelled_surface(pygame.Color('grey'))
            # Hover state is slightly larger, with the label nudged inward.
            hovered_image = labelled_surface(pygame.Color("lightgrey"), grow=5, shift=2)
        else:
            pressed_image = normal_image
            hovered_image = normal_image

        super().__init__(
            topleft=topleft,
            image=normal_image,
            button_down_image=pressed_image,
            hover_image=hovered_image,
            callback=callback
        )
def get_bidding_option_as_text(option: tuple[int, Union[int, None]]):
    """Return the human-readable label for a (game_mode, suit) bidding option."""
    mode_name = game_mode_dict[option[0]]
    suit = option[1]
    if suit is None:
        # Mode without a called suit (e.g. Wenz, "Weiter").
        return mode_name
    return mode_name + " " + suit_dict[suit]
# German display names for each announced game mode constant.
game_mode_dict: dict[int, str] = {
    NO_GAME: "Weiter",
    PARTNER_MODE: "Sauspiel",
    WENZ: "Wenz",
    SOLO: "Solo",
}

# German display names for each suit constant.
suit_dict: dict[int, str] = {
    ACORNS: "Eichel",
    LEAVES: "Gras",
    HEARTS: "Herz",
    BELLS: "Schellen"
}
| 3.015625 | 3 |
project/stock/migrations/0002_auto_20190718_0206.py | xico-labs/stock_control_django | 1 | 12757811 | <filename>project/stock/migrations/0002_auto_20190718_0206.py<gh_stars>1-10
# Generated by Django 2.2.1 on 2019-07-18 02:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: redefine ``EstoqueItens.estoque`` as a
    CASCADE-deleting foreign key with reverse accessor ``estoques``.
    Do not edit by hand; regenerate with ``makemigrations`` instead."""

    dependencies = [
        ('stock', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='estoqueitens',
            name='estoque',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='estoques', to='stock.Estoque'),
        ),
    ]
| 1.195313 | 1 |
game.py | brijes-h/Tic-Tac-Toe | 0 | 12757812 | # Modules
import pygame
import numpy as np
import random
from pygame.constants import KEYDOWN
import settings as s
# Initialize pygame
pygame.init()
# screen
screen = pygame.display.set_mode((s.WIDTH,s.HEIGHT))
# Title and Icon
pygame.display.set_caption('TIC TAC TOE')
icon = pygame.image.load('icon.png')
pygame.display.set_icon(icon)
screen.fill(s.BG_COLOR)
# console board: 3x3 grid of floats; 0 = empty, 1 = computer (O), 2 = human (X)
board = np.zeros((3,3))
# Functions
def drawLines(): # Drawing lines function
    """Draw the two horizontal and two vertical grid lines of the board."""
    # horizontal lines
    pygame.draw.line(screen, s.LINE_COLOR, (0,s.SQUARE_SIZE), (500,s.SQUARE_SIZE), s.LINE_WIDTH)
    # NOTE(review): 332 looks like a hard-coded 2 * s.SQUARE_SIZE (and 500 a
    # hard-coded s.WIDTH/s.HEIGHT) -- confirm against settings.py
    pygame.draw.line(screen, s.LINE_COLOR, (0, 332), (500, 332), s.LINE_WIDTH)
    # vertical lines
    pygame.draw.line(screen, s.LINE_COLOR, (s.SQUARE_SIZE, 0), (s.SQUARE_SIZE, 500), s.LINE_WIDTH)
    pygame.draw.line(screen, s.LINE_COLOR, (332, 0), (332, 500), s.LINE_WIDTH)
def playerEquals(x, y, z):
    """Return True when x, y and z all carry the same non-empty player mark."""
    return x != 0 and x == y == z
def checkDraw():
    """Return 'Draw' when no empty square remains, otherwise None."""
    empty_cells = sum(
        1
        for r in range(s.ROWS)
        for c in range(s.COLS)
        if availableSquare(r, c)
    )
    if empty_cells == 0:
        return 'Draw'
def checkWinner():
    """Return 1 or 2 for a winning player, 'Draw' for a full board, else None.

    The draw check runs first so that any line-of-three found afterwards
    overrides it (a full board with a completed line is still a win).
    """
    result = checkDraw()
    # column wins
    for c in range(s.COLS):
        if playerEquals(board[0][c], board[1][c], board[2][c]):
            result = board[0][c]
    # row wins
    for r in range(s.ROWS):
        if playerEquals(board[r][0], board[r][1], board[r][2]):
            result = board[r][0]
    # ascending diagonal win
    if playerEquals(board[2][0], board[1][1], board[0][2]):
        result = board[2][0]
    # descending diagonal win
    if playerEquals(board[0][0], board[1][1], board[2][2]):
        result = board[0][0]
    return result
# functions for drawing winning lines
def vertical_winline(col, winner):
    """Draw the winning line through column *col* in the winner's colour."""
    posX = col * s.SQUARE_SIZE + s.SQUARE_SIZE//2 # column is constant
    # Bug fix / consistency: the original if/elif left ``color`` unbound
    # (NameError) for any winner value other than 1 or 2; the sibling
    # *_winline helpers all use a plain else, so do the same here.
    if winner == 1:
        color = s.O_COLOR
    else:
        color = s.X_COLOR
    pygame.draw.line(screen, color, (posX, 15), (posX, s.HEIGHT-15), 15)
def horizontal_winline(row, winner):
    """Draw the winning line across *row* in the winner's colour."""
    posY = row * s.SQUARE_SIZE + s.SQUARE_SIZE // 2  # row is constant
    color = s.O_COLOR if winner == 1 else s.X_COLOR
    pygame.draw.line(screen, color, (15, posY), (s.WIDTH - 15, posY), 15)
def asc_diagonal_winline(winner):
    """Draw the winning line along the ascending (bottom-left to top-right) diagonal."""
    color = s.O_COLOR if winner == 1 else s.X_COLOR
    pygame.draw.line(screen, color, (15, s.HEIGHT - 15), (s.WIDTH - 15, 15), 15)
def desc_diagonal_winline(winner):
    """Draw the winning line along the descending (top-left to bottom-right) diagonal."""
    color = s.O_COLOR if winner == 1 else s.X_COLOR
    pygame.draw.line(screen, color, (15, 15), (s.WIDTH - 15, s.HEIGHT - 15), 15)
# function for drawing Os and Xs
# function for drawing Os and Xs
def figures():
    """Render the current board: circles for player 1 (O), crosses for player 2 (X)."""
    for row in range(3):
        for col in range(3):
            if board[row][col] == 1:
                # 83 is presumably s.SQUARE_SIZE // 2 (cell centre offset) -- TODO confirm
                pygame.draw.circle(screen, s.O_COLOR, ( int(col * s.SQUARE_SIZE + 83), int(row * s.SQUARE_SIZE + 83)), s.C_RADIUS, s.C_WIDTH)
            elif board[row][col] == 2:
                # Two diagonal strokes forming the X, inset by s.SPACE from the cell edges.
                pygame.draw.line(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE ), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SPACE), s.CROSS_WIDTH)
                pygame.draw.line(screen, s.X_COLOR, (col * s.SQUARE_SIZE + s.SPACE, row * s.SQUARE_SIZE + s.SPACE ), (col * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE, row * s.SQUARE_SIZE + s.SQUARE_SIZE - s.SPACE), s.CROSS_WIDTH)
def markSquare(row, col, player):
    """Record *player*'s mark (1 or 2; 0 clears) in the given board cell."""
    board[row][col] = player
def availableSquare(row, col):
    """Return True when the given board cell is still empty (holds 0)."""
    return board[row][col] == 0
def isBoardFull():
    """Return True when no cell of the 3x3 board is empty."""
    return all(
        board[r][c] != 0
        for r in range(3)
        for c in range(3)
    )
def restart():
    """Reset the UI and the board for a new game.

    Clears the window, redraws the grid and empties every board cell.
    Bug fix: the original ``player = 1`` only created a dead local variable
    (no ``global`` declaration), so the turn marker was never actually reset
    on restart; declare it global so the computer (player 1) opens each new
    game as intended.
    """
    global player
    screen.fill(s.BG_COLOR)
    drawLines()
    player = 1
    for row in range (s.ROWS):
        for col in range (s.COLS):
            board[row][col] = 0
def render():
    """Draw the winning line (if the game is over) and the end-of-game banner.

    Re-scans the board with the same checks as checkWinner() because that
    function only returns *who* won, not *where* the winning line lies.
    """
    x = checkWinner()
    if x != None and x != 'Draw':
        # vertical win
        for col in range (s.COLS):
            if playerEquals(board[0][col], board[1][col], board[2][col]):
                winner = board[0][col]
                vertical_winline(col, winner)
        # horizontal win
        for row in range (s.ROWS):
            if playerEquals(board[row][0], board[row][1], board[row][2]):
                winner = board[row][0]
                horizontal_winline(row, winner)
        # ascending diagonal win
        if playerEquals(board[2][0], board[1][1], board[0][2]):
            winner = board[2][0]
            asc_diagonal_winline(winner)
        # descending diagonal win
        if playerEquals(board[0][0], board[1][1], board[2][2]):
            winner = board[0][0]
            desc_diagonal_winline(winner)
    # Banner is drawn for wins and draws alike; display() ignores None.
    display(x)
def display(x):
    """Show the end-of-game banner for outcome *x* (1 = O wins, 2 = X wins, 'Draw').

    Does nothing while the game is still running (x is None).
    """
    if x == 1:
        text = "O WINS!!! Press 'R' to play again!"
        # NOTE(review): colour passed as the name string 'GREEN' while the other
        # branches use drawTexttoScreen's RGB-tuple default -- confirm pygame
        # resolves the name and whether the inconsistency is intentional.
        drawTexttoScreen (screen, text, 250, 250, 'GREEN')
    elif x == 2:
        text = "X WINS!!! Press 'R' to play again!"
        drawTexttoScreen (screen, text, 250, 250)
    elif x == 'Draw':
        text = "DRAW!!! Press 'R' to play again!"
        drawTexttoScreen (screen, text, 250, 250)
def drawTexttoScreen (screen, text, x, y, color = (250, 0, 0)):
    """Blit *text* centred on (x, y) using a 30pt system font."""
    font = pygame.font.SysFont('chalkduster.ttf', 30)
    rendered = font.render(text, True, color)
    rect = rendered.get_rect()
    rect.centerx, rect.centery = x, y
    screen.blit(rendered, rect)
def playerMove(row, col, player):
    """Apply the human player's mark at (row, col)."""
    markSquare(row, col, player)
def compMove():
    """Place the computer's (player 1) mark on the best square found by minimax."""
    best_score = float('-inf')
    best_row = best_col = None
    for r in range(s.ROWS):
        for c in range(s.COLS):
            if not availableSquare(r, c):
                continue
            markSquare(r, c, 1)  # try the move
            value = minimax(0, float('-inf'), float('inf'), False)
            markSquare(r, c, 0)  # undo it
            if value > best_score:
                best_score = value
                best_row, best_col = r, c
    markSquare(best_row, best_col, 1)
# Minimax function
def minimax(depth, alpha, beta, is_maximizing):
    """Alpha-beta minimax over the global board.

    Player 1 (the computer) maximizes, player 2 minimizes.  Terminal
    positions are scored by s.score keyed on checkWinner()'s result
    (1, 2 or 'Draw').
    """
    winner = checkWinner()
    if winner != None:
        return s.score[winner]
    if is_maximizing:
        bestScore = float('-inf')
        for row in range(s.ROWS):
            for col in range(s.COLS):
                if availableSquare(row, col):
                    markSquare(row, col, 1)
                    score = minimax(depth + 1, alpha, beta, False)
                    markSquare(row, col, 0)  # undo trial move
                    bestScore = max(score, bestScore)
                    alpha = max(alpha, bestScore) # pruning
                    if beta <= alpha:
                        return bestScore
        return bestScore
    else:
        bestScore = float('inf')
        for row in range(3):
            for col in range(3):
                if availableSquare(row, col):
                    markSquare(row, col, 2)
                    score = minimax(depth + 1, alpha, beta, True)
                    markSquare(row, col, 0)  # undo trial move
                    bestScore = min(score, bestScore)
                    beta = min(beta, bestScore) # pruning
                    if beta <= alpha:
                        return bestScore
        return bestScore
drawLines()
# Randomly pick who starts; candidate values come from settings.
player = random.choice(s.p) # initializing player
gameOver = False
# game loop
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        # for comp move
        # NOTE(review): the computer's turn is handled inside the event loop,
        # so it only advances when some event arrives -- confirm intended.
        if player == 1 and not gameOver:
            compMove()
            winner = checkWinner()
            if winner != None:
                gameOver = True
            player = 2
            figures()
            render()
        if event.type == pygame.MOUSEBUTTONDOWN and not gameOver:
            mouseX = event.pos[0] # x coordinate
            mouseY = event.pos[1] # y coordinate
            clicked_row = int(mouseY // s.SQUARE_SIZE)
            clicked_col = int(mouseX // s.SQUARE_SIZE)
            # for player move
            if availableSquare (clicked_row, clicked_col):
                if player == 2:
                    playerMove(clicked_row, clicked_col, 2)
                    winner = checkWinner()
                    if winner != None:
                        gameOver = True
                    player = 1
                    figures()
                    render()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_r:
                restart()
                gameOver = False # changing gameOver to False for the next game
    pygame.display.update()
| 3.609375 | 4 |
python/module/spatial/__init__.py | DerThorsten/spatial | 0 | 12757813 | from . _spatial import *
def pure_python():
    """Placeholder pure-Python entry point; intentionally does nothing."""
    pass
backend/app/api/routes/__init__.py | dakotagporter/wine-helms | 1 | 12757814 | """
Aggregation of all application routes into a single router. All created routers are imported here and
added to a single router for access from the app.api.server file.
router:
- Initial instantiation of a router
- All routers are aggregated to this router
- All routers are given a name (to appear in the URL) and a tag (for documentation)
"""
from fastapi import APIRouter

# Import routes
from app.api.routes.dummy import router as dummy_router
from app.api.routes.forecast import router as forecast_router
from app.api.routes.viz import router as viz_router
from app.api.routes.users import router as users_router

# Access all routes with this router
router = APIRouter()

# Include all routes
router.include_router(dummy_router, prefix="/dummy", tags=["dummy"])
router.include_router(forecast_router, prefix="/forecast", tags=["forecast"])
# Bug fix: viz_router was imported but never registered, so its endpoints were
# unreachable despite the module docstring promising full aggregation.
router.include_router(viz_router, prefix="/viz", tags=["viz"])
router.include_router(users_router, prefix="/users", tags=["users"])
aster_ocr_utils/weigths_tf1_to_tf2.py | NoAchache/TextBoxGan | 36 | 12757815 | <filename>aster_ocr_utils/weigths_tf1_to_tf2.py<gh_stars>10-100
import tensorflow as tf
# Mapping of TF1 checkpoint-variable name fragments to their TF2/Keras
# equivalents; each replacement is applied to every variable name in order.
OLD_AND_NEW_NAMES = {
    "weights": "kernel",
    "fully_connected": "dense",
    "biases": "bias",
    "Predictor/decoder/dense": "Predictor/dense",
    "Backward/Predictor/decoder/sync_attention_wrapper/bahdanau_attention/query_layer": "sync_attention_wrapper_1/BahdanauAttention",
    "Forward/Predictor/decoder/sync_attention_wrapper/bahdanau_attention/query_layer": "sync_attention_wrapper/BahdanauAttention",
    "Predictor/decoder/sync_attention_wrapper/lstm_cell": "Predictor/lstm_cell",
    "decoder/sync_attention_wrapper/bahdanau_attention/attention_v": "BahdanauAttention/attention_v",
    "Predictor/memory_layer": "Predictor/BahdanauAttention",
}

# Checkpoint paths: source (TF1 naming) and destination (TF2 naming);
# fill these in before running.
ASTER_ORIGINAL_WEIGHTS = ""  # path to aster weights
ASTER_MODIFIED_WEIGHTS = ""  # local path
def rename_weigths():
    """
    Rename aster layers to switch from tf1 to tf2.

    Loads every variable from the TF1 checkpoint, re-creates it under its
    translated name (per OLD_AND_NEW_NAMES), and saves the renamed set as a
    new checkpoint at ASTER_MODIFIED_WEIGHTS.
    """
    tf1_weights = ASTER_ORIGINAL_WEIGHTS
    tf2_weights = ASTER_MODIFIED_WEIGHTS
    new_vars = []
    with tf.compat.v1.Session() as sess:
        for var_name, _ in tf.train.list_variables(tf1_weights):
            # Load the variable
            var = tf.train.load_variable(tf1_weights, var_name)
            # Apply every fragment substitution to the checkpoint key.
            new_name = var_name
            for old, new in OLD_AND_NEW_NAMES.items():
                new_name = new_name.replace(old, new)
            new_vars.append(tf.Variable(var, name=new_name))
        saver = tf.compat.v1.train.Saver(new_vars)
        # Initializers must run before the Saver can write the new variables.
        sess.run(
            [
                tf.compat.v1.global_variables_initializer(),
                tf.compat.v1.local_variables_initializer(),
                tf.compat.v1.tables_initializer(),
            ]
        )
        saver.save(sess, tf2_weights)


if __name__ == "__main__":
    rename_weigths()
| 2.453125 | 2 |
VJ_Training.py | sgino209/ViolaJones_TrainClassify | 1 | 12757816 | <filename>VJ_Training.py
#!/usr/bin/env python
import os
import re
import csv
import cv2
import sys
# User Arguments:
SAMPLES_CSV = "samples_26_31_highprob_r04.csv"  # index CSV under <base>/Data
SAMPLE_SIZE = (100, 100)     # size (w, h) each cropped sample is rescaled to
WINDOW_SIZE = (20, 20)       # detection window for the OpenCV cascade tools
STAGES_NUM = 18              # default number of cascade training stages
POSITIVE_NUM = 7000          # target number of positive samples
POSITIVE_EXTENSION = False   # synthesize extra positives via distortions

if len(sys.argv) > 1:
    # Bug fix: sys.argv values are strings; convert so STAGES_NUM keeps the
    # integer type of its default (it is later passed through str() anyway).
    STAGES_NUM = int(sys.argv[1])
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def generate_samples(_baseDir, _index):
    """Gets a base directory and an CSV-Index pointer, and generate sample for VJ Training phase.

    Reads the CSV index, crops every labelled object out of its source TIFF,
    rescales it to SAMPLE_SIZE and stores it as a JPEG under
    <base>/Code/VJ/Training/<label>/.  Returns a dict of label -> sample count.
    """
    csvIndexFile = _baseDir + '/Data/' + _index
    csvIndex_f = open(csvIndexFile)
    print "Loading Index file: " + csvIndexFile
    csv_f = csv.reader(csvIndex_f)
    pic_num = {}
    label_col, minX_col, maxX_col, minY_col, maxY_col, img_col = 0, 0, 0, 0, 0, 0
    for row in csv_f:
        if row[0] == 'object_id':
            # Header row: resolve column positions by name.
            label_col = row.index('predicted_class')
            minX_col = row.index('Coord<Minimum>_0')
            maxX_col = row.index('Coord<Maximum>_0')
            minY_col = row.index('Coord<Minimum>_1')
            maxY_col = row.index('Coord<Maximum>_1')
            img_col = row.index([s for s in row if "exported" in s][0])
        elif row[0] != "":
            # Data row: bounding box + source image reference.
            lbl = row[label_col].lower()
            x = int(row[minX_col])
            y = int(row[minY_col])
            w = int(row[maxX_col]) - int(row[minX_col])
            h = int(row[maxY_col]) - int(row[minY_col])
            imgCsv = row[img_col].split('.')[0]
            imgName = re.search('.*(usda_.*)_table', imgCsv).group(1)
            serie = re.search('usda_(.+?)_.*', imgName).group(1)
            if lbl not in pic_num:
                pic_num[lbl] = 0
            dirName = _baseDir + '/Code/VJ/Training/' + lbl
            if not os.path.exists(dirName):
                os.makedirs(dirName)
            imgFile = _baseDir + '/Data/' + serie + '/' + serie + 'FT/' + imgName + '.tiff'
            img = cv2.imread(imgFile, cv2.IMREAD_GRAYSCALE)
            # Crop the bounding box and normalize its size.
            img_crop = img[y:y + h, x:x + w]
            img_scl = cv2.resize(img_crop, SAMPLE_SIZE, interpolation=cv2.INTER_CUBIC)
            sampleName = dirName + '/' + lbl + '_' + str(pic_num[lbl]) + ".jpg"
            print "Generating a new " + lbl.upper() + " sample: " + sampleName
            cv2.imwrite(sampleName, img_scl)
            pic_num[lbl] += 1
    csvIndex_f.close()
    print generate_samples.__name__ + " Done!"
    for lbl, pics_num in pic_num.iteritems():
        print lbl.upper() + ' --> ' + str(pics_num) + ' were created'
    return pic_num
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def increase_pos_num(_baseDir, _posLabel, _posNum):
    """create training samples from one image applying distortions.

    Runs OpenCV's opencv_createsamples once per source image, splitting the
    _posNum budget evenly across them; writes one .vec file per image under
    <base>/training_<label>/vec.
    """
    imgsPath = os.listdir(_baseDir + '/' + _posLabel)
    dirName = _baseDir + '/training_' + _posLabel + '/vec'
    if not os.path.exists(dirName):
        os.makedirs(dirName)
    for imgFile in imgsPath:
        # Distortion bounds (rotation angles, intensity deviation) are the
        # usual opencv_createsamples knobs.
        appName = 'opencv_createsamples'
        appArgs = ' -img ' + _baseDir + '/' + _posLabel + '/' + imgFile +\
                  ' -num ' + str(int(_posNum/len(imgsPath))) +\
                  ' -bg ' + _baseDir + '/training_' + _posLabel + '/bg1.txt' +\
                  ' -vec ' + dirName + '/' + imgFile.split('.')[0] + '.vec' +\
                  ' -maxxangle 0.6' +\
                  ' -maxyangle 0' +\
                  ' -maxzangle 0.3' +\
                  ' -maxidev 100' +\
                  ' -bgcolor 0' +\
                  ' -bgthresh 0' +\
                  ' -w ' + str(WINDOW_SIZE[0]) + ' -h ' + str(WINDOW_SIZE[1])
        print "Launching: " + appName + " " + appArgs
        os.system(appName + appArgs)
    print increase_pos_num.__name__ + " Done!"
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def merge_pos_vec(_baseDir, _posLabel):
    """Stitch all positive samples into a vector file.

    Delegates to the external mergevec.py tool, combining every per-image
    .vec under training_<label>/vec into positives_<label>.vec.
    """
    appName = 'mergevec.py'
    appArgs = ' -v ' + _baseDir + '/training_' + _posLabel + '/vec' +\
              ' -o ' + _baseDir + '/training_' + _posLabel + '/positives_' + _posLabel + '.vec'
    print "Launching: " + appName + " " + appArgs
    os.system(appName + appArgs)
    print merge_pos_vec.__name__ + " Done!"
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def create_pos_n_neg(_baseDir, _posLabel):
    """Creates positive index file for posLabel and negative index files for all other labels (used by VJ training).

    Writes info.dat (positives) plus bg1.txt/bg2.txt (negatives, taken from the
    'fibers' label, with paths relative to two different working dirs).
    Returns the number of negative samples found.
    """
    dirName = _baseDir + '/training_' + _posLabel
    if not os.path.exists(dirName):
        os.makedirs(dirName)
    posFile = open(dirName + '/info.dat', 'w')
    negFile1 = open(dirName + '/bg1.txt', 'w')
    negFile2 = open(dirName + '/bg2.txt', 'w')
    negCntr = 0
    for labelDir in os.listdir(_baseDir):
        # NOTE(review): isdir(labelDir) is resolved against the CWD, not
        # _baseDir; works only because the script is invoked with _baseDir='.'
        # -- confirm before reusing with another base.
        if os.path.isdir(labelDir):
            if labelDir == _posLabel:
                for imgFile in os.listdir(_baseDir+'/'+labelDir):
                    # info.dat format: <path> <count> <x> <y> <w> <h>
                    line = '../' + labelDir + '/' + imgFile + ' ' +\
                           '1 0 0 ' + str(SAMPLE_SIZE[0]) + ' ' + str(SAMPLE_SIZE[1]) + '\n'
                    posFile.write(line)
            elif labelDir == 'fibers':
                for imgFile in os.listdir(_baseDir + '/' + labelDir):
                    line = labelDir + '/' + imgFile + '\n'
                    negFile1.write('../' + line)
                    negFile2.write('./' + line)
                    negCntr += 1
    negFile1.close()
    negFile2.close()
    posFile.close()
    print create_pos_n_neg.__name__ + " Done!"
    return negCntr
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def generate_pos_vector(_infoFile, _posVecFile, _posNum):
    """Stitch all positive samples into a vector file.

    Runs opencv_createsamples over the info.dat index, writing _posNum
    windows of WINDOW_SIZE into _posVecFile.
    """
    appName = 'opencv_createsamples'
    appArgs = ' -info ' + _infoFile +\
              ' -num ' + str(_posNum) +\
              ' -w ' + str(WINDOW_SIZE[0]) + ' -h ' + str(WINDOW_SIZE[1]) + \
              ' -vec ' + _posVecFile
    print "Launching: " + appName + " " + appArgs
    os.system(appName + appArgs)
    print generate_pos_vector.__name__ + " Done!"
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def show_pos_vector(_posVecFile):
"""Show the positive vector, just for verifying that allright, toggle images by space-bar, exit with ESC"""
appName = 'opencv_createsamples'
appArgs = ' -w ' + str(WINDOW_SIZE[0]) + ' -h ' + str(WINDOW_SIZE[1]) + \
' -vec ' + _posVecFile
print "Launching (press ESC to exit): " + appName + " " + appArgs
os.system(appName + appArgs)
print generate_pos_vector.__name__ + " Done!"
# --------------------------------------------------------------------------------------------------------------------
# noinspection PyPep8Naming
def VJ_Training(_posVecFile, _outputXML, _negBgTxtFile, _posNum, _negNum, _stagesNum):
"""Viola-Jones Training, generates a corresponding cascasde file"""
if not os.path.exists(_outputXML):
os.makedirs(_outputXML)
appName = 'opencv_traincascade'
appArgs = ' -data ' + _outputXML +\
' -vec ' + _posVecFile +\
' -bg ' + _negBgTxtFile + \
' -numPos ' + str(_posNum) + \
' -numNeg ' + str(_negNum) + \
' -w ' + str(WINDOW_SIZE[0]) + ' -h ' + str(WINDOW_SIZE[1]) + \
' -numStages ' + str(_stagesNum) + \
' -mode ALL'
print "Launching: " + appName + " " + appArgs
os.system(appName + appArgs)
print generate_pos_vector.__name__ + " Done!"
# --------------------------------------------------- S T A R T ------------------------------------------------------
# Load CSV index - marks all objects coordinates:
labelsDict = generate_samples('../../..', SAMPLES_CSV)
# 'fibers' samples serve as the negative set, so no cascade is trained for them.
del labelsDict['fibers']

for label in labelsDict.keys():

    print "-----------------------------------------------------------------------"
    print "Start Training for: " + label
    print "-----------------------------------------------------------------------"

    # Create Positive and Negative samples:
    negCntr = create_pos_n_neg('.', label)

    # Increase Positive sample volume
    if POSITIVE_EXTENSION:
        increase_pos_num('.', label, POSITIVE_NUM)
        merge_pos_vec('.', label)
    else:
        POSITIVE_NUM = len(os.listdir('./' + label))

    # Initialize parameters:
    subDir = './training_' + label
    infoName = subDir + '/info.dat'
    vecName = subDir + '/positives_' + label + '.vec'
    outputXML = subDir + '/data'
    bgName = subDir + '/bg2.txt'
    posNumVec = int(POSITIVE_NUM * 0.9)
    posNumVJ = int(POSITIVE_NUM * 0.7)
    negNumVJ = int(min(negCntr, posNumVJ/2))
    stagesNumVJ = STAGES_NUM

    # Generate Positive vector (stitches all Positive samples):
    # NOTE(review): the vector is written to '<vecName>_orig.vec' but
    # VJ_Training below reads '<vecName>', which only exists when
    # POSITIVE_EXTENSION ran merge_pos_vec -- confirm the intended file name.
    generate_pos_vector(infoName, vecName + '_orig.vec', posNumVec)
    #show_pos_vector(vecName)

    # Generate VJ cascade, by performing Training phase:
    VJ_Training(vecName, outputXML, bgName, posNumVJ, negNumVJ, stagesNumVJ)

print "-----------------------------------------------------------------------"
print "All Done!"
print "-----------------------------------------------------------------------"
for label in labelsDict.keys():
    print label + ' ---> ' + './training_' + label + '/data/cascade.xml'
| 2.90625 | 3 |
src/news/models.py | Busaka/ecn | 0 | 12757817 | from django.db import models
# Create your models here.
class New(models.Model):
    """A news page made of six fixed sections.

    Each section carries a heading, up to three paragraphs (only the first is
    required), a photo and an optional attachment.  ``pub_date`` is set once
    at creation time.
    """
    # --- Section 1 ---
    heading_one = models.CharField(max_length=500)
    h1_paragraph1 = models.TextField()
    h1_paragraph2 = models.TextField(blank=True)
    h1_paragraph3 = models.TextField(blank=True)
    image_one = models.ImageField(upload_to='news/news_photos')
    file_one = models.FileField(upload_to='news/news_files', blank=True)
    # --- Section 2 ---
    heading_two = models.CharField(max_length=500)
    h2_paragraph1 = models.TextField()
    h2_paragraph2 = models.TextField(blank=True)
    h2_paragraph3 = models.TextField(blank=True)
    image_two = models.ImageField(upload_to='news/news_photos')
    file_two = models.FileField(upload_to='news/news_files', blank=True)
    # --- Section 3 ---
    heading_three = models.CharField(max_length=500)
    h3_paragraph1 = models.TextField()
    h3_paragraph2 = models.TextField(blank=True)
    h3_paragraph3 = models.TextField(blank=True)
    image_three = models.ImageField(upload_to='news/news_photos')
    file_three = models.FileField(upload_to='news/news_files', blank=True)
    # --- Section 4 ---
    heading_four = models.CharField(max_length=500)
    h4_paragraph1 = models.TextField()
    h4_paragraph2 = models.TextField(blank=True)
    h4_paragraph3 = models.TextField(blank=True)
    image_four = models.ImageField(upload_to='news/news_photos')
    file_four = models.FileField(upload_to='news/news_files', blank=True)
    # --- Section 5 ---
    heading_five = models.CharField(max_length=500)
    h5_paragraph1 = models.TextField()
    h5_paragraph2 = models.TextField(blank=True)
    h5_paragraph3 = models.TextField(blank=True)
    image_five = models.ImageField(upload_to='news/news_photos')
    file_five = models.FileField(upload_to='news/news_files', blank=True)
    # --- Section 6 ---
    heading_six = models.CharField(max_length=500)
    h6_paragraph1 = models.TextField()
    h6_paragraph2 = models.TextField(blank=True)
    h6_paragraph3 = models.TextField(blank=True)
    image_six = models.ImageField(upload_to='news/news_photos')
    file_six = models.FileField(upload_to='news/news_files', blank=True)
    pub_date = models.DateTimeField('Date Published', auto_now_add=True, auto_now=False)
    def __str__(self):
        # Instances are identified by their publication timestamp.
        return str(self.pub_date)
# Create your models here.
| 2.09375 | 2 |
modules/twitter.py | iamsix/palbot | 3 | 12757818 | import discord
from discord.ext import tasks, commands
from urllib.parse import quote as uriquote
import html
from utils.time import human_timedelta
from datetime import datetime
import base64
class Twitter(commands.Cog):
    """All twittery functions like subscribe and lasttweet"""
    def __init__(self, bot):
        self.bot = bot
        # Start the background subscription poller (a tasks.loop attribute).
        self.tweet_subscriptions.start() # pylint: disable=no-member
        # Maps twitter handle -> datetime of the newest tweet already relayed.
        self.last_checked = {}
    def cog_unload(self):
        self.tweet_subscriptions.cancel() # pylint: disable=no-member
    # TODO : Subscribe/unsubscribe functions here.
    # Need a different config method for subs
    # Preferably something I can keep in-memory with write on add/remove
    # Possibly just a simple json file?
    # consider ignore-retweets on subscription?
    @commands.command(hidden=True)
    @commands.is_owner()
    async def twitter_token(self, ctx):
        """Request an app-only bearer token via OAuth2 client-credentials.

        NOTE(review): the response is only printed to stdout, never stored in
        config -- confirm the token is meant to be copied over manually.
        """
        auth = f"{self.bot.config.twitterconsumerkey}:{self.bot.config.twitterconsumersecret}"
        auth = "Basic " + base64.b64encode(auth.encode()).decode()
        url = "https://api.twitter.com/oauth2/token"
        body = {"grant_type" : "client_credentials"}
        headers = {"Authorization": auth, "Content-Type" : "application/x-www-form-urlencoded;charset=UTF-8"}
        async with self.bot.session.post(url, data=body, headers=headers) as resp:
            response = await resp.json()
            print(response)
    @commands.command(name='lasttweet')
    async def last_tweet(self, ctx, *, handle: str):
        """Show the last tweet of a twitter user"""
        tweet = await self.read_timeline(handle)
        if tweet:
            #parsed = self.parse_tweet(tweet[0])
            e = self.embed_tweet(tweet[0])
            await ctx.send(embed=e)
            #await ctx.send("{author}: {text} ({ago})".format(**parsed))
        else:
            await ctx.send(f"Failed to load tweets from twitter user @{handle}")
    @commands.command(hidden=True)
    async def trump(self, ctx):
        """Show trump's most recent words of wisdom"""
        await self.last_tweet(ctx, handle='realDonaldTrump')
    # TODO Handle retweets better
    def embed_tweet(self, tweet):
        """Build a Discord embed (author, text, timestamp) from one tweet dict."""
        handle = tweet['user']['screen_name']
        link = f"https://twitter.com/{handle}/status/{tweet['id']}"
        e = discord.Embed(title='Tweet', url=link, color=0x1da1f2)
        author = f"{tweet['user']['name']} (@{handle})"
        aurl = f"https://twitter.com/{handle}"
        e.set_author(name=author, url=aurl, icon_url=tweet['user']['profile_image_url_https'])
        e.description = html.unescape(tweet['full_text'].strip())
        ts = datetime.strptime(tweet['created_at'], "%a %b %d %H:%M:%S +0000 %Y")
        e.timestamp = ts
        return e
    def parse_tweet(self, tweet):
        """Reduce a tweet dict to author/text/age fields for plain-text output."""
        # NOTE(review): leftover debug print -- confirm it can be removed.
        print(tweet)
        updated = datetime.strptime(tweet['created_at'], "%a %b %d %H:%M:%S +0000 %Y")
        ago = human_timedelta(updated, brief=True)
        author = tweet['user']['screen_name']
        text = html.unescape(tweet['full_text'].strip())
        return {'author': author, 'text': text, "ago": ago, "updated": updated}
    async def read_timeline(self, user, count=1):
        """Fetch *count* most recent tweets for *user*; returns a list or None on HTTP error."""
        url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
        params = {"screen_name": user, "count": count, "tweet_mode": "extended"}
        headers = {"Authorization": "Bearer " + self.bot.config.twitter_token}
        async with self.bot.session.get(url, params=params, headers=headers) as resp:
            if resp.status == 200:
                return await resp.json()
            else:
                return None
    @tasks.loop(minutes=1.0)
    async def tweet_subscriptions(self):
        """Reads a twitter timeline and posts the new tweets to any channels that sub it"""
        subs = self.bot.config.twitter_subscriptions
        for twitter_nick in subs:
            if twitter_nick not in self.last_checked:
                # First pass: only relay tweets newer than startup time.
                self.last_checked[twitter_nick] = datetime.utcnow()
            self.bot.logger.info(f"Starting tweet loop. Last checked: {self.last_checked}")
            tweets = await self.read_timeline(twitter_nick, count=3)
            self.bot.logger.debug(f"Raw tweetsdata: {tweets}")
            if not tweets:
                continue
            text = ""
            data = None
            # Newest tweets first, so reverse
            for tweet in reversed(tweets):
                data = self.parse_tweet(tweet)
                self.bot.logger.debug(f"I have data {data}")
                if data['updated'] > self.last_checked[twitter_nick]:
                    text += data['text'] + "\n"
                    self.bot.logger.debug(f"I have a tweet: {text}")
            for channel in subs[twitter_nick]:
                # a count of 3 per minute seems to work....
                if data and text.strip():
                    self.last_checked[twitter_nick] = data['updated']
                    message = f"{data['author']}: {text.strip()}"
                    chan = self.bot.get_channel(channel)
                    if chan:
                        await chan.send(message)
await chan.send(message)
def setup(bot):
    """discord.py extension entry point: attach the Twitter cog to *bot*."""
    bot.add_cog(Twitter(bot))
| 2.796875 | 3 |
demos/server.py | tiefenauer/forced-alignment | 8 | 12757819 | """adapted from: https://gist.github.com/shivakar/82ac5c9cb17c95500db1906600e5e1ea"""
import argparse
import os
import sys
from http.server import SimpleHTTPRequestHandler, HTTPServer
from os.path import realpath, join, dirname, isdir, exists
# CLI: optional serve directory and port, both with defaults; parsed at import time.
parser = argparse.ArgumentParser(description='Start simple HTTP server supporting HTTP/1.1 requests (needed to play'
                                             'the aligned audio in HTML5)!')
parser.add_argument('cwd', type=str, nargs='?', default='htdocs',
                    help='(optional) directory to serve from (default: \'htdocs\')')
parser.add_argument('port', type=int, nargs='?', default=8000, help='(optional) port to use (default: 8000)')
args = parser.parse_args()
class RangeHTTPRequestHandler(SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler with HTTP 'Range' request support.

    Parses a single ``bytes=start-end`` range, answers with 206 and a
    Content-Range header, and restricts ``copyfile`` to the requested span.
    """

    def send_head(self):
        """Common code for GET and HEAD commands.

        Returns an open file object positioned at the file start (the range
        is applied later by ``copyfile``), or None after an error response
        has been sent.  Bug fixes vs. the original: every ``send_error``
        path now returns immediately (the original fell through and kept
        processing with bogus values) and closes the file handle.
        """
        path = self.translate_path(self.path)
        ctype = self.guess_type(path)

        # Directories are handled by the base class (listing / index file).
        if isdir(path):
            return SimpleHTTPRequestHandler.send_head(self)

        if not exists(path):
            return self.send_error(404, self.responses.get(404)[0])

        f = open(path, 'rb')
        fs = os.fstat(f.fileno())
        size = fs[6]

        # Default: the whole file.  Range headers look like 'bytes=500-1000'.
        start, end = 0, size - 1
        if 'Range' in self.headers:
            start, end = self.headers.get('Range').strip().strip('bytes=').split('-')
            if start == "":
                # Suffix form 'bytes=-N': the last N bytes.
                try:
                    end = int(end)
                except ValueError:
                    f.close()
                    return self.send_error(400, 'invalid range')
                start = size - end
                end = size - 1
            else:
                try:
                    start = int(start)
                except ValueError:
                    f.close()
                    return self.send_error(400, 'invalid range')
                if start >= size:
                    # Requested start beyond the end of the file.
                    f.close()
                    return self.send_error(416, self.responses.get(416)[0])
                if end == "":
                    # Open-ended form 'bytes=N-': serve to EOF.
                    end = size - 1
                else:
                    try:
                        end = int(end)
                    except ValueError:
                        f.close()
                        return self.send_error(400, 'invalid range')

        # Clamp to the file and remember the span for copyfile().
        start = max(start, 0)
        end = min(end, size - 1)
        self.range = (start, end)

        # 206 for partial content, 200 otherwise.
        length = end - start + 1
        if 'Range' in self.headers:
            self.send_response(206)
        else:
            self.send_response(200)
        self.send_header('Content-type', ctype)
        self.send_header('Accept-Ranges', 'bytes')
        self.send_header('Content-Range',
                         'bytes %s-%s/%s' % (start, end, size))
        self.send_header('Content-Length', str(length))
        self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f

    def copyfile(self, infile, outfile):
        """Copy data between two file objects.

        For a 'Range' request only the requested bytes are copied; otherwise
        the whole file is delegated to the base class.  Bug fixes vs. the
        original: the chunk plan was computed from ``end - start`` instead of
        the inclusive length ``end - start + 1`` (one byte short of the
        advertised Content-Length), and the final chunk read ``bufsize``
        bytes regardless of the remainder, sending data past ``end``.
        """
        if 'Range' not in self.headers:
            SimpleHTTPRequestHandler.copyfile(self, infile, outfile)
            return

        start, end = self.range
        infile.seek(start)
        remaining = end - start + 1     # inclusive byte range
        bufsize = 64 * 1024             # 64KB chunks
        while remaining > 0:
            buf = infile.read(min(bufsize, remaining))
            if not buf:
                break                   # EOF guard: file shorter than claimed
            outfile.write(buf)
            remaining -= len(buf)
if __name__ == '__main__':
    print(f'serving from {args.cwd} on port {args.port}')
    # Serve relative to the requested directory next to this script.
    os.chdir(join(dirname(realpath(__file__)), args.cwd))
    server_address = ('', args.port)
    HandlerClass = RangeHTTPRequestHandler
    ServerClass = HTTPServer
    # HTTP/1.1 keeps connections alive, which the Range handler relies on.
    HandlerClass.protocol_version = "HTTP/1.1"
    httpd = ServerClass(server_address, HandlerClass)
    sa = httpd.socket.getsockname()
    sys.stderr.write(f'started MJC\n cwd={args.cwd} port={args.port}')
    sys.stderr.flush()
    # NOTE(review): serve_forever() blocks this process despite the
    # "in background" wording -- confirm a supervisor backgrounds it.
    print(f'Serving HTTP on {sa[0]}:{sa[1]} ... in background')
    httpd.serve_forever()
app/application_patterns/rdbms/db.py | andykmiles/code-boutique | 0 | 12757820 | """
show simplest database operation
"""
import sqlite3
# Demo statements: rebuild a small table, insert three rows, select them back.
sql_statements = (
    "drop table if exists test",
    "create table test (id, name)",
    "insert into test values (1, 'abc')",
    "insert into test values (2, 'def')",
    "insert into test values (3, 'xyz')",
    "select id, name from test",
)

def main():
    """Run the demo statements against dbms.db and print the selected rows.

    Improvements: the side-effect list comprehension is now a plain loop
    (comprehensions are for building values, not executing statements), and
    the connection is closed even if a statement raises.
    """
    conn = sqlite3.connect("dbms.db")
    try:
        c = conn.cursor()
        for statement in sql_statements:
            c.execute(statement)
        conn.commit()
        # The last statement is the SELECT, so its rows are still pending.
        rows = c.fetchall()
        print(rows)
        c.close()
    finally:
        conn.close()

if __name__ == "__main__":
    main()
| 4.03125 | 4 |
app/admin/__init__.py | lp1225/-001 | 1 | 12757821 | #encoding: utf-8
from flask import Blueprint
# Bug fix: the second Blueprint argument must be the module's import name,
# i.e. the __name__ variable, not the literal string '__name__'.
admin = Blueprint('admin', __name__)
# import views | 1.15625 | 1 |
mysite/myapp/views.py | rreyfockandroid/django | 0 | 12757822 | <filename>mysite/myapp/views.py
from django.http import HttpResponse
from datetime import datetime
import django
import platform
def helloX(request):
    """Return a greeting page showing the running Python and Django versions."""
    django_version = django.get_version()
    python_version = platform.python_version()
    body = "<h1>Witaj w mojej aplikacji! Version, python: %s, django: %s</h1>" % (python_version, django_version)
    return HttpResponse(body)
def morning(request):
    """Return a plain-text morning greeting."""
    return HttpResponse("Good morning")
def viewArticle(request, articleId):
    """Display a single article identified by ``articleId``."""
    return HttpResponse("Wyswietl artykul numer: %s" % articleId)
def viewArticles(request, month, year):
    """Display the articles published in the given month/year."""
    return HttpResponse("Wyswietl artykul z: %s/%s" % (year, month))
from django.shortcuts import render
def hello(request):
    """Render the hello template with today's date and a short info text."""
    context = {
        "today": datetime.now().date(),
        "infotext": 'nazywam sie alicja',
    }
    return render(request, "hello.html", context)
| 2.5625 | 3 |
ci/build_matrix.py | dagrayvid/benchmark-wrapper | 14 | 12757823 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create Github Actions Job Matrix for building dockerfiles.
Expects one optional input as the first positional argument. This is the upstream branch name, which
the current working tree will be compared against in order to understand if a benchmark should
be labeled as changed or not. If this input is not given, then "master" will be used.
A benchmark will be labeled as changed if any of the following conditions are met:
* A core component of benchmark-wrapper has changed, known as a 'bone'. Please see $bones for a list of
regex searches.
* Any of the files underneath the benchmark's module path have changed
The JSON output looks like this, in accordance to the GHA Job Matrix Format:
```json
{
"include": [
{
"dockerfile": "path to dockerfile relative to repo root",
"image_name": "name of the image (i.e. name of directory containing the DF)",
"benchmark": "name of the benchmark (i.e. name of directory containing the DF)",
"env_var": "environment variable where image URL will be stored (i.e. <BENCHMARK>_IMAGE)",
"tag_suffix": "suffix of the image tag that should be used (i.e. arch of the DF with a dash)",
"tags": "space separated list of tags that should be applied to the image",
"arch": "architecture that the DF should be built on",
"changed": "whether or not changes have been made which require the benchmark to be tested",
},
...
]
}
```
If the `--manifest` option is given, then GHA job matrices will be printed which can be used for
building and pushing multi-arch image manifests to quay. The output looks like this:
```json
{
"build": "build matrix from above",
"manifest": {
"include": [
{
"benchmark": "name of the benchmark associated with the image",
"image_name": "name of the image",
"dockerfile": "relative path to dockerfile of image",
"tag": "tag the manifest will be built for",
"archs": "archictectures that should be added into the image manifest, space separated",
"tag_suffixes": "tag suffixes to add into the image manifest, space separated",
"changed": "whether or not changes have been made which require the benchmark to be tested",
},
...
]
}
}
"""
import argparse
import dataclasses
import json
import pathlib
import re
import shlex
import subprocess
from typing import Dict, Iterable, List, Set, Union
ARCHS = (
"amd64",
"arm64",
)
BONES = (
r"ci/",
r".github/workflows",
r"MANIFEST.in",
r"setup.py",
r"setup.cfg",
r"snafu/benchmarks/_[a-z]*.py",
r"snafu/[a-z]*.py",
r"tox.ini",
r"version.txt",
r"requirements/",
)
IGNORES = (r"Dockerfile\.ppc64le$",)
def get_git_diff(upstream_branch: str) -> str:
    """
    Run git-diff against an upstream branch.

    The upstream branch is fetched first so that it can be compared against.

    Arguments
    ---------
    upstream_branch : str
        Upstream branch to compare against.

    Returns
    -------
    str
        Output of ``git diff --name-only``.
    """

    subprocess.run(shlex.split(f"git fetch origin {upstream_branch}"), check=True)
    diff_proc = subprocess.run(
        shlex.split(f"git diff origin/{upstream_branch} --name-only"),
        check=True,
        stdout=subprocess.PIPE,
    )
    return diff_proc.stdout.decode("utf-8")
def parse_git_diff(diff_str: str) -> Set[str]:
    """
    Return parsed output of `git-diff --name-only`.

    Arguments
    ---------
    diff_str : str
        Output of `git-diff --name-only`.

    Returns
    -------
    set of str
        Unique set of files changed, according to git-diff.
    """

    return {line.strip() for line in diff_str.strip().split("\n")}
def get_dockerfile_list() -> str:
    """
    Use the find command to get a list of all Dockerfiles within snafu.

    Returns
    -------
    str
        Output of the find command (newline-separated relative paths).
    """

    find_proc = subprocess.run(
        shlex.split("find snafu/ -name Dockerfile*"), check=True, stdout=subprocess.PIPE
    )
    return find_proc.stdout.decode("utf-8")
def parse_dockerfile_list(df_list: str, ignores: Union[Iterable[str], None] = None) -> Set[str]:
    """
    Parse given list of Dockerfiles into a set of str.

    If a given Dockerfile path matches a regex in ``ignores`` (defaulting to
    the module-level ``IGNORES``), then the Dockerfile will not be included
    in the returned set.

    Arguments
    ---------
    df_list : str
        Dockerfile list to parse. Should be newline-separated list of relative paths from
        project root.
    ignores : iterable of str, optional
        Regex patterns; any path matching one of them is skipped.
        Defaults to the module-level ``IGNORES`` tuple.

    Returns
    -------
    set of str
        Set of all unique dockerfile paths parsed from given input.
    """

    if ignores is None:
        ignores = IGNORES
    result = set()
    for line in df_list.strip().split("\n"):
        dockerfile = line.strip()
        # Keep the path only if no ignore pattern matches it.
        if not any(re.search(pattern, dockerfile) for pattern in ignores):
            result.add(dockerfile)
    return result
@dataclasses.dataclass
class MatrixEntry:
    """
    One entry within the GHA job matrix.

    See module docstring for details of the emitted JSON fields.
    """

    dockerfile: str
    image_name: str
    benchmark: str
    env_var: str
    archs: Iterable[str]
    changed: bool
    tags: Iterable[str]

    @classmethod
    def new(cls, dockerfile: str, changed: bool, archs: Iterable[str], tags: Iterable[str]) -> "MatrixEntry":
        """
        Create a new instance of MatrixEntry.

        Parameters
        ----------
        dockerfile : str
            Relative path to Dockerfile. Used to derive the benchmark name,
            image name and environment variable.
        changed : bool
            Sets the changed attribute.
        archs : list of str
            Sets the archs attribute.
        tags : list of str
            Sets the tags attribute.
        """

        # The benchmark name is the Dockerfile's parent directory with the
        # conventional "_wrapper" suffix stripped.
        benchmark_name = str(pathlib.Path(dockerfile).parent.stem).replace("_wrapper", "")
        return cls(
            dockerfile=dockerfile,
            image_name=benchmark_name,
            benchmark=benchmark_name,
            env_var=f"{benchmark_name.upper()}_IMAGE",
            archs=archs,
            changed=changed,
            tags=tags,
        )

    def build_json(self) -> Iterable[Dict[str, Union[str, bool]]]:
        """Convert the given MatrixEntry into series of JSON-dicts, one for each arch."""
        for arch in self.archs:
            suffix = f"-{arch}"
            yield {
                "dockerfile": self.dockerfile,
                "image_name": self.image_name,
                "benchmark": self.benchmark,
                "env_var": self.env_var,
                "tag_suffix": suffix,
                "arch": arch,
                "changed": self.changed,
                "tags": " ".join(f"{tag}{suffix}" for tag in self.tags),
            }

    def manifest_json(self) -> Iterable[Dict[str, Union[str, bool]]]:
        """Convert the given MatrixEntry into series of JSON-dicts, one for each tag."""
        suffixes = " ".join(f"-{arch}" for arch in self.archs)
        arch_str = " ".join(self.archs)
        for tag in self.tags:
            yield {
                "benchmark": self.benchmark,
                "image_name": self.image_name,
                "dockerfile": self.dockerfile,
                "tag": tag,
                "tag_suffixes": suffixes,
                "changed": self.changed,
                "archs": arch_str,
            }
class MatrixBuilder:
"""
Builder for the GHA Jobs Matrix.
Parameters
----------
archs : iterable of str
List of architectures to build against. Will create a matrix entry for each architecture for each
Dockerfile.
tags : iterable of str
List of tags that will be applied to the built images.
bones : iterable of str
List of regex strings to match paths against to determine if the path is a snafu "bone".
upstream_branch : str
Upstream branch to compare changes to, in order to determine the value of "changed".
dockerfile_set : set of str
Set of dockerfiles within the snafu repository.
changed_set : set of str
Set of changed files within the snafu repository.
"""
def __init__(
self,
archs: Iterable[str],
tags: Iterable[str],
bones: Iterable[str],
upstream_branch: str,
dockerfile_set: Set[str],
changed_set: Set[str],
):
"""Contsruct the matrix builder."""
self.archs = archs
self.tags = tags
self.bones = bones
self.upstream_branch = upstream_branch
self.dockerfile_set = dockerfile_set
self.changed_set = changed_set
self.manifest_matrix: Dict[str, List[Dict[str, Union[str, bool]]]] = {}
self.build_matrix: Dict[str, List[Dict[str, Union[str, bool]]]] = {}
self.reset()
def reset(self):
"""Reset the matrix to empty starting point."""
self.build_matrix = {"include": []}
self.manifest_matrix = {"include": []}
def add_entry(self, entry: MatrixEntry):
"""Add the given MatrixEntry into the jobs matrix."""
for json_dict in entry.build_json():
self.build_matrix["include"].append(json_dict)
for json_dict in entry.manifest_json():
self.manifest_matrix["include"].append(json_dict)
def bones_changed(self) -> bool:
"""Return True if a bone has is found in the changed set."""
for bone in self.bones:
bone_regex = re.compile(bone)
for changed in self.changed_set:
if bone_regex.search(changed) is not None:
return True
return False
def benchmark_changed(self, dockerfile: str) -> bool:
"""Return True if the given dockerfile's benchmark has changed."""
dockerfile_dir = pathlib.Path(dockerfile).parent
for changed in self.changed_set:
try:
pathlib.Path(changed).relative_to(dockerfile_dir)
except ValueError:
pass
else:
return True
return False
def build(self, changed_only: bool = True):
"""
Build the GHA jobs matrix.
Parameters
----------
changed_only : bool, optional
If True, then only dockerfiles that are considered changed will be added into the matrix.
Defaults to True.
"""
bones_changed = self.bones_changed()
for dockerfile in self.dockerfile_set:
changed = bones_changed or self.benchmark_changed(dockerfile)
if (changed_only and changed) or not changed_only:
entry = MatrixEntry.new(
dockerfile=dockerfile, archs=self.archs, changed=changed, tags=self.tags
)
self.add_entry(entry)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("tags", nargs="+", help="Tags to apply to the built images")
parser.add_argument(
"--upstream",
default="master",
help="Upstream branch to compare against. Defaults to 'master'",
)
parser.add_argument("--changed-only", action="store_true", help="Only output changed Dockerfiles")
parser.add_argument(
"--manifest", action="store_true", help="Output both the build and manifest matrix JSON"
)
args = parser.parse_args()
builder = MatrixBuilder(
archs=ARCHS,
tags=args.tags,
bones=BONES,
upstream_branch=args.upstream,
dockerfile_set=parse_dockerfile_list(get_dockerfile_list()),
changed_set=parse_git_diff(get_git_diff(args.upstream)),
)
builder.build(changed_only=args.changed_only)
if args.manifest:
print(json.dumps({"build": builder.build_matrix, "manifest": builder.manifest_matrix}))
else:
print(json.dumps(builder.build_matrix))
| 2.578125 | 3 |
pytest/test_users.py | ribuild/delphin_6_automation | 2 | 12757824 | __author__ = "<NAME>"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import io
from contextlib import redirect_stdout
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import user_entry
from delphin_6_automation.database_interactions import user_interactions
from delphin_6_automation.database_interactions.db_templates import delphin_entry
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def test_add_user(empty_database):
    """Creating an account should add exactly one user document."""
    user_interactions.create_account('User Test', '<EMAIL>')

    assert user_entry.User.objects().first()
    assert len(user_entry.User.objects()) == 1
def test_user_properties(empty_database):
    """A new account stores the given name/email and is otherwise empty."""
    email = '<EMAIL>'
    name = '<NAME>'
    user_interactions.create_account(name, email)

    created = user_entry.User.objects().first()
    assert created.name == name
    assert created.email == email
    assert created.simulations == []
    assert not created.password
def test_find_user_by_email(add_single_user):
    """find_account_by_email returns the already-stored user."""
    stored = user_entry.User.objects().first()
    found = user_interactions.find_account_by_email(stored.email)
    assert stored.id == found.id
def test_add_simulation_to_user(db_one_project):
    """Adding a simulation links it into the user's simulation list."""
    user = user_entry.User.objects().first()
    simulation = delphin_entry.Delphin.objects().first()

    user_interactions.add_simulation_to_user(user, simulation)
    user.reload()

    assert user.simulations
    assert user.simulations[0].id == simulation.id
def test_user_simulations(db_one_project):
    """list_user_simulations prints one line describing each simulation."""
    user = user_entry.User.objects().first()
    simulation = delphin_entry.Delphin.objects().first()
    user_interactions.add_simulation_to_user(user, simulation)
    user.reload()

    expected_out = (
        f"ID: {simulation.id} - "
        f"Added: {simulation.added_date} - "
        f"With priority: {simulation.queue_priority}\n"
    )

    buffer = io.StringIO()
    with redirect_stdout(buffer):
        user_interactions.list_user_simulations(user)

    assert buffer.getvalue() == expected_out
| 2.21875 | 2 |
simclr/loss/mix_loss.py | Lee-Gihun/MixCo-Mixup-Contrast | 40 | 12757825 | <reponame>Lee-Gihun/MixCo-Mixup-Contrast<filename>simclr/loss/mix_loss.py<gh_stars>10-100
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftCrossEntropy(nn.Module):
    """Cross entropy that accepts soft (probability-distribution) targets.

    For each sample the loss is ``-sum_c target_c * log softmax(logits)_c``,
    averaged over the batch.
    """

    def __init__(self):
        super(SoftCrossEntropy, self).__init__()

    def forward(self, logits, target):
        """Compute the mean soft cross entropy over the batch.

        Args:
            logits: raw (unnormalized) class scores, shape (batch, classes).
            target: per-class target weights/probabilities, same shape as
                logits.

        Returns:
            Scalar tensor with the mean loss.
        """
        # log_softmax is numerically stabler than log(softmax(...)): it avoids
        # log(0) -> -inf when a class's softmax probability underflows for
        # extreme logits, while being mathematically identical.
        log_probs = F.log_softmax(logits, 1)
        loss = (-target * log_probs).sum(1).mean()
        return loss
| 2.234375 | 2 |
providers/services_areas/test/test_serializers.py | kamotos/mozio_providers | 0 | 12757826 | <reponame>kamotos/mozio_providers
from django.test import TestCase
from django.forms.models import model_to_dict
from nose.tools import eq_, ok_
from services_areas.serializers import ServiceAreaSerializer
from services_areas.test.factories import ServiceAreaFactory
class TestCreateUserSerializer(TestCase):
    """Validation tests for ServiceAreaSerializer."""

    def setUp(self):
        # Build (but do not save) a factory service area and convert it to
        # POST-style form data.
        self.service_area_data = model_to_dict(ServiceAreaFactory.build())

    def test_serializer_with_empty_data(self):
        eq_(ServiceAreaSerializer(data={}).is_valid(), False)

    def test_serializer_with_valid_data(self):
        ok_(ServiceAreaSerializer(data=self.service_area_data).is_valid(True))

    def test_provider_owns_created_service_area(self):
        # TODO: not implemented yet.
        pass
| 2.078125 | 2 |
discretize/mixins/omf_mod.py | ckohnke/discretize | 0 | 12757827 | """
A class for converting ``discretize`` meshes to OMF objects
"""
import omf
import numpy as np
import discretize
def ravel_data_array(arr, nx, ny, nz):
    """Ravel a numpy array into the proper order for passing to the OMF
    specification from ``discretize``/UBC formats.
    """
    # Interpret the flat input in C (row-major) order as (nz, ny, nx), then
    # flatten it back out in Fortran (column-major) order.
    shaped = np.reshape(arr, (nz, ny, nx), order="C")
    return shaped.ravel(order="F")
def unravel_data_array(arr, nx, ny, nz):
    """Unravel a numpy array from the OMF specification back to
    ``discretize``/UBC formats — the inverse of ``ravel_data_array``.
    """
    # Interpret the flat input in Fortran (column-major) order as
    # (nz, ny, nx), then flatten it in C (row-major) order.
    shaped = np.reshape(arr, (nz, ny, nx), order="F")
    return shaped.ravel(order="C")
class InterfaceOMF(object):
    """Mixin that adds OMF import/export to ``discretize`` mesh classes.

    The converter methods below are written without ``self``/``@staticmethod``
    and are accessed as plain functions through the class namespace (see the
    ``converters`` dicts); their first positional argument is the mesh, which
    acts as ``self`` when mixed into a mesh class.
    """

    def _tensor_mesh_to_omf(mesh, models=None):
        """
        Constructs an :class:`omf.VolumeElement` object of this tensor mesh and
        the given models as cell data of that grid.

        Parameters
        ----------
        mesh : discretize.TensorMesh
            The tensor mesh to convert to a :class:`omf.VolumeElement`
        models : dict(numpy.ndarray)
            Name('s) and array('s). Match number of cells
        """
        if models is None:
            models = {}
        # Make the geometry
        geometry = omf.VolumeGridGeometry()
        # Set tensors: pad missing dimensions with a single zero-length
        # tensor so 1D/2D meshes still produce a valid 3D OMF geometry.
        tensors = mesh.h
        if len(tensors) < 1:
            raise RuntimeError(
                "Your mesh is empty... fill it out before converting to OMF"
            )
        elif len(tensors) == 1:
            geometry.tensor_u = tensors[0]
            geometry.tensor_v = np.array(
                [
                    0.0,
                ]
            )
            geometry.tensor_w = np.array(
                [
                    0.0,
                ]
            )
        elif len(tensors) == 2:
            geometry.tensor_u = tensors[0]
            geometry.tensor_v = tensors[1]
            geometry.tensor_w = np.array(
                [
                    0.0,
                ]
            )
        elif len(tensors) == 3:
            geometry.tensor_u = tensors[0]
            geometry.tensor_v = tensors[1]
            geometry.tensor_w = tensors[2]
        else:
            raise RuntimeError("This mesh is too high-dimensional for OMF")
        # Set rotation axes
        geometry.axis_u = mesh.axis_u
        geometry.axis_v = mesh.axis_v
        geometry.axis_w = mesh.axis_w
        # Set the origin
        geometry.origin = mesh.origin
        # Make sure the geometry is built correctly
        geometry.validate()
        # Make the volume elemet (the OMF object)
        omfmesh = omf.VolumeElement(
            geometry=geometry,
        )
        # Add model data arrays onto the cells of the mesh; ravel_data_array
        # reorders each array from discretize/UBC ordering to OMF ordering.
        omfmesh.data = []
        for name, arr in models.items():
            data = omf.ScalarData(
                name=name,
                array=ravel_data_array(arr, *mesh.shape_cells),
                location="cells",
            )
            omfmesh.data.append(data)
        # Validate to make sure a proper OMF object is returned to the user
        omfmesh.validate()
        return omfmesh

    def _tree_mesh_to_omf(mesh, models=None):
        # Placeholder: tree meshes need features from a future OMF release.
        raise NotImplementedError("Not possible until OMF v2 is released.")

    def _curvilinear_mesh_to_omf(mesh, models=None):
        # Placeholder: no OMF representation for curvilinear meshes yet.
        raise NotImplementedError("Not currently possible.")

    def _cyl_mesh_to_omf(mesh, models=None):
        # Placeholder: no OMF representation for cylindrical meshes yet.
        raise NotImplementedError("Not currently possible.")

    def to_omf(mesh, models=None):
        """Convert this mesh object to it's proper ``omf`` data object with
        the given model dictionary as the cell data of that dataset.

        Parameters
        ----------
        models : dict(numpy.ndarray)
            Name('s) and array('s). Match number of cells
        """
        # TODO: mesh.validate()
        # Dispatch on the discretize mesh type string; only tensor meshes
        # are currently convertible (see the placeholder methods above).
        converters = {
            # TODO: 'tree' : InterfaceOMF._tree_mesh_to_omf,
            "tensor": InterfaceOMF._tensor_mesh_to_omf,
            # TODO: 'curv' : InterfaceOMF._curvilinear_mesh_to_omf,
            # TODO: 'CylindricalMesh' : InterfaceOMF._cyl_mesh_to_omf,
        }
        key = mesh._meshType.lower()
        try:
            convert = converters[key]
        except KeyError:
            raise RuntimeError(
                "Mesh type `{}` is not currently supported for OMF conversion.".format(
                    key
                )
            )
        # Convert the data object
        return convert(mesh, models=models)

    @staticmethod
    def _omf_volume_to_tensor(element):
        """Convert an :class:`omf.VolumeElement` to :class:`discretize.TensorMesh`"""
        geometry = element.geometry
        h = [geometry.tensor_u, geometry.tensor_v, geometry.tensor_w]
        mesh = discretize.TensorMesh(h)
        mesh.axis_u = geometry.axis_u
        mesh.axis_v = geometry.axis_v
        mesh.axis_w = geometry.axis_w
        mesh.origin = geometry.origin
        # Collect every data array, converted back to discretize ordering.
        data_dict = {}
        for data in element.data:
            # NOTE: this is agnostic about data location - i.e. nodes vs cells
            data_dict[data.name] = unravel_data_array(
                np.array(data.array), *mesh.shape_cells
            )
        # Return TensorMesh and data dictionary
        return mesh, data_dict

    @staticmethod
    def from_omf(element):
        """Convert an OMF element to it's proper ``discretize`` type.

        Automatically determines the output type. Returns both the mesh and a
        dictionary of model arrays.
        """
        element.validate()
        # Dispatch on the OMF element class name; only VolumeElement is
        # currently supported.
        converters = {
            omf.VolumeElement.__name__: InterfaceOMF._omf_volume_to_tensor,
        }
        key = element.__class__.__name__
        try:
            convert = converters[key]
        except KeyError:
            raise RuntimeError(
                "OMF type `{}` is not currently supported for conversion.".format(key)
            )
        # Convert the data object
        return convert(element)
| 3.15625 | 3 |
model_train.py | CharlesMure/cassiope-NIDS | 12 | 12757828 | <gh_stars>10-100
#! usr/bin/python3
import keras
import csv
import h5py
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
from keras import layers, models, optimizers
from keras import backend as K
from keras.utils import to_categorical
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D, GlobalAveragePooling1D, MaxPooling2D
from keras.layers import Dense, Dropout, Activation, Embedding, BatchNormalization, Flatten
from keras.optimizers import SGD
from keras.wrappers.scikit_learn import KerasClassifier
from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler
import numpy as np
from sklearn.preprocessing import LabelBinarizer, MinMaxScaler, LabelEncoder, RobustScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.metrics import classification_report
def generate_cnn_model(shape):
    """Build the CNN used for intrusion detection.

    Architecture from the research paper:
    https://www.researchgate.net/publication/319717354_A_Few-shot_Deep_Learning_Approach_for_Improved_Intrusion_Detection

    Args:
        shape: number of input features per sample (input is (shape, 1, 1)).

    Returns:
        An uncompiled keras Sequential model with a 10-way softmax output.
    """
    layer_stack = [
        Conv2D(64, (3, 1), activation='relu', input_shape=(shape, 1, 1)),
        Conv2D(64, (3, 1), activation='relu'),
        MaxPooling2D(pool_size=(2, 1)),
        Conv2D(128, (3, 1), activation='relu'),
        Conv2D(128, (3, 1), activation='relu', padding="same"),
        Conv2D(128, (3, 1), activation='relu', padding="same"),
        MaxPooling2D(pool_size=(2, 1)),
        Conv2D(256, (3, 1), activation='relu', padding="same"),
        Conv2D(256, (3, 1), activation='relu', padding="same"),
        Conv2D(256, (3, 1), activation='relu', padding="same"),
        MaxPooling2D(pool_size=(2, 1)),
        Flatten(),
        Dense(100, kernel_initializer='normal', activation='relu'),
        Dropout(0.5),
        Dense(20, kernel_initializer='normal', activation='relu', name='output'),
        Dense(10, kernel_initializer='normal', activation='softmax'),
    ]
    return Sequential(layer_stack)
def preporcess(data, dataTest):
    """Prepare UNSW-NB15 train/test arrays for the CNN.

    Selects the feature columns, min-max scales them, integer- then
    one-hot-encodes the attack-category labels, and reshapes each sample to
    (features, 1, 1) as required by the Conv2D input layer.

    Args:
        data: raw training rows (2D numpy array of strings).
        dataTest: raw test rows (2D numpy array of strings).

    Returns:
        (x_train, y_train, x_validation, y_validation, x_test, y_test)
    """
    scaler = MinMaxScaler()
    encoder = LabelBinarizer()
    encoder2 = LabelEncoder()

    # Isolate only the selected features. See ReadMe for more details.
    x_train = data[:, [1, 6, 7, 8, 9, 10, 11, 12, 13, 27, 28, 32, 33, 34, 35, 36]]
    x_test = dataTest[:, [1, 6, 7, 8, 9, 10, 11, 12, 13, 27, 28, 32, 33, 34, 35, 36]]

    # Normalize features.
    # NOTE(review): the scaler is re-fit on the test set, which leaks test
    # statistics into normalization; kept as-is to preserve the original
    # behaviour — confirm before changing.
    x_train = x_train.astype(float)
    scaler.fit(x_train)
    x_train = scaler.transform(x_train)
    x_test = x_test.astype(float)
    scaler.fit(x_test)
    x_test = scaler.transform(x_test)

    # Retrieve the attack-category labels from the datasets.
    train_label = data[:, 43]
    test_label = dataTest[:, 43]

    # Encode the attack types. fit() generates a dictionary text=>value
    # [6:'Fuzzers',4:'Backdoor',1:'DoS',2:'Exploits',3:'Generic',5:'Reconnaissance',7:'Normal',8:'Shellcode',9:'Worms']
    encoder2.fit(train_label)
    y_train = encoder2.transform(train_label)
    y_test = encoder2.transform(test_label)

    # Transform the integer labels to one-hot (binary) vectors.
    encoder.fit(y_train)
    y_train = encoder.transform(y_train)
    y_test = encoder.transform(y_test)

    # CNN works with 3D inputs in Keras, so change each feature vector of
    # size n to shape (n, 1, 1). A single vectorized reshape replaces the
    # original per-sample Python loops (same values, same order).
    size = np.size(x_train, axis=1)
    x_train = x_train.reshape(-1, size, 1, 1)
    x_test = x_test.reshape(-1, size, 1, 1)

    # Split the test set into a validation part and an (unused) holdout.
    seed = 9
    np.random.seed(seed)
    x_validation, _, y_validation, _ = train_test_split(
        x_test, y_test, test_size=0.80, random_state=seed)

    return x_train, y_train, x_validation, y_validation, x_test, y_test
def eval(model, x_test, y_test):
    """Print the loss and accuracy of *model* on the test data.

    NOTE(review): the name shadows the builtin eval(); kept unchanged for
    caller compatibility.
    """
    results = model.evaluate(x_test, y_test, verbose=1)
    print("loss on test data:", results[0])
    print("accuracy on test data:", results[1] * 100, "%")
def main():
    """Train the CNN on the UNSW-NB15 dataset and persist the fitted model."""
    # Load the training and test datasets.
    train_rows = np.array(list(csv.reader(
        open("Data/UNSW-NB15/UNSW_NB15_training-set.csv"), delimiter=",")))
    test_rows = np.array(list(csv.reader(
        open("Data/UNSW-NB15/UNSW_NB15_testing-set.csv"), delimiter=",")))

    x_train, y_train, x_validation, y_validation, x_test, y_test = preporcess(
        train_rows, test_rows)

    # Declare and compile the network.
    model = generate_cnn_model(np.size(x_train, axis=1))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(), metrics=['categorical_accuracy'])

    # Stop training early once the monitored metric stops improving.
    stopper = EarlyStopping(monitor='val_binary_accuracy', patience=3, mode='auto')

    # Learning.
    model.fit(x_train, y_train, epochs=1, batch_size=50,
              validation_data=(x_validation, y_validation), callbacks=[stopper])

    # Evaluate the performance of the model.
    eval(model, x_test, y_test)

    # Serialize the architecture to JSON and the weights to HDF5.
    with open("model.json", "w") as json_file:
        json_file.write(model.to_json())
    model.save_weights("model.h5")
    print("Saved model to disk")


if __name__ == "__main__":
    main()
| 1.984375 | 2 |
py4syn/utils/fit.py | gabrielpreviato/py4syn | 12 | 12757829 | import sys
import numpy as np
def tvDenoising1D(data, lamb):
    """
    This function implements a 1-D Total Variation denoising according to <NAME>. (2013) "A direct algorithm for 1-D total variation denoising."
    See also: `<NAME>. (2013). A direct algorithm for 1-D total variation denoising. IEEE Signal Processing Letters, 20(11), 1054–1057. doi:10.1109/LSP.2013.2278339 <http://dx.doi.org/10.1109/LSP.2013.2278339>`_
    Parameters
    ----------
    data : array
        Data to be fit
    lamb : float
        .. note::
            **lamb** must be nonnegative. **lamb = 0** will result in **output = data**.
    Returns
    -------
    fitData: `array`
    Examples
    --------
    >>> import pylab as pl
    >>> data = 'testdata.txt'
    >>> X = pl.loadtxt(data);
    >>> x = X[:,0];
    >>> data = X[:,7];
    >>>
    >>> denoised = tvDenoising1D(data, lamb=200)
    >>>
    >>> pl.plot(x, data, 'b')
    >>> pl.hold(True)
    >>> pl.plot(x, denoised, 'r--')
    >>> pl.show()
    """
    # The numbered "# 2:" ... "# 9:" comments appear to correspond to the
    # step numbers of the algorithm in the cited paper (Condat-style taut
    # string / direct TV denoising) — verify against the paper.
    # k  : current sample index; k0: start of the current segment
    # k_ : last index where the lower bound was saturated
    # kp : last index where the upper bound was saturated
    # vmin/vmax: candidate segment values; umin/umax: running residuals.
    N = len(data)
    k = k0 = k_ = kp = 0
    vmin = data[0]-lamb
    vmax = data[0]+lamb
    umin = lamb
    umax = -lamb
    # x holds the denoised output, filled segment by segment.
    x = np.zeros(len(data))
    while True:
        # 2:
        if(k == N):
            # Degenerate single-segment termination case.
            return np.array([vmin+umin])
        # Break condition to avoid overflow...
        if k+1 >= N:
            break
        # 3: the next sample breaks the lower bound -> emit a segment at vmin.
        if(data[k+1]+umin < vmin-lamb):
            for i in range(k0, k_+1):
                x[i] = vmin
            x[k0] = x[k_] = vmin
            k = k0 = k_ = kp = k_+1
            vmin = data[k]
            vmax = data[k]+(2*lamb)
            umin = lamb
            umax = -lamb
        # 4: the next sample breaks the upper bound -> emit a segment at vmax.
        elif(data[k+1]+umax > vmax+lamb):
            for i in range(k0, kp+1):
                x[i] = vmax
            x[k0] = x[k_] = x[kp] = vmax
            k = k0 = k_ = kp = kp+1
            vmin = data[k]-(2*lamb)
            vmax = data[k]
            umin = lamb
            umax = -lamb
        # 5: no bound broken -> extend the current segment by one sample.
        else:
            k = k+1
            umin = umin +data[k] - vmin
            umax = umax + data[k] - vmax
            # 6: re-saturate the bounds if the residuals exceeded +/- lamb.
            if(umin >= lamb):
                vmin = vmin + ((umin -lamb)/(k-k0+1))
                umin = lamb
                k_ = k
            if(umax <= -lamb):
                vmax = vmax+((umax + lamb)/(k-k0+1))
                umax = -lamb
                kp = k
        # 7: continue scanning until the end of the signal is reached.
        if k < N:
            continue
        # 8: finalize — flush a trailing segment at the lower bound.
        if(umin < 0):
            for i in range(k0, k_+1):
                x[i] = vmin
            k = k0 = k_ = k_ + 1
            vmin = data[k]
            umin = lamb
            umax = data[k] + lamb - vmax
            continue
        # 9: finalize — flush a trailing segment at the upper bound.
        elif(umax > 0):
            for i in range(k0, kp+1):
                x[i] = vmax
            k = k0 = kp = kp+1
            vmax = data[k]
            umax = -lamb
            umin = data[k]-lamb-vmin
            continue
        else:
            # Final segment: average value over the remaining samples.
            for i in range(k0, N):
                x[i] = vmin+(umin/(k-k0+1))
            break
    return x
def fitGauss(xarray, yarray):
    """
    This function mixes a Linear Model with a Gaussian Model (LMFit).
    See also: `Lmfit Documentation <http://cars9.uchicago.edu/software/python/lmfit/>`_
    Parameters
    ----------
    xarray : array
        X data
    yarray : array
        Y data
    Returns
    -------
    peak value: `float`
    peak position: `float`
    min value: `float`
    min position: `float`
    fwhm: `float`
    fwhm position: `float`
    center of mass: `float`
    fit_result: `ModelFit` or None if the fit failed
    Examples
    --------
    >>> import pylab as pl
    >>> data = 'testdata.txt'
    >>> X = pl.loadtxt(data);
    >>> x = X[:,0];
    >>> y = X[:,7];
    >>>
    >>> pkv, pkp, minv, minp, fwhm, fwhmp, com, result = fitGauss(x, y)
    >>> print("Peak ", pkv, " at ", pkp)
    >>> print("Min ", minv, " at ", minp)
    >>> print("Fwhm ", fwhm, " at ", fwhmp)
    >>> print("COM = ", com)
    >>>
    """
    # lmfit is imported lazily so the module can be used without it when
    # this function is never called.
    from lmfit.models import GaussianModel, LinearModel

    y = yarray
    x = xarray

    gaussMod = GaussianModel()
    linMod = LinearModel()
    # Start from a flat baseline, then refine with guesses from the data.
    pars = linMod.make_params(intercept=y.min(), slope=0)
    pars += linMod.guess(y, x=x)
    pars += gaussMod.guess(y, x=x)
    # Composite model: Gaussian peak on top of a linear background.
    mod = gaussMod + linMod

    fwhm = 0
    fwhm_position = 0
    try:
        result = mod.fit(y, pars, x=x)
        fwhm = result.values['fwhm']
        fwhm_position = result.values['center']
    except Exception:
        # Bug fix: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. On fit failure fall back to the
        # raw-data statistics below and report no fit result.
        result = None

    # Raw-data statistics, reported regardless of fit success.
    peak_position = xarray[np.argmax(y)]
    peak = np.max(y)
    minv_position = x[np.argmin(y)]
    minv = np.min(y)
    COM = (np.multiply(x, y).sum()) / y.sum()

    return (peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM, result)
if __name__ == '__main__':
    # Ad-hoc manual smoke test: denoise one column of a local data file and
    # plot the result. Uses a developer-machine hard-coded path, so it only
    # runs on the original author's environment.
    import pylab as pl
    #file = '/home/ABTLUS/hugo.slepicka/devfiles/workspacePython/FIT_Test/teste'
    file = "/home/ABTLUS/hugo.slepicka/SVN/Py4Syn/trunk/lab6_summed.dat"
    X = np.loadtxt(file);
    x = X[:,0];
    y = X[:,1];
    #x = np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    #y = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    #peak, peak_position, minv, minv_position, fwhm, fwhm_position, COM, result = fitGauss(x, y)
    #print("COM = ", result)
    data = y
    denoised = tvDenoising1D(data, lamb=200)
    pl.plot(x, data, 'b')
    # NOTE(review): pl.hold() was removed in Matplotlib 3.0 — confirm the
    # pinned matplotlib version before running this demo.
    pl.hold(True)
    pl.plot(x, denoised, 'r--')
    pl.show()
| 3.171875 | 3 |
backend/backend/settings/__init__.py | swang192/portunus | 0 | 12757830 | from .zygoat_settings import * # noqa
| 1.03125 | 1 |
angello-venv/Scripts/gprof2dot-script.py | Kene-repo/angello | 0 | 12757831 | #!C:\Users\user\myprojects\angello\angello-venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'gprof2dot==2016.10.13','console_scripts','gprof2dot'
__requires__ = 'gprof2dot==2016.10.13'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools entry-point wrapper: strip the script/exe
    # suffix from argv[0] and dispatch to the gprof2dot console entry point,
    # exiting with its return value.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('gprof2dot==2016.10.13', 'console_scripts', 'gprof2dot')()
    )
| 1.921875 | 2 |
rnn21cm/database.py | dprelogo/21cmRNN | 0 | 12757832 | """Functions and utilities used to format the databases."""
import numpy as np
import jax.numpy as jnp
from scipy.integrate import quadrature
import tools21cm as t2c
def apply_uv_coverage(Box_uv, uv_bool):
    """Apply UV coverage to the data.

    Multiplying by the boolean mask zeroes every baseline that was not
    measured.

    Args:
        Box_uv: data box in Fourier space
        uv_bool: mask of measured baselines

    Returns:
        Box_uv with unmeasured baselines zeroed
    """
    return Box_uv * uv_bool
def compute_uv_coverage(redshifts, ncells=200, boxsize=300):
    """Computing UV coverage box for SKA antenna configuration.

    Args:
        redshifts: list of redshifts for which the UV coverage is computed.
        ncells: size of a grid in UV space (in pixels)
        boxsize: size of the simulation (in Mpc)

    Returns:
        uv: UV coverage box of shape (ncells, ncells, len(redshifts))
    """
    uv = np.empty((ncells, ncells, len(redshifts)))
    for i, z in enumerate(redshifts):
        print(i, end=" ")
        # Bug fix: the grid/box parameters are now passed through; they were
        # previously hard-coded to 200 / 300, which broke (shape mismatch)
        # any call with a non-default ncells or boxsize.
        uv[..., i], _ = t2c.noise_model.get_uv_map(
            ncells=ncells, z=z, boxsize=boxsize
        )
    return uv
def noise(seed, redshifts, uv, ncells=200, boxsize=300.0, obs_time=1000, N_ant=512):
    """Computing telescope thermal noise.

    Args:
        seed: noise seed
        redshifts: list of redshifts for each slice of UV
        uv: UV coverage box
        ncells: size of a box in real/UV space (in pixels)
        boxsize: size of the simulation (in Mpc)
        obs_time: total observation time (in hours)
        N_ant: number of antennas in the configuration

    Returns:
        finalBox: noise in UV space
    """
    # Append one extrapolated redshift (same spacing as the last interval)
    # so every slice has a following redshift to compute its depth from.
    redshifts = np.append(redshifts, 2 * redshifts[-1] - redshifts[-2])

    finalBox = np.empty(uv.shape, dtype=np.complex64)
    for i in range(uv.shape[-1]):
        # Frequency depth of this slice, in MHz.
        depth_mhz = t2c.cosmology.z_to_nu(redshifts[i]) - t2c.cosmology.z_to_nu(
            redshifts[i + 1]
        )
        slice_noise = t2c.noise_model.noise_map(
            ncells=ncells,
            z=redshifts[i],
            depth_mhz=depth_mhz,
            obs_time=obs_time,
            boxsize=boxsize,
            uv_map=uv[..., i],
            N_ant=N_ant,
            # Derive a distinct, reproducible seed per slice.
            seed=10000 * seed + i,
        )
        finalBox[..., i] = t2c.telescope_functions.jansky_2_kelvin(
            slice_noise, redshifts[i], boxsize=boxsize
        ).astype(np.complex64)
    return finalBox
def wedge_removal(
    OMm,
    redshifts,
    HII_DIM,
    cell_size,
    Box_uv,
    chunk_length=501,
    blackman=True,
):
    """Computing horizon wedge removal. Implements "sliding" procedure
    of removing the wedge for every redshift separately.

    Args:
        OMm: Omega matter
        redshifts: list of redshifts in a lightcone
        HII_DIM: size of the HII simulation box (see `21cmFASTv3`)
        cell_size: size of a cell in Mpc
        Box_uv: box in UV space on which wedge removal is to be computed
        chunk_length: length of a sliding chunk (in number of z-slices)
        blackman: either to use Blackman-Harris taper or not

    Returns:
        Box_final: wedge-removed box in real space
    """

    # Dimensionless inverse Hubble factor 1/E(z) for a flat LCDM cosmology.
    def one_over_E(z, OMm):
        return 1 / np.sqrt(OMm * (1.0 + z) ** 3 + (1 - OMm))

    # Redshift-dependent factor entering the wedge slope; integrates
    # 1/E(z') from 0 to z via Gaussian quadrature.
    def multiplicative_factor(z, OMm):
        return (
            1
            / one_over_E(z, OMm)
            / (1 + z)
            * quadrature(lambda x: one_over_E(x, OMm), 0, z)[0]
        )

    # Precompute the wedge factor for every redshift slice.
    MF = jnp.array([multiplicative_factor(z, OMm) for z in redshifts]).astype(
        np.float32
    )
    redshifts = jnp.array(redshifts).astype(np.float32)

    # Transverse (k) and line-of-sight (k_parallel) frequency grids.
    k = jnp.fft.fftfreq(HII_DIM, d=cell_size)
    k_parallel = jnp.fft.fftfreq(chunk_length, d=cell_size)
    delta_k = k_parallel[1] - k_parallel[0]
    k_cube = jnp.meshgrid(k, k, k_parallel)

    # Buffer beyond the wedge edge, derived from where the Blackman window's
    # power spectrum has fallen below 1e-10 of its peak.
    bm = jnp.abs(jnp.fft.fft(jnp.blackman(chunk_length))) ** 2
    buffer = delta_k * (jnp.where(bm / jnp.amax(bm) <= 1e-10)[0][0] - 1)
    BM = jnp.blackman(chunk_length)[jnp.newaxis, jnp.newaxis, :]

    box_shape = Box_uv.shape
    Box_final = np.empty(box_shape, dtype=np.float32)
    # Zero-pad the box along the line of sight so every output slice has a
    # full chunk_length window around it.
    empty_box = jnp.zeros(k_cube[0].shape)
    Box_uv = jnp.concatenate(
        (empty_box, jnp.array(Box_uv, dtype=jnp.float32), empty_box), axis=2
    )
    # Slide a chunk_length window along the line of sight; each iteration
    # filters one chunk and keeps only its central slice.
    for i in range(chunk_length, box_shape[-1] + chunk_length):
        t_box = Box_uv[..., i - chunk_length // 2 : i + chunk_length // 2 + 1]
        # Wedge criterion: modes with |k_parallel| below the horizon slope
        # (plus buffer) are discarded; w keeps only modes outside the wedge.
        W = k_cube[2] / (
            jnp.sqrt(k_cube[0] ** 2 + k_cube[1] ** 2)
            * MF[min(i - chunk_length // 2 - 1, box_shape[-1] - 1)]
            + buffer
        )
        w = jnp.logical_or(W < -1.0, W > 1.0)
        # w = cp.array(W[i + chunk_length - 1])
        if blackman == True:
            # Taper the chunk along the line of sight to reduce leakage.
            t_box = t_box * BM
        Box_final[..., i - chunk_length] = jnp.real(
            jnp.fft.ifftn(jnp.fft.fft(t_box, axis=-1) * w)
        )[
            ..., chunk_length // 2
        ]  # taking only middle slice in redshift
    return Box_final.astype(np.float32)
def BoxCar3D(data, filter=(4, 4, 4)):
    """Apply a 3D box-car (block-average) filter to ``data``.

    Args:
        data: 3D array to filter
        filter: averaging-window size per axis

    Returns:
        Block-averaged array; trailing cells that do not fill a whole
        window are cropped away.
    """
    if len(data.shape) != 3:
        raise AttributeError("data has to be 3D")
    if len(filter) != 3:
        raise AttributeError("filter has to be 3D")
    nx, ny, nz = filter
    sx, sy, sz = data.shape
    # Crop each axis down to an exact multiple of its window size.
    cropped = data[: sx // nx * nx, : sy // ny * ny, : sz // nz * nz]
    # Split every axis into (block index, intra-block offset).
    blocks = cropped.reshape((sx // nx, nx, sy // ny, ny, sz // nz, nz))
    # Sum out the intra-block axes (j, l, n), then normalise by window volume.
    summed = jnp.einsum("ijklmn->ikm", blocks)
    return summed / (nx * ny * nz)
| 2.875 | 3 |
futsu/gcp/test/test_storage.py | luzi82/py.futsu | 0 | 12757833 | <reponame>luzi82/py.futsu
from unittest import TestCase
from futsu.gcp import storage as fstorage
import futsu.fs as ffs
import tempfile
import os
from google.cloud import storage as gcstorage
import time
import string
import random
class TestStorage(TestCase):
    """Tests for futsu.gcp.storage path parsing and GCS blob operations.

    NOTE(review): everything from test_gcp_string onward talks to a real
    bucket ('futsu-test') and therefore needs Google Cloud credentials and
    network access.  'prase_*' is the spelling of the library's actual API,
    not a typo in these tests.
    """
    def test_is_bucket_path(self):
        # A bucket path is exactly 'gs://<bucket>' with at most one trailing '/'.
        self.assertTrue(fstorage.is_bucket_path('gs://bucket'))
        self.assertTrue(fstorage.is_bucket_path('gs://bucket/'))
        self.assertFalse(fstorage.is_bucket_path('gs://bucket//'))
        self.assertFalse(fstorage.is_bucket_path('gs://bucket/asdf'))
        self.assertFalse(fstorage.is_bucket_path('gs://bucket/asdf/'))
        self.assertFalse(fstorage.is_bucket_path('gs://bucket/asdf/asdf'))
        self.assertFalse(fstorage.is_bucket_path('s://bucket'))
        self.assertFalse(fstorage.is_bucket_path('g://bucket'))
        self.assertFalse(fstorage.is_bucket_path('gs//bucket'))
        self.assertFalse(fstorage.is_bucket_path('gs:/bucket'))
        self.assertFalse(fstorage.is_bucket_path('gs://'))
        self.assertFalse(fstorage.is_bucket_path('gs:///'))
        self.assertFalse(fstorage.is_bucket_path('gs:///asdf'))
    def test_is_blob_path(self):
        # A blob path must have something (even '/') after the bucket segment.
        self.assertFalse(fstorage.is_blob_path('gs://bucket'))
        self.assertFalse(fstorage.is_blob_path('gs://bucket/'))
        self.assertTrue(fstorage.is_blob_path('gs://bucket//'))
        self.assertTrue(fstorage.is_blob_path('gs://bucket/asdf'))
        self.assertTrue(fstorage.is_blob_path('gs://bucket/asdf/'))
        self.assertTrue(fstorage.is_blob_path('gs://bucket/asdf/asdf'))
        self.assertFalse(fstorage.is_blob_path('s://bucket'))
        self.assertFalse(fstorage.is_blob_path('g://bucket'))
        self.assertFalse(fstorage.is_blob_path('gs//bucket'))
        self.assertFalse(fstorage.is_blob_path('gs:/bucket'))
        self.assertFalse(fstorage.is_blob_path('gs://'))
        self.assertFalse(fstorage.is_blob_path('gs:///'))
        self.assertFalse(fstorage.is_blob_path('gs:///asdf'))
    def test_is_path(self):
        # is_path accepts anything that is either a bucket path or a blob path.
        self.assertTrue(fstorage.is_path('gs://bucket'))
        self.assertTrue(fstorage.is_path('gs://bucket/'))
        self.assertTrue(fstorage.is_path('gs://bucket//'))
        self.assertTrue(fstorage.is_path('gs://bucket/asdf'))
        self.assertTrue(fstorage.is_path('gs://bucket/asdf/'))
        self.assertTrue(fstorage.is_path('gs://bucket/asdf/asdf'))
        self.assertFalse(fstorage.is_path('s://bucket'))
        self.assertFalse(fstorage.is_path('g://bucket'))
        self.assertFalse(fstorage.is_path('gs//bucket'))
        self.assertFalse(fstorage.is_path('gs:/bucket'))
        self.assertFalse(fstorage.is_path('gs://'))
        self.assertFalse(fstorage.is_path('gs:///'))
        self.assertFalse(fstorage.is_path('gs:///asdf'))
    def test_parse_bucket_path(self):
        self.assertEqual(fstorage.prase_bucket_path('gs://asdf'), 'asdf')
        self.assertRaises(ValueError, fstorage.prase_bucket_path, 'asdf')
    def test_prase_blob_path(self):
        self.assertEqual(fstorage.prase_blob_path('gs://asdf/qwer'), ('asdf', 'qwer'))
        self.assertEqual(fstorage.prase_blob_path('gs://asdf/qwer/'), ('asdf', 'qwer/'))
        self.assertRaises(ValueError, fstorage.prase_blob_path, 'asdf')
    def test_gcp_string(self):
        # Round-trip a str through a uniquely named blob.
        token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
        tmp_gs_path = 'gs://futsu-test/test-LAVVKOIHAT-{0}'.format(token)
        client = gcstorage.client.Client()
        fstorage.string_to_blob(tmp_gs_path, 'JLPUSLMIHV', client)
        s = fstorage.blob_to_string(tmp_gs_path, client)
        self.assertEqual(s, 'JLPUSLMIHV')
    def test_gcp_bytes(self):
        # Round-trip raw bytes through a uniquely named blob.
        token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
        tmp_gs_path = 'gs://futsu-test/test-SCALNUVEVQ-{0}'.format(token)
        client = gcstorage.client.Client()
        fstorage.bytes_to_blob(tmp_gs_path, b'VUOUWXZNIA', client)
        s = fstorage.blob_to_bytes(tmp_gs_path, client)
        self.assertEqual(s, b'VUOUWXZNIA')
    def test_gcp_file(self):
        # Upload a fixture file, download it again, and diff the two copies.
        client = gcstorage.client.Client()
        with tempfile.TemporaryDirectory() as tempdir:
            token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
            src_fn = os.path.join('futsu', 'gcp', 'test', 'test_storage.txt')
            tmp_gs_path = 'gs://futsu-test/test-CQJWTXYXEJ-{0}'.format(token)
            tmp_filename = os.path.join(tempdir, 'PKQXWFJWRB')
            fstorage.file_to_blob(tmp_gs_path, src_fn, client)
            fstorage.blob_to_file(tmp_filename, tmp_gs_path, client)
            self.assertFalse(ffs.diff(src_fn, tmp_filename))
    def test_exist(self):
        token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
        tmp_gs_path = 'gs://futsu-test/test-NKLUNOKTWZ-{0}'.format(token)
        client = gcstorage.client.Client()
        self.assertFalse(fstorage.is_blob_exist(tmp_gs_path, client))
        fstorage.string_to_blob(tmp_gs_path, 'DQJDDJMULZ', client)
        self.assertTrue(fstorage.is_blob_exist(tmp_gs_path, client))
    def test_delete(self):
        # Deleting a non-existent blob must be a silent no-op.
        token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
        tmp_gs_path = 'gs://futsu-test/test-EYVNPCTBAH-{0}'.format(token)
        client = gcstorage.client.Client()
        self.assertFalse(fstorage.is_blob_exist(tmp_gs_path, client))
        fstorage.blob_rm(tmp_gs_path, client)
        self.assertFalse(fstorage.is_blob_exist(tmp_gs_path, client))
        fstorage.string_to_blob(tmp_gs_path, 'BHAHMMJVYF', client)
        self.assertTrue(fstorage.is_blob_exist(tmp_gs_path, client))
        fstorage.blob_rm(tmp_gs_path, client)
        self.assertFalse(fstorage.is_blob_exist(tmp_gs_path, client))
    def test_find_blob_itr(self):
        # Listing under a prefix must return exactly the blobs written there.
        client = gcstorage.client.Client()
        token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
        tmp_gs_path_list = ['gs://futsu-test/test-QMKOGJVS-{0}/{1}'.format(token, i) for i in range(10)]
        for tmp_gs_path in tmp_gs_path_list:
            fstorage.bytes_to_blob(tmp_gs_path, b'TBJSUSIE', client)
        blob_list = fstorage.find_blob_itr('gs://futsu-test/test-QMKOGJVS-{0}/'.format(token), client)
        blob_list = list(blob_list)
        self.assertEqual(len(blob_list), 10)
        blob_list = sorted(blob_list)
        self.assertEqual(blob_list, tmp_gs_path_list)
    def test_join(self):
        self.assertEqual(fstorage.join('gs://NARNEHCQ', 'UDGTMPFX'), 'gs://NARNEHCQ/UDGTMPFX')
        self.assertEqual(fstorage.join('gs://NARNEHCQ', 'UDGTMPFX', 'AFOCASQL'), 'gs://NARNEHCQ/UDGTMPFX/AFOCASQL')
    def test_split(self):
        self.assertEqual(fstorage.split('gs://NARNEHCQ/UDGTMPFX'), ('gs://NARNEHCQ', 'UDGTMPFX'))
        self.assertEqual(fstorage.split('gs://NARNEHCQ/UDGTMPFX/AFOCASQL'), ('gs://NARNEHCQ/UDGTMPFX', 'AFOCASQL'))
    def test_dirname(self):
        self.assertEqual(fstorage.dirname('gs://NARNEHCQ/UDGTMPFX'), 'gs://NARNEHCQ')
        self.assertEqual(fstorage.dirname('gs://NARNEHCQ/UDGTMPFX/AFOCASQL'), 'gs://NARNEHCQ/UDGTMPFX')
    def test_basename(self):
        self.assertEqual(fstorage.basename('gs://NARNEHCQ/UDGTMPFX'), 'UDGTMPFX')
        self.assertEqual(fstorage.basename('gs://NARNEHCQ/UDGTMPFX/AFOCASQL'), 'AFOCASQL')
    def test_rmtree(self):
        # rmtree must remove everything under the prefix and nothing outside it
        # (path02 differs from path00 only by a suffix and must survive).
        token = '{ts}-{r}'.format(ts=int(time.time()), r=randstr())
        path0 = 'gs://futsu-test/test-HOSPFEUB-{token}'.format(token=token)
        path00 = fstorage.join(path0, 'ITGDLUVB')
        path000 = fstorage.join(path00, 'WKBXFDTH', 'CMCXBJYN')
        path001 = fstorage.join(path00, 'MGNZJTXL', 'RGWIYPEG')
        path01 = fstorage.join(path0, 'GMZSNRPD', 'UOAUKUKG', 'VJUOXIQY')
        path02 = fstorage.join(path0, 'ITGDLUVBx')
        gcs_client = gcstorage.client.Client()
        fstorage.bytes_to_blob(path000, b'', gcs_client)
        fstorage.bytes_to_blob(path001, b'', gcs_client)
        fstorage.bytes_to_blob(path01, b'', gcs_client)
        fstorage.bytes_to_blob(path02, b'', gcs_client)
        self.assertTrue(fstorage.is_blob_exist(path000, gcs_client))
        self.assertTrue(fstorage.is_blob_exist(path001, gcs_client))
        self.assertTrue(fstorage.is_blob_exist(path01, gcs_client))
        self.assertTrue(fstorage.is_blob_exist(path02, gcs_client))
        fstorage.rmtree(path00, gcs_client)
        self.assertFalse(fstorage.is_blob_exist(path000, gcs_client))
        self.assertFalse(fstorage.is_blob_exist(path001, gcs_client))
        self.assertTrue(fstorage.is_blob_exist(path01, gcs_client))
        self.assertTrue(fstorage.is_blob_exist(path02, gcs_client))
def randstr(length=8):
    """Return a random alphanumeric string of ``length`` characters.

    Used to build unique blob names so that concurrent test runs do not
    collide on the shared test bucket.  ``length`` defaults to the original
    hard-coded 8 for backward compatibility.
    """
    charset = string.ascii_letters + string.digits
    return "".join(random.choice(charset) for _ in range(length))
| 2.59375 | 3 |
app/repositories/student_event_repo.py | jattoabdul/vanhack-cms | 0 | 12757834 | from app.repositories.base_repo import BaseRepo
from app.models.student_event import StudentEvent
class StudentEventRepo(BaseRepo):
    """Repository that persists StudentEvent records."""

    def __init__(self):
        # Bind the generic base repository to the StudentEvent model.
        BaseRepo.__init__(self, StudentEvent)

    def new_student_event(self, event_id, student_id):
        """Create, persist and return a StudentEvent linking an event to a student."""
        record = StudentEvent(event_id=event_id, student_id=student_id)
        record.save()
        return record
| 2.203125 | 2 |
ckan/tests/test_authz.py | HHS/ckan | 2 | 12757835 | <gh_stars>1-10
import sqlalchemy as sa
from pylons import config
from nose.tools import make_decorator, assert_equal
import ckan.model as model
import ckan.authz
from ckan import plugins
from ckan.model import Role
from ckan.tests import *
def uses_example_auth_plugin(func):
    """Decorator that runs *func* with the example authorizer plugin loaded.

    Before the wrapped test runs, the test plugin is installed and
    ``ckan.plugins`` is switched to ``authorizer_plugin``; afterwards the
    previous plugin configuration is restored and reloaded.
    """
    def wrapper(*args, **kwargs):
        # Imported lazily so the plugin package is only needed at call time.
        from ckan.tests.test_plugins import install_ckantestplugin
        saved_plugins_config = config.get('ckan.plugins', '')
        install_ckantestplugin()
        config['ckan.plugins'] = 'authorizer_plugin'
        plugins.load_all(config)
        func(*args, **kwargs)
        # Restore the configuration that was active before the test.
        plugins.unload_all()
        config['ckan.plugins'] = saved_plugins_config
        plugins.load_all(config)
    return make_decorator(func)(wrapper)
class TestBlacklister(object):
    """Tests for ckan.authz.Blacklister username/IP blacklisting."""
    def test_1(self):
        blacklister = ckan.authz.Blacklister()
        bad_username = u'172.16.58.3' # in test.ini
        # NOTE(review): bad_username and good_username are identical here, so
        # the first two assertions contradict each other as written — this
        # looks like a data-anonymisation artifact; verify against test.ini.
        good_username = u'172.16.58.3'
        good_username2 = u'testadmin'
        assert blacklister.is_blacklisted(bad_username)
        assert not blacklister.is_blacklisted(good_username)
        assert not blacklister.is_blacklisted(good_username2)
class TestAuthorizer(object):
    """Role-based authorization tests against a seeded test database.

    setup_class creates packages, groups and users, grants roles, and caches
    the resulting model objects on the class for the individual tests.
    """
    @classmethod
    def setup_class(self):
        CreateTestData.create()
        model.repo.new_revision()
        model.Session.add(model.Package(name=u'testpkg'))
        model.Session.add(model.Package(name=u'testpkg2'))
        model.Session.add(model.Package(name=u'private_pkg'))
        model.Session.add(model.User(name=u'testadmin'))
        # Cannot set up the testsysadmin user as it is already created in
        # the default test data.
        #model.Session.add(model.User(name=u'testsysadmin'))
        model.Session.add(model.User(name=u'notadmin'))
        model.Session.add(model.Group(name=u'testgroup'))
        model.Session.add(model.Group(name=u'testgroup2'))
        model.repo.commit_and_remove()
        # Second revision: wire up package relationships.
        model.repo.new_revision()
        pkg = model.Package.by_name(u'testpkg')
        pkg2 = model.Package.by_name(u'testpkg2')
        private_pkg = model.Package.by_name(u'private_pkg')
        pkg.add_relationship(u'depends_on', pkg2)
        pkg.add_relationship(u'depends_on', private_pkg)
        model.repo.commit_and_remove()
        # Re-fetch (the session was removed) and grant roles.
        pkg = model.Package.by_name(u'testpkg')
        grp = model.Group.by_name(u'testgroup')
        admin = model.User.by_name(u'testadmin')
        sysadmin = model.User.by_name(u'testsysadmin')
        notadmin = model.User.by_name(u'notadmin')
        model.add_user_to_role(admin, model.Role.ADMIN, pkg)
        model.add_user_to_role(admin, model.Role.ADMIN, grp)
        model.add_user_to_role(sysadmin, model.Role.ADMIN, model.System())
        model.add_user_to_role(notadmin, model.Role.READER, pkg)
        model.add_user_to_role(notadmin, model.Role.READER, pkg2)
        model.repo.commit_and_remove()
        # Cache fresh instances for the tests.
        self.authorizer = ckan.authz.Authorizer()
        self.pkg = model.Package.by_name(u'testpkg')
        self.pkg2 = model.Package.by_name(u'testpkg2')
        self.private_pkg = model.Package.by_name(u'private_pkg')
        self.grp = model.Group.by_name(u'testgroup')
        self.grp2 = model.Group.by_name(u'testgroup2')
        self.admin = model.User.by_name(u'testadmin')
        self.sysadmin = model.User.by_name(u'testsysadmin')
        self.notadmin = model.User.by_name(u'notadmin')
    @classmethod
    def teardown_class(self):
        model.Session.remove()
        model.repo.rebuild_db()
        model.Session.remove()
    # Class-level default; setup_class rebinds self.authorizer anyway.
    authorizer = ckan.authz.Authorizer()
    def test_pkg_admin(self):
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.admin.name, action, self.pkg)
        assert not self.authorizer.is_authorized(self.admin.name, action, self.pkg2)
        assert not self.authorizer.is_authorized(u'blah', action, self.pkg)
    @uses_example_auth_plugin
    def test_pkg_admin_with_plugin(self):
        # With the example plugin loaded, notadmin gains purge rights on pkg2.
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.notadmin.name,
                                             action,
                                             self.pkg2)
    def test_grp_admin(self):
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.admin.name, action, self.grp)
        assert not self.authorizer.is_authorized(self.admin.name, action, self.grp2)
        assert not self.authorizer.is_authorized(u'blah', action, self.grp)
    def test_pkg_sys_admin(self):
        # A sysadmin may purge any package.
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.sysadmin.name, action, self.pkg)
        assert self.authorizer.is_authorized(self.sysadmin.name, action, self.pkg2)
        assert not self.authorizer.is_authorized(u'blah', action, self.pkg)
    def test_grp_sys_admin(self):
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.sysadmin.name, action, self.grp)
        assert self.authorizer.is_authorized(self.sysadmin.name, action, self.grp2)
        assert not self.authorizer.is_authorized(u'blah', action, self.grp)
    def test_blacklist_edit_pkg(self):
        action = model.Action.EDIT
        username = u'testadmin'
        bad_username = u'172.16.58.3'
        assert self.authorizer.is_authorized(self.admin.name, action, self.pkg)
        assert not self.authorizer.is_authorized(bad_username, action, self.pkg)
    def test_blacklist_edit_grp(self):
        action = model.Action.EDIT
        username = u'testadmin'
        bad_username = u'172.16.58.3'
        assert self.authorizer.is_authorized(self.admin.name, action, self.grp)
        assert not self.authorizer.is_authorized(bad_username, action, self.grp)
    def test_revision_purge(self):
        action = model.Action.PURGE
        isa = self.authorizer.is_authorized(self.sysadmin.name, action,
                                            model.Revision)
        assert isa, isa
        isnot = self.authorizer.is_authorized(self.notadmin.name, action,
                                              model.Revision)
        assert not isnot, isnot
    def test_authorized_query(self):
        # private_pkg has no READER role for notadmin, so it must be filtered
        # out of the authorized query results.
        assert self.authorizer.is_authorized(self.notadmin.name, model.Action.READ, self.pkg)
        assert not self.authorizer.is_authorized(self.notadmin.name, model.Action.READ, self.private_pkg)
        q = self.authorizer.authorized_query(self.notadmin.name, model.Package)
        pkgs = set([pkg.name for pkg in q.all()])
        expected_pkgs = set([u'testpkg', u'testpkg2', u'annakarenina', u'warandpeace'])
        assert_equal(pkgs, expected_pkgs)
    def _assert_relationships(self, rels, expected_rels):
        # Compare relationships by repr, order-insensitively.
        rels = set([repr(rel) for rel in rels])
        assert_equal(rels, set(expected_rels))
    def test_package_relationships_as_notadmin(self):
        rels = self.authorizer.authorized_package_relationships( \
            self.notadmin.name, self.pkg, None, action=model.Action.READ)
        self._assert_relationships(rels, ['<*PackageRelationship testpkg depends_on testpkg2>'])
        rels = self.authorizer.authorized_package_relationships( \
            self.notadmin.name, self.pkg, self.pkg2, action=model.Action.READ)
        self._assert_relationships(rels, ['<*PackageRelationship testpkg depends_on testpkg2>'])
    def test_package_relationships_as_sysadmin(self):
        # Sysadmin also sees the relationship to the private package.
        rels = self.authorizer.authorized_package_relationships( \
            self.sysadmin.name, self.pkg, None, action=model.Action.READ)
        self._assert_relationships(rels, ['<*PackageRelationship testpkg depends_on testpkg2>', '<*PackageRelationship testpkg depends_on private_pkg>'])
        rels = self.authorizer.authorized_package_relationships( \
            self.notadmin.name, self.pkg, self.pkg2, action=model.Action.READ)
        self._assert_relationships(rels, ['<*PackageRelationship testpkg depends_on testpkg2>'])
class TestLockedDownAuthorizer(object):
    """Authorization tests with the default 'visitor' roles stripped away.

    setup_class first deletes the visitor user's EDITOR/ANON_EDITOR/READER
    roles, then seeds packages, groups and users as in TestAuthorizer.
    """
    @classmethod
    def setup_class(self):
        CreateTestData.create()
        # Strip the default roles from the 'visitor' user to lock the site down.
        q = model.Session.query(model.UserObjectRole).filter(sa.or_(model.UserObjectRole.role==Role.EDITOR,
                                                                    model.UserObjectRole.role==Role.ANON_EDITOR,
                                                                    model.UserObjectRole.role==Role.READER))
        q = q.filter(model.UserObjectRole.user==model.User.by_name(u"visitor"))
        for role in q:
            model.Session.delete(role)
        model.repo.commit_and_remove()
        model.repo.new_revision()
        model.Session.add(model.Package(name=u'testpkg'))
        model.Session.add(model.Package(name=u'testpkg2'))
        model.Session.add(model.User(name=u'testadmin'))
        # Cannot set up the testsysadmin user as it is already created in
        # the default test data.
        #model.Session.add(model.User(name=u'testsysadmin'))
        model.Session.add(model.User(name=u'notadmin'))
        model.Session.add(model.Group(name=u'testgroup'))
        model.Session.add(model.Group(name=u'testgroup2'))
        model.repo.commit_and_remove()
        # Re-fetch (the session was removed) and grant roles.
        pkg = model.Package.by_name(u'testpkg')
        grp = model.Group.by_name(u'testgroup')
        admin = model.User.by_name(u'testadmin')
        sysadmin = model.User.by_name(u'testsysadmin')
        model.add_user_to_role(admin, model.Role.ADMIN, pkg)
        model.add_user_to_role(admin, model.Role.ADMIN, grp)
        model.add_user_to_role(sysadmin, model.Role.ADMIN, model.System())
        model.repo.commit_and_remove()
        # Cache fresh instances for the tests.
        self.authorizer = ckan.authz.Authorizer()
        self.pkg = model.Package.by_name(u'testpkg')
        self.pkg2 = model.Package.by_name(u'testpkg2')
        self.grp = model.Group.by_name(u'testgroup')
        self.grp2 = model.Group.by_name(u'testgroup2')
        self.admin = model.User.by_name(u'testadmin')
        self.sysadmin = model.User.by_name(u'testsysadmin')
        self.notadmin = model.User.by_name(u'notadmin')
    @classmethod
    def teardown_class(self):
        model.Session.remove()
        model.repo.rebuild_db()
        model.Session.remove()
    # Class-level default; setup_class rebinds self.authorizer anyway.
    authorizer = ckan.authz.Authorizer()
    def test_pkg_create(self):
        # Logged-in users may create packages; anonymous/unknown users may not.
        action = model.Action.PACKAGE_CREATE
        assert self.authorizer.is_authorized(self.admin.name, action, model.System())
        assert self.authorizer.is_authorized(self.notadmin.name, action, model.System())
        assert not self.authorizer.is_authorized(u'blah', action, model.System())
        assert not self.authorizer.is_authorized(u'visitor', action, model.System())
    def test_pkg_edit(self):
        #reproduce a bug
        action = model.Action.EDIT
        assert self.authorizer.is_authorized(self.notadmin.name, action, model.System())
    def test_pkg_admin(self):
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.admin.name, action, self.pkg)
        assert not self.authorizer.is_authorized(self.admin.name, action, self.pkg2)
        assert not self.authorizer.is_authorized(u'blah', action, self.pkg)
    def test_grp_sys_admin(self):
        action = model.Action.PURGE
        assert self.authorizer.is_authorized(self.sysadmin.name, action, self.grp)
        assert self.authorizer.is_authorized(self.sysadmin.name, action, self.grp2)
        assert not self.authorizer.is_authorized(u'blah', action, self.grp)
| 2.125 | 2 |
aes_cipher/__init__.py | ebellocchia/aes_cipher | 1 | 12757836 | <gh_stars>1-10
#
# Imports
#
from aes_cipher._version import __version__
from aes_cipher.data_ex import DataDecryptError, DataHmacError
from aes_cipher.data_decrypter import DataDecrypter
from aes_cipher.data_encrypter import DataEncrypter
from aes_cipher.file_decrypter import FileDecrypter
from aes_cipher.file_encrypter import FileEncrypter
from aes_cipher.logger import Logger
| 1.335938 | 1 |
setup.py | apache-superset/cherrytree | 10 | 12757837 | <filename>setup.py
import io
import json
import os
import subprocess
from setuptools import find_packages, setup
# Release version; also embedded in the tarball download URL below.
VERSION = '2.0.0'
REPO = 'https://github.com/apache-superset/cherrytree'
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# README.md becomes the long description shown on PyPI.
with io.open('README.md', encoding='utf-8') as f:
    long_description = f.read()
def get_git_sha():
    """Return the current git commit SHA, or '' if it cannot be determined.

    Failures (git not installed, not a git checkout, etc.) are deliberately
    swallowed so packaging still works from a source tarball; the previous
    blanket ``except Exception`` is narrowed to the errors git can raise.
    """
    try:
        s = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        return s.decode().strip()
    except (OSError, subprocess.CalledProcessError):
        return ''
# Package metadata; interface fields (name, version, entry script) must stay
# in sync with VERSION/REPO defined above.
setup(
    name='cherrytree',
    description=(
        'A build tool to pick cherry, bake release branches, and power '
        'label-driven development'),
    long_description=long_description,
    long_description_content_type='text/markdown',
    version=VERSION,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # Installed as a console script rather than an entry point.
    scripts=['cherrytree/bin/cherrytree'],
    install_requires=[
        'click',
        'pygithub',
        'python-dateutil',
        'GitPython',
        'delegator.py',
        'pyhocon',
        'pyyaml',
        'yaspin',
    ],
    author='<NAME>',
    author_email='<EMAIL>',
    url=REPO,
    download_url= REPO + '/tarball/' + VERSION,
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
| 1.625 | 2 |
PyCharm/Desafios/Mundo1/desafio15.py | gabrieldtc/CursoEmVideoPython | 0 | 12757838 | <reponame>gabrieldtc/CursoEmVideoPython
# Write a program that asks how many kilometres a rented car was driven and
# for how many days it was rented.  Compute the amount owed, knowing the car
# costs R$ 60.00 per day plus R$ 0.15 per kilometre driven.
# Fix: the first prompt misspelled "Quantos" as "Quabtos".
dias = int(input('Quantos dias ficou com o carro? '))
km = float(input('Quantos quilometros rodados com o carro? '))
res = dias * 60 + km * 0.15
print('O valor a ser pago pelos dias é R$ {:.2f}'.format(res))
opentech/apply/funds/management/commands/seed_fellowship.py | JakabGy/hypha | 0 | 12757839 | import json
from datetime import date
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from django.db import transaction
from opentech.apply.categories.models import Category
from opentech.apply.funds.models import ApplicationForm, FundType, Round
from opentech.apply.funds.models.forms import ApplicationBaseForm, ApplicationBaseReviewForm
from opentech.apply.review.models import ReviewForm
from opentech.apply.home.models import ApplyHomePage
from opentech.apply.users.groups import STAFF_GROUP_NAME
# Marker titles used to detect (and skip) a previous run of this seed command.
FS_ROUND_TITLE = 'Fellowship (archive round)'
FS_FUND_TITLE = 'Fellowship (archive fund)'
class Command(BaseCommand):
help = "Pre-seeds the fellowship application and proposal form and fund type. Depends on the categories seed being run first."
@transaction.atomic
def handle(self, *args, **options):
# There's an Internet Freedom Fund open round, so bail out. Avoids duplicate command runs.
if Round.objects.filter(title=FS_ROUND_TITLE).count():
self.stdout.write(self.style.WARNING('Skipping. The target Round/Fund Type and Application Form exist'))
return
application_form = self.create_fellowship_application_form()
proposal_form = self.create_fellowship_proposal_form()
application_review_form = self.create_fellowship_application_review_form()
proposal_review_form = self.create_fellowship_proposal_review_form()
fund = self.create_fellowship_fund_type(application_form, proposal_form, application_review_form, proposal_review_form)
self.create_fellowship_round(fund)
def create_fellowship_application_form(self):
focus_id = Category.objects.get(name='Focus').id
objectives_id = Category.objects.get(name='Objective(s)').id
beneficiaries_id = Category.objects.get(name='Beneficiaries').id
regions_id = Category.objects.get(name='Region(s)').id
addressed_id = Category.objects.get(name='Addressed problems').id
status_id = Category.objects.get(name='Project status').id
data = [
{"type": "text_markup", "value": "<h3>About you</h3>", "id": "ef672ec5-f24c-4e95-9f18-522a5a1e6833"},
{"type": "title", "value": {"field_label": "What is your project name?", "help_text": "", "info": None}, "id": "32c37ee8-7d5b-4fc0-b606-9697a1c7e5c2"},
{"type": "full_name", "value": {"field_label": "Your name", "help_text": "", "info": None}, "id": "3b051ef2-3c75-4a70-aae3-999d58852810"},
{"type": "email", "value": {"field_label": "E-mail", "help_text": "", "info": None}, "id": "bfc488d3-b77d-427d-825d-9000797e9576"},
{"type": "address", "value": {"field_label": "Address", "help_text": "", "info": None}, "id": "2c0db01a-b5ab-4882-aad8-8c9a2ec05e8f"},
{"type": "value", "value": {"field_label": "If you are applying for direct funding, how much do you need?", "help_text": "Amount requested should be less than 50000 USD.", "info": None}, "id": "cfae89dc-f327-45f4-80e9-f267c3bd1ec7"},
{"type": "char", "value": {"field_label": "What is your current or most recent position and employer or research institution?", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "1282223d-77f5-4047-be03-4df4c4b2148a"},
{"type": "rich_text", "value": {"field_label": "What are (or were) your roles and responsibilities there?", "help_text": "", "required": "", "default_value": ""}, "id": "9c0256e4-42e1-41fe-9880-7f621d6c3458"},
{"type": "dropdown", "value": {"field_label": "Have you ever applied or received funding through an OTF fellowship program?", "help_text": "", "required": "", "choices": ["Yes", "No"]}, "id": "f8efef0a-0632-4c81-b4db-7bc6a06caa7d"},
{"type": "text_markup", "value": "<h3>About your project</h3>", "id": "3541d1b1-afc7-4dcd-8ed9-e9af27de5f3d"},
{"type": "rich_text", "value": {"field_label": "What is your project idea?", "help_text": "", "required": "", "default_value": ""}, "id": "1eb8b4e3-e2bb-4810-a8ce-3fc82a3192c8"},
{"type": "rich_text", "value": {"field_label": "How would you do it?", "help_text": "", "required": "", "default_value": ""}, "id": "177d56e8-2df1-4ead-8e3d-4916610fbed6"},
{"type": "rich_text", "value": {"field_label": "Why are you the right person for this project?", "help_text": "", "required": "", "default_value": ""}, "id": "05ff1755-947b-4e41-8f71-aae99977c572"},
{"type": "duration", "value": {"field_label": "How long do you want to work on this fellowship?", "help_text": "", "info": None}, "id": "3ccac109-2839-4b5d-b133-0e6cfca7c766"},
{"type": "text_markup", "value": "<h3>Host organization</h3>", "id": "f4b3ae6f-a1d6-4c9d-b334-e40614167257"},
{"type": "char", "value": {"field_label": "What is your most ideal host organization?", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "0afaf4e1-4556-4e79-aa3d-4990e33620da"},
{"type": "char", "value": {"field_label": "What is your next best host organization?", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "a543b34f-ae6a-4b17-8ac3-ececc14573a0"},
{"type": "text_markup", "value": "<h3>Request specific questions</h3>", "id": "755363fa-6a1c-422f-a03f-89db07a96e17"},
{"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "57cc52e2-b3ff-4e9f-a5fe-42e7735e16c2"},
{"type": "text_markup", "value": "<h3>Descriptors</h3>", "id": "b6ee65b3-d5cd-4cb0-9d7c-6e29d86deaaf"},
{"type": "category", "value": {"field_label": "Status", "help_text": "", "required": "", "category": status_id, "multi": "true"}, "id": "ff4d12ff-7b88-4e87-bb5b-81543aef0e25"},
{"type": "category", "value": {"field_label": "Objectives", "help_text": "", "required": "true", "category": objectives_id, "multi": "true"}, "id": "30c41288-a762-4003-acce-8c12e7343d90"},
{"type": "category", "value": {"field_label": "Beneficiaries", "help_text": "", "required": "", "category": beneficiaries_id, "multi": "true"}, "id": "56833441-542b-4a06-8ad2-8e7e8fd1a334"},
{"type": "category", "value": {"field_label": "Focus", "help_text": "", "required": "", "category": focus_id, "multi": "true"}, "id": "6b404851-ce2b-494f-b9f7-62858a937469"},
{"type": "category", "value": {"field_label": "Addressed problems", "help_text": "", "required": "true", "category": addressed_id, "multi": "true"}, "id": "590e4b77-c4f4-4bd0-b5be-2ad2851da4f5"},
{"type": "category", "value": {"field_label": "Region", "help_text": "", "required": "", "category": regions_id, "multi": "true"}, "id": "81c01278-8ba4-4d84-a1da-e05a07aad874"},
{"type": "multi_file", "value": {"field_label": "Upload", "help_text": "", "required": ""}, "id": "25740b9d-0f8f-4ce1-88fa-c6ee831c6aef"},
{"type": "text_markup", "value": "<h3>I acknowledge</h3>", "id": "f69d3a56-491a-4321-89b7-4d7e34d69a1d"},
{"type": "checkbox", "value": {"field_label": "My application will be dismissed if it does not fit within OTF\'s mission, values, principles statements.", "help_text": "", "default_value": ""}, "id": "5178e15f-d442-4d36-824d-a4292ef77062"},
{"type": "text_markup", "value": "Read our <a href=\"\/about/program\">mission, values, and principles</a>.", "id": "b0c69627-d7db-4633-b46f-0e787dddc779"},
{"type": "checkbox", "value": {"field_label": "I have read and understand OTF\'s Terms and Privacy policy.", "help_text": "", "default_value": ""}, "id": "bd91e220-4cdb-4392-8054-7b7dfe667d46"},
{"type": "text_markup", "value": "Read the <a href=\"\/tos\">Terms and Privacy policy</a>.", "id": "6f6236fd-9d1d-4090-a819-72fb96205bc0"},
{"type": "checkbox", "value": {"field_label": "I am legally able to sign contracts or represent an organization that can.", "help_text": "", "default_value": ""}, "id": "8d000129-ca8b-48cf-8dc2-4651bcbe46e8"},
{"type": "checkbox", "value": {"field_label": "I understand that all intellectual property created with support for this application must be openly licensed.", "help_text": "", "default_value": ""}, "id": "92f0801e-b9dc-4edc-9716-3f1709ae1c9b"},
{"type": "checkbox", "value": {"field_label": "I understand that if my application is incomplete in any way, it will be dismissed.", "help_text": "", "default_value": ""}, "id": "3a3f2da3-4e32-4b86-9060-29c606927114"},
{"type": "checkbox", "value": {"field_label": "I understand that if my application is after a deadline, it will not be reviewed until after the next deadline.", "help_text": "", "default_value": ""}, "id": "19395179-ed9f-4556-9b6b-ab5caef4f610"},
{"type": "text_markup", "value": "<h3>I would like to</h3>", "id": "21c9a554-d0d2-4543-9ca5-f53e506fb7c4"},
{"type": "checkbox", "value": {"field_label": "Sign up to the OTF-Announce list, low traffic (funding opportunities, major alerts, etc).", "help_text": "", "default_value": ""}, "id": "1345a8eb-4dcc-4170-a5ac-edda42d4dafc"},
{"type": "checkbox", "value": {"field_label": "Sign up for OTF\'s daily newsletter (collection of news related to global internet freedom).", "help_text": "", "default_value": ""}, "id": "4ca22ebb-daba-4fb6-a4a6-b130dc6311a8"}
]
application_form, _ = ApplicationForm.objects.get_or_create(name='Fellowship application', defaults={'form_fields': json.dumps(data)})
return application_form
    def create_fellowship_proposal_form(self):
        """Get or create the Wagtail ApplicationForm for fellowship proposals.

        The field definitions are stored as a JSON blob in ``form_fields``;
        ``get_or_create`` keys on the form name, so existing forms are reused
        and the defaults below only apply on first creation.
        """
        # Stream-field style definitions: each dict is one form field/block.
        data2 = [
            {"type": "text_markup", "value": "<h3>Proposal information</h3>", "id": ""},
            {"type": "title", "value": {"field_label": "Proposal title", "help_text": "", "info": None}, "id": ""},
            {"type": "full_name", "value": {"field_label": "Your name", "help_text": "", "info": None}, "id": "c0c75948-b3c3-42be-8646-bc2a5d8521c3"},
            {"type": "email", "value": {"field_label": "E-mail", "help_text": "", "info": None}, "id": "a607ec56-da2a-46d4-b0c9-7c8f3c351a6e"},
            {"type": "address", "value": {"field_label": "Address", "help_text": "", "info": None}, "id": "8d3cf1ac-928f-4ee2-ad12-2e5fb16b4748"},
            {"type": "value", "value": {"field_label": "If you are applying for direct funding, how much do you need?", "help_text": "Amount requested should be less than 50000 USD.", "info": None}, "id": "cfae89dc-f327-45f4-80e9-f267c3bd1ec7"},
            {"type": "duration", "value": {"field_label": "How long do you want to work on this fellowship?", "help_text": "", "info": None}, "id": "08b9b5c3-e01d-41ac-95be-600a4fee7d87"},
            {"type": "char", "value": {"field_label": "Host organisation", "help_text": "", "required": "", "format": "", "default_value": ""}, "id": "bc03235e-3c78-4770-9fc2-97feb93c2c8c"},
            {"type": "date", "value": {"field_label": "Start date", "help_text": "", "required": "", "default_value": ""}, "id": "672cb6f1-335c-4005-a0f1-46c414feda06"},
            {"type": "date", "value": {"field_label": "Completion date", "help_text": "", "required": "", "default_value": ""}, "id": "8262f209-f084-4a79-9dfa-2d18137119bb"},
            {"type": "rich_text", "value": {"field_label": "Objectives", "help_text": "", "required": "", "default_value": ""}, "id": "af2c5f38-7257-4295-87fa-787060e845ef"},
            {"type": "rich_text", "value": {"field_label": "Milestones and dates", "help_text": "", "required": "", "default_value": ""}, "id": "3c521847-7642-4cae-aca9-d5336ad8962d"},
            {"type": "rich_text", "value": {"field_label": "Anticipated outputs and outcomes", "help_text": "", "required": "", "default_value": ""}, "id": "fd0eb7ea-e054-4bcf-9580-eb672d44745c"},
            {"type": "text_markup", "value": "<h3>Request specific questions</h3>", "id": "b05a54d1-3a59-41d1-bb70-d5f0f0acd67d"},
            {"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "b6d71932-98c2-4ce8-a5e6-454a1f800d21"},
            {"type": "multi_file", "value": {"field_label": "Upload", "help_text": "", "required": ""}, "id": "30dfa46e-f656-46c9-9efc-bab9029f2008"}
        ]
        proposal_form, _ = ApplicationForm.objects.get_or_create(name='Fellowship proposal', defaults={'form_fields': json.dumps(data2)})
        return proposal_form
    def create_fellowship_application_review_form(self):
        """Get or create the ReviewForm used to review fellowship applications.

        Reuses an existing form of the same name; the JSON field definitions
        below are only applied on first creation.
        """
        data3 = [
            {"type": "recommendation", "value": {"field_label": "Overall, do you think we should select this applicant and their project to be part of the fellowship program?", "help_text": "", "info": None}, "id": "56264b32-fa39-4c08-b41e-68e9c54b2712"},
            {"type": "rich_text", "value": {"field_label": "If no, please select a reason why not.", "help_text": "", "required": "", "default_value": ""}, "id": "f0533950-57f5-4bb7-81ec-2d3813490c88"},
            {"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "ba789376-e3f9-434e-8da5-330811723b30"},
            {"type": "comments", "value": {"field_label": "Other comments", "help_text": "", "info": None}, "id": "e74e2581-d06c-43b1-9c0b-911407225834"}
        ]
        application_review_form, _ = ReviewForm.objects.get_or_create(name='Fellowship application review', defaults={'form_fields': json.dumps(data3)})
        return application_review_form
    def create_fellowship_proposal_review_form(self):
        """Get or create the ReviewForm used to review fellowship proposals."""
        data4 = [
            {"type": "recommendation", "value": {"field_label": "Overall, do you think we should select this applicant and their project to be part of the fellowship program?", "help_text": "", "info": None}, "id": "e1ea4f9d-64e2-4f28-a68a-851ec0f2d9ad"},
            {"type": "rich_text", "value": {"field_label": "If no, please select a reason why not.", "help_text": "", "required": "", "default_value": ""}, "id": "e68b6fe9-8b11-4cf0-8ae4-2ffed75e1e80"},
            {"type": "rich_text", "value": {"field_label": "If yes, but you believe some changes need to be made to the proposed effort, please let us know.", "help_text": "", "required": "", "default_value": ""}, "id": "a413f3a2-b486-4bf3-9e2d-c48d19626876"},
            {"type": "rich_text", "value": {"field_label": "Request specific questions", "help_text": "", "required": "", "default_value": ""}, "id": "536c963a-f183-45bc-b83f-458b46dc5542"},
            {"type": "comments", "value": {"field_label": "Anything else you'd like to give us feedback on?", "help_text": "", "info": None}, "id": "cc82ba7b-b55e-4309-85f0-f68ad6f43471"}
        ]
        proposal_review_form, _ = ReviewForm.objects.get_or_create(name='Fellowship proposal review', defaults={'form_fields': json.dumps(data4)})
        return proposal_review_form
    def create_fellowship_fund_type(self, application_form, proposal_form, application_review_form, proposal_review_form):
        """Get or create the fellowship FundType page and wire up its forms.

        If a FundType with the expected title already exists it is returned
        unchanged; otherwise a new one is created as a child of the apply
        home page, with a two-stage ('double') workflow and the given
        application/proposal forms and their review forms attached.
        """
        try:
            fund = FundType.objects.get(title=FS_FUND_TITLE)
        except FundType.DoesNotExist:
            # Wagtail pages must be added via add_child to get tree positions.
            apply_home = ApplyHomePage.objects.first()
            fund = FundType(title=FS_FUND_TITLE, workflow_name='double')
            apply_home.add_child(instance=fund)
            fund_form = ApplicationBaseForm.objects.create(application=fund, form=application_form)
            fund_form2 = ApplicationBaseForm.objects.create(application=fund, form=proposal_form)
            fund.forms = [fund_form, fund_form2]
            fund_review_form = ApplicationBaseReviewForm.objects.create(application=fund, form=application_review_form)
            fund_review_form2 = ApplicationBaseReviewForm.objects.create(application=fund, form=proposal_review_form)
            fund.review_forms = [fund_review_form, fund_review_form2]
            fund.save()
        return fund
def create_fellowship_round(self, fund):
User = get_user_model()
try:
lead = User.objects.get(full_name="<NAME>")
except User.DoesNotExist:
lead = User.objects.filter(groups__name=STAFF_GROUP_NAME).first()
round = Round(
title=FS_ROUND_TITLE,
lead=lead,
# The date of the original Information Controls Fellowship request type
start_date=date(2013, 1, 1),
end_date=date(2018, 8, 29)
)
round.parent_page = fund
fund.add_child(instance=round)
| 1.921875 | 2 |
synapse/tests/test_lib_stormlib_oauth.py | vishalbelsare/synapse | 0 | 12757840 | <gh_stars>0
import asyncio
import yarl
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.tests.utils as s_test
class OAuthTest(s_test.SynTest):
    """Tests for the Storm $lib.inet.http.oauth.v1 request-signing client."""

    async def test_storm_oauth(self):
        """Sign requests in query, header, and body modes and check the params."""
        async with self.getTestCore() as core:
            # super duper basic
            q = '''
            $url = https://127.0.0.1:40000
            $ckey = foo
            $csec = bar
            $atkn = biz
            $asec = baz
            $client = $lib.inet.http.oauth.v1.client($ckey, $csec, $atkn, $asec, $lib.inet.http.oauth.v1.SIG_QUERY)
            return($client.sign($url))
            '''
            url, headers, body = await core.callStorm(q)
            self.len(0, headers)
            # Query-mode signing puts all oauth_* params in the query string.
            uri = yarl.URL(url)
            self.nn(uri.query.get('oauth_signature'))
            self.nn(uri.query.get('oauth_nonce'))
            self.nn(uri.query.get('oauth_timestamp'))
            self.eq(uri.query.get('oauth_version'), '1.0')
            self.eq(uri.query.get('oauth_signature_method'), 'HMAC-SHA1')
            self.eq(uri.query.get('oauth_consumer_key'), 'foo')
            self.eq(uri.query.get('oauth_token'), 'biz')

            # headers should get populated
            q = '''
            $url = "https://vertex.link/fakeapi"
            $ckey = beep
            $csec = boop
            $atkn = neato
            $asec = burrito
            $headers = $lib.dict(
                "content-type"="application/json"
            )
            $client = $lib.inet.http.oauth.v1.client($ckey, $csec, $atkn, $asec, $lib.inet.http.oauth.v1.SIG_HEADER)
            return($client.sign($url, headers=$headers))
            '''
            url, headers, body = await core.callStorm(q)
            uri = yarl.URL(url)
            self.eq(str(url), 'https://vertex.link/fakeapi')
            self.eq(headers.get('content-type'), 'application/json')
            auth = headers.get('Authorization')
            self.nn(auth)
            params = {}
            # BUG FIX: str.strip() removes a *character set* from both ends, not
            # a prefix -- the original .strip("OAuth ") only worked by luck.
            # Remove the literal "OAuth " scheme prefix instead.
            auth = auth[len('OAuth '):]
            for pair in auth.split(', '):
                # Split on the first '=' only; values are quoted/percent-encoded.
                k, v = pair.split('=', 1)
                params[k] = v.strip('"')
            self.nn(params.get('oauth_nonce'))
            self.nn(params.get('oauth_timestamp'))
            self.nn(params.get('oauth_signature'))
            self.eq(params.get('oauth_version'), '1.0')
            self.eq(params.get('oauth_signature_method'), 'HMAC-SHA1')
            self.eq(params.get('oauth_consumer_key'), 'beep')
            self.eq(params.get('oauth_token'), '<PASSWORD>')

            # Body-mode signing folds the oauth_* params into a form-encoded body.
            q = '''
            $url = "https://vertex.link/fakeapi"
            $ckey = beep
            $csec = boop
            $atkn = neato
            $asec = burrito
            $headers = $lib.dict(
                "Content-Type"="application/json"
            )
            $body = $lib.dict(
                foo = bar,
                biz = baz,
            )
            $client = $lib.inet.http.oauth.v1.client($ckey, $csec, $atkn, $asec, $lib.inet.http.oauth.v1.SIG_BODY)
            return($client.sign($url, method='POST', headers=$headers, body=$body))
            '''
            url, headers, body = await core.callStorm(q)
            uri = yarl.URL(url)
            self.eq(str(url), 'https://vertex.link/fakeapi')
            # it will override the content type header
            self.eq(headers, {'Content-Type': 'application/x-www-form-urlencoded'})
            self.isin('foo=bar', body)
            self.isin('biz=baz', body)
            self.isin('oauth_nonce=', body)
            self.isin('oauth_timestamp=', body)
            self.isin('oauth_version=1.0', body)
            self.isin('oauth_signature=', body)
            self.isin('oauth_consumer_key=beep', body)
            self.isin('oauth_token=<PASSWORD>', body)
            self.isin('oauth_signature_method=HMAC-SHA1', body)

            # headers should auto-populate if not given
            q = '''
            $url = "https://vertex.link/fakeapi"
            $ckey = beep
            $csec = boop
            $atkn = neato
            $asec = burrito
            $body = $lib.dict(
                awesome = possum,
            )
            $client = $lib.inet.http.oauth.v1.client($ckey, $csec, $atkn, $asec, $lib.inet.http.oauth.v1.SIG_BODY)
            return($client.sign($url, method='POST', headers=$lib.null, body=$body))
            '''
            url, headers, body = await core.callStorm(q)
            uri = yarl.URL(url)
            self.eq(str(url), 'https://vertex.link/fakeapi')
            self.eq(headers, {'Content-Type': 'application/x-www-form-urlencoded'})
            self.isin('awesome=possum', body)

            # body can't be used on GET requests (which is the default method)
            q = '''
            $url = "https://vertex.link/fakeapi"
            $ckey = beep
            $csec = boop
            $atkn = neato
            $asec = burrito
            $headers = $lib.dict(
                "Content-Type"="application/json"
            )
            $body = $lib.dict(
                foo = bar,
                biz = baz,
            )
            $client = $lib.inet.http.oauth.v1.client($ckey, $csec, $atkn, $asec, $lib.inet.http.oauth.v1.SIG_BODY)
            return($client.sign($url, headers=$headers, body=$body))
            '''
            with self.raises(s_exc.StormRuntimeError):
                await core.callStorm(q)
| 2.078125 | 2 |
layer_vis.py | andobrescu/Multi_task_plant_phenotyping | 9 | 12757841 | <filename>layer_vis.py
import keras.backend as K
from keras import callbacks
import numpy as np
import cv2
import matplotlib.pyplot as plt
# On epoch end get the intermediate masks for a set of images at a set of given layers
# layers is given as a list of integers corresponding to convolutional layers
# batch_size is how many images are computed at each layer
class vis_layer(callbacks.Callback):
    """Keras callback that visualizes intermediate layer activations.

    At the end of every epoch, a batch of input images is pushed through the
    model and the mean activation map of each requested layer is rendered as
    one row of a figure saved under ``results_path``.

    Parameters
    ----------
    imgs : indexable collection of input images
    labels : ground-truth labels (stored but not used by this callback)
    layers : list of int, indices into ``model.layers`` to visualize
    results_path : str, directory the figures are written to
    batch_size : int, number of images visualized per epoch
    """

    def __init__(self, imgs, labels, layers, results_path, batch_size=6):
        super(vis_layer, self).__init__()
        self.X = imgs
        self.Y = labels
        self.batch_size = batch_size
        self.layers = layers
        self.output_path = results_path

    def on_epoch_end(self, epoch, logs=None):
        # Random input choice; swap in a fixed index list to track specific images.
        random_idx = np.random.choice(np.arange(0, len(self.X)), self.batch_size)
        # random_idx = [75, 86, 22, 268, 180, 210]
        img_batch = np.array([self.X[i] for i in random_idx])
        img_batch_row = np.concatenate(img_batch, axis=1)
        self.model.predict(img_batch)
        print('Predicted')
        l_rows = []
        for l in self.layers:
            # Function mapping the model input to the requested layer's output.
            l_model = K.function([self.model.layers[0].input], [self.model.layers[l].output])
            l_out = l_model([img_batch])[0]
            l_out_img = []
            for i in range(self.batch_size):
                # Collapse the channel axis, then rescale to [0, 255] for display.
                out_mean = np.average(l_out[i], axis=2)
                # BUG FIX: the original passed 0 as the `dst` positional argument
                # (cv2.normalize(src, dst, alpha, beta, ...)). `None` lets OpenCV
                # allocate the output; alpha=0/beta=255 is the NORM_MINMAX range.
                out_norm = cv2.normalize(out_mean, None, 0, 255, norm_type=cv2.NORM_MINMAX)
                l_out_img.append(out_norm)
            l_rows.append(np.concatenate(l_out_img, axis=1))
        num_rows = len(l_rows) + 1
        # Plot the original images on top and one row per visualized layer.
        plt.subplot(num_rows, 1, 1)
        plt.imshow(img_batch_row)
        plt.title('Intermediate layer visualization')
        plt.xticks([])
        plt.yticks([])
        plt.ylabel('Original')
        for i in range(len(l_rows)):
            plt.subplot(num_rows, 1, i + 2)
            plt.imshow(l_rows[i])
            plt.xticks([])
            plt.yticks([])
            plt.ylabel('layer {}'.format(self.layers[i]))
        plt.savefig(self.output_path + '/Layer_vis_epoch_{}.png'.format(epoch))
| 2.890625 | 3 |
petstagram/core/clean_up.py | ZhivkoZhelyazkov/Petstagram | 0 | 12757842 | import os
def clean_up_files(path):
    """Delete the file at *path* from disk.

    Raises the usual OSError (e.g. FileNotFoundError) if the file
    cannot be removed.
    """
    os.unlink(path)
| 1.609375 | 2 |
persimmon/view/pins/pin.py | AlvarBer/Persimmon | 206 | 12757843 | from persimmon.view.pins.circularbutton import CircularButton # MYPY HACK
from persimmon.view.util import Type, AbstractWidget, Connection
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.graphics import Color, Ellipse, Line
from kivy.input import MotionEvent
from abc import abstractmethod
Builder.load_file('persimmon/view/pins/pin.kv')
class Pin(CircularButton, metaclass=AbstractWidget):
    """Abstract base class for block connection pins.

    Concrete subclasses (input/output pins) define how touches create,
    accept, and delete connections.
    """

    val = ObjectProperty(None, force_dispatch=True)
    block = ObjectProperty()
    type_ = ObjectProperty(Type.ANY)

    @abstractmethod
    def on_touch_down(self, touch: MotionEvent) -> bool:
        """Handle the start of a touch gesture on this pin."""
        raise NotImplementedError

    @abstractmethod
    def on_touch_up(self, touch: MotionEvent) -> bool:
        """Handle the end of a touch gesture on this pin."""
        raise NotImplementedError

    @abstractmethod
    def on_connection_delete(self, connection: Connection):
        """React to one of this pin's connections being removed."""
        raise NotImplementedError

    @abstractmethod
    def connect_pin(self, connection: Connection):
        """Attach *connection* to this pin."""
        raise NotImplementedError

    def typesafe(self, other: 'Pin') -> bool:
        """Tell whether connecting this pin to *other* is type-safe."""
        if self.block == other.block:
            return False  # pins on the same block may not connect
        if self.__class__ == other.__class__:
            return False  # two pins of the same kind may not connect
        if self.type_ == Type.ANY or other.type_ == Type.ANY:
            return True  # anything is possible with ANY
        return self.type_ == other.type_

    # Hack
    def on_type_(self, instance: 'Pin', value: Type):
        """Kivy property observer keeping the pin color in sync with its type.

        If the kv lang were a bit smarter this binding would not be needed.
        """
        self.color = value.value
| 2.96875 | 3 |
privacyscanner/worker.py | johnp/privacyscanner | 21 | 12757844 | <reponame>johnp/privacyscanner<filename>privacyscanner/worker.py<gh_stars>10-100
import logging
import multiprocessing
import os
import signal
import socket
import tempfile
import time
from datetime import datetime
from multiprocessing.connection import wait
import psycopg2
from privacyscanner.exceptions import RetryScan, RescheduleLater
from privacyscanner.filehandlers import NoOpFileHandler
from privacyscanner.jobqueue import JobQueue
from privacyscanner.raven import has_raven, raven
from privacyscanner.result import Result
from privacyscanner.scanmeta import ScanMeta
from privacyscanner.scanmodules import load_modules
from privacyscanner.loghandlers import WorkerWritePipeHandler, ScanStreamHandler
from privacyscanner.utils import kill_everything
_JOB_STARTED_QUERY = """
UPDATE scanner_scaninfo
SET scan_host = %s,
time_started = %s,
num_tries = num_tries + 1
WHERE scan_id = %s AND scan_module = %s
"""
_JOB_FINISHED_QUERY = """
UPDATE scanner_scaninfo
SET time_finished = %s
WHERE scan_id = %s AND scan_module = %s
"""
_JOB_FAILED_QUERY = """
UPDATE scanner_scaninfo
SET scan_host = NULL,
time_started = NULL
WHERE scan_id = %s AND scan_module = %s
"""
_LOG_QUERY = """
INSERT INTO scanner_logentry (scan_id, scan_module, scan_host, time_created, level, message)
VALUES (%s, %s, %s, %s, %s, %s)
"""
class WorkerInfo:
    """Bookkeeping record the master keeps for one worker process.

    Tracks the worker's process handle, communication pipe and events, the
    job currently being executed, and liveness/runtime timestamps.
    """

    def __init__(self, worker_id, process, read_pipe, stop_event, ack_event):
        self.id = worker_id
        self.process = process
        self.read_pipe = read_pipe
        self.stop_event = stop_event
        self.ack_event = ack_event
        # No job assigned yet.
        self.scan_id = None
        self.scan_module = None
        self._heartbeat = None
        self._last_execution_time = None
        self.ping()

    @property
    def pid(self):
        """PID of the underlying worker process."""
        return self.process.pid

    def ping(self):
        """Record that the worker showed a sign of life just now."""
        self._heartbeat = time.time()

    def ack(self):
        """Tell the worker its last queue event has been processed."""
        self.ack_event.set()

    def notify_job_started(self, scan_id, scan_module):
        """Remember which job the worker picked up and when."""
        self.scan_id = scan_id
        self.scan_module = scan_module
        self._last_execution_time = time.time()

    def notify_job_finished(self):
        """Clear the current-job bookkeeping."""
        self.scan_id = None
        self.scan_module = None

    # A failed job is cleared exactly like a finished one.
    notify_job_failed = notify_job_finished

    def get_execution_time(self):
        """Seconds the current job has been running; 0 when idle."""
        if self._last_execution_time is None:
            return 0
        elapsed = time.time() - self._last_execution_time
        return elapsed if elapsed > 0 else 0

    def stop(self):
        """Ask the worker to shut down after its current job."""
        self.stop_event.set()

    def __str__(self):
        return '<{0}/{1} pid={2}>'.format(self.scan_id, self.scan_module, self.pid)
class WorkerMaster:
    """Coordinates a pool of worker processes that execute scan jobs.

    The master spawns workers, relays their status events into the
    PostgreSQL scan tables, kills hanging jobs, and performs a graceful
    shutdown on SIGINT/SIGTERM (SIGUSR1 prints the running workers).
    """

    def __init__(self, db_dsn, scan_module_list, scan_module_options=None,
                 max_tries=3, num_workers=2, max_executions=100,
                 max_execution_times=None, raven_dsn=None):
        self.name = socket.gethostname()
        self._db_dsn = db_dsn
        self.scan_module_list = scan_module_list
        if scan_module_options is None:
            scan_module_options = {}
        self.scan_module_options = scan_module_options
        self.max_tries = max_tries
        self.num_workers = num_workers
        self.max_executions = max_executions
        # Per-module timeouts; the entry under key None is the default.
        if max_execution_times is None:
            max_execution_times = {None: None}
        self.max_execution_times = max_execution_times
        self.max_execution_time = max_execution_times.get(None)
        self._raven_dsn = raven_dsn
        self._workers = {}  # pid -> WorkerInfo
        self._worker_ids = set(range(num_workers))  # free worker id slots
        self._terminated_worker_pids = set()
        self._running = False
        self._force_stop = False
        self._conn = None
        self._connect()

    def start(self):
        """Run the master loop until stop() is called (blocking)."""
        multiprocessing.set_start_method('spawn')
        signal.signal(signal.SIGINT, self._handle_signal_stop)
        signal.signal(signal.SIGTERM, self._handle_signal_stop)
        signal.signal(signal.SIGUSR1, self._handle_signal_usr1)
        self._running = True
        while self._running:
            self._start_workers()
            self._process_queue()
            self._check_hanging()
            self._remove_workers()
            time.sleep(0.25)
        # First shutdown phase: let workers finish their current job.
        print('\nGently asking workers to stop after their current job ...')
        for worker_info in self._workers.values():
            worker_info.stop()
        while not self._force_stop and self._workers:
            workers_str = self._get_running_workers_str()
            print('{} workers still alive: {}'.format(len(self._workers), workers_str))
            self._check_hanging()
            self._remove_workers()
            time.sleep(0.25)
        # Second phase (after a second stop request): kill what is left.
        if self._workers:
            print('Forcefully killing workers ...')
            for worker_info in self._workers.values():
                kill_everything(worker_info.pid)
        print('All workers stopped. Shutting down ...')

    def stop(self):
        """Request shutdown; a second call escalates to a forced stop."""
        if self._running:
            self._running = False
        else:
            self._force_stop = True

    def _connect(self):
        """(Re)open the database connection if it is missing or closed."""
        if self._conn is None or self._conn.closed:
            self._conn = psycopg2.connect(self._db_dsn)

    def _start_workers(self):
        """Spawn new worker processes until the pool is at full strength."""
        ppid = os.getpid()
        for _ in range(self.num_workers - len(self._workers)):
            worker_id = self._worker_ids.pop()
            stop_event = multiprocessing.Event()
            ack_event = multiprocessing.Event()
            read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
            args = (worker_id, ppid, self._db_dsn, self.scan_module_list,
                    self.scan_module_options, self.max_tries, self.max_executions,
                    write_pipe, stop_event, ack_event, self._raven_dsn)
            process = WorkerProcess(target=_spawn_worker, args=args)
            process.start()
            worker_info = WorkerInfo(worker_id, process, read_pipe, stop_event, ack_event)
            self._workers[worker_info.pid] = worker_info

    def _process_queue(self):
        """Drain pending events from all live worker pipes."""
        while True:
            pipes = [worker_info.read_pipe for worker_info in self._workers.values()
                     if worker_info.process.is_alive()]
            ready_pipes = wait(pipes, timeout=0.1)
            if not ready_pipes:
                break
            for read_pipe in ready_pipes:
                try:
                    event = read_pipe.recv()
                except EOFError:
                    # Worker closed its end; _remove_workers will reap it.
                    continue
                self._process_queue_event(event)

    def _process_queue_event(self, event):
        """Dispatch one (pid, action, args) event from a worker."""
        pid, action, args = event
        worker_info = self._workers[pid]
        worker_info.ping()
        if action == 'job_started':
            scan_id, scan_module_name, time_started, num_tries = args
            self._event_job_started(scan_id, scan_module_name, time_started)
            worker_info.notify_job_started(scan_id, scan_module_name)
        elif action == 'job_finished':
            self._event_job_finished(
                worker_info.scan_id, worker_info.scan_module, time_finished=args[0])
            worker_info.notify_job_finished()
        elif action == 'job_failed':
            self._event_job_failed(worker_info.scan_id, worker_info.scan_module)
            worker_info.notify_job_failed()
        elif action == 'log':
            log_time, level, message = args
            self._event_job_log(worker_info.scan_id, worker_info.scan_module,
                                log_time, level, message)
        elif action == 'add_file':
            pass
        elif action == 'add_debug_file':
            pass
        # Unblock the worker, which waits for an ack after every event.
        worker_info.ack()

    def _event_job_started(self, scan_id, scan_module_name, time_started):
        params = (self.name, time_started, scan_id, scan_module_name)
        self._execute_sql_autocommit(_JOB_STARTED_QUERY, params)

    def _event_job_finished(self, scan_id, scan_module_name, time_finished):
        params = (time_finished, scan_id, scan_module_name)
        self._execute_sql_autocommit(_JOB_FINISHED_QUERY, params)

    def _event_job_failed(self, scan_id, scan_module_name):
        params = (scan_id, scan_module_name)
        self._execute_sql_autocommit(_JOB_FAILED_QUERY, params)

    def _event_job_log(self, scan_id, scan_module_name, log_time, level, message):
        log_time = datetime.fromtimestamp(log_time)
        params = (scan_id, scan_module_name, self.name, log_time, level, message)
        self._execute_sql_autocommit(_LOG_QUERY, params)

    def _execute_sql_autocommit(self, query, params):
        """Execute a statement, retrying forever on operational DB errors."""
        while True:
            try:
                self._connect()
                with self._conn.cursor() as c:
                    c.execute(query, params)
                self._conn.commit()
                break
            except psycopg2.OperationalError:
                print('Database operational error. Retrying after 10 seconds.')
                time.sleep(10)

    def _check_hanging(self):
        """Kill workers whose current job exceeded its allowed runtime."""
        for worker_info in self._workers.values():
            max_execution_time = self.max_execution_times.get(
                worker_info.scan_module, self.max_execution_time)
            if max_execution_time is None:
                continue
            if worker_info.get_execution_time() > max_execution_time:
                # BUG FIX: report the failure *before* clearing the job info.
                # notify_job_failed() resets scan_id/scan_module to None, so
                # the original order ran the failure UPDATE with NULLs
                # (matching no rows) and the hung job was never released.
                self._event_job_failed(worker_info.scan_id, worker_info.scan_module)
                worker_info.notify_job_failed()
                kill_everything(worker_info.pid)
                self._terminated_worker_pids.add(worker_info.pid)

    def _remove_workers(self):
        """Reap dead/killed workers and recycle their id slots."""
        for worker_info in self._workers.values():
            if not worker_info.process.is_alive():
                self._terminated_worker_pids.add(worker_info.pid)
        for pid in self._terminated_worker_pids:
            self._worker_ids.add(self._workers[pid].id)
            del self._workers[pid]
        self._terminated_worker_pids.clear()

    def _handle_signal_stop(self, signum, frame):
        assert signum in (signal.SIGINT, signal.SIGTERM)
        self.stop()

    def _handle_signal_usr1(self, signum, frame):
        assert signum == signal.SIGUSR1
        print('Running workers: {}'.format(self._get_running_workers_str()))

    def _get_running_workers_str(self):
        return ' '.join(str(worker_info) for worker_info in self._workers.values())
def _spawn_worker(*args, **kwargs):
    """Child-process entry point: build a Worker and run it to completion."""
    Worker(*args, **kwargs).run()
class Worker:
    """A single scan worker running inside its own child process.

    Pulls jobs from the shared job queue, executes the scan module in a
    temporary working directory, and reports progress back to the master
    through a one-way pipe (each message is acknowledged via an event).
    """

    def __init__(self, worker_id, ppid, db_dsn, scan_module_list, scan_module_options,
                 max_tries, max_executions, write_pipe, stop_event, ack_event,
                 raven_dsn):
        self._id = worker_id
        self._pid = os.getpid()
        # Master's PID; used to detect an orphaned worker.
        self._ppid = ppid
        self._max_executions = max_executions
        self._write_pipe = write_pipe
        self._stop_event = stop_event
        self._ack_event = ack_event
        self._old_sigterm = signal.SIG_DFL
        self._old_sigint = signal.SIG_DFL
        self._raven_client = None
        # Sentry/raven error reporting is optional.
        if has_raven and raven_dsn:
            self._raven_client = raven.Client(raven_dsn)
        scan_modules = load_modules(scan_module_list, scan_module_options)
        self._job_queue = JobQueue(db_dsn, scan_modules, max_tries)

    def run(self):
        """Main worker loop: execute up to max_executions jobs, then exit."""
        while self._max_executions > 0:
            # Stop if our master died.
            if self._ppid != os.getppid():
                break
            # Our master asked us to stop. We must obey.
            if self._stop_event.is_set():
                break
            job = self._job_queue.get_job_nowait()
            if job is None:
                time.sleep(1)
                continue
            start_info = (job.scan_id, job.scan_module.name, datetime.today(), job.num_tries)
            self._notify_master('job_started', start_info)
            result = Result(job.current_result, NoOpFileHandler())
            # Per-job logger that forwards records to the master and stdout.
            logger = logging.Logger(job.scan_module.name)
            logger.addHandler(WorkerWritePipeHandler(self._pid, self._write_pipe))
            logger.addHandler(ScanStreamHandler())
            scan_meta = ScanMeta(worker_id=self._id, num_tries=job.num_tries)
            # Each scan runs in a throwaway working directory.
            with tempfile.TemporaryDirectory() as temp_dir:
                old_cwd = os.getcwd()
                os.chdir(temp_dir)
                try:
                    job.scan_module.logger = logger
                    job.scan_module.scan_site(result, scan_meta)
                except RetryScan:
                    # Scan module asked for an immediate retry.
                    self._job_queue.report_failure()
                    self._notify_master('job_failed', (datetime.today(),))
                except RescheduleLater as e:
                    # Partial success: keep the results, run again later.
                    self._job_queue.reschedule(e.not_before)
                    self._job_queue.report_result(result.get_updates())
                    self._notify_master('job_finished', (datetime.today(),))
                except Exception:
                    logger.exception('Scan module `%s` failed.', job.scan_module.name)
                    self._job_queue.report_failure()
                    self._notify_master('job_failed', (datetime.today(),))
                    if self._raven_client:
                        self._raven_client.captureException(tags={
                            'scan_id': job.scan_id,
                            'scan_module_name': job.scan_module.name
                        }, extra={'result': result.get_results()})
                else:
                    self._job_queue.report_result(result.get_updates())
                    self._notify_master('job_finished', (datetime.today(),))
                finally:
                    os.chdir(old_cwd)
                    # Clean up any child processes the scan module spawned.
                    kill_everything(self._pid, only_children=True)
            self._max_executions -= 1
        kill_everything(self._pid)

    def _notify_master(self, action, args):
        """Send one event to the master and wait for its acknowledgement."""
        self._write_pipe.send((self._pid, action, args))
        self._ack_event.wait()
        self._ack_event.clear()
# Process subclass that detaches each worker into its own process group, so
# terminal signals aimed at the master are not delivered to workers directly.
class WorkerProcess(multiprocessing.Process):
    def run(self):
        # We do not want our worker to receive the signals our parent (master)
        # gets. Therefore move it into an own process group.
        os.setpgid(0, 0)
        super().run()
| 2.203125 | 2 |
snaps/admin.py | rosekairu/SnapGram | 0 | 12757845 | from django.contrib import admin
from.models import Profile
# Register your models here.
admin.site.register(Profile) | 1.320313 | 1 |
recursion/sum-numbers-binary-recursion.py | ivanmmarkovic/code-problems | 2 | 12757846 |
def sum(arr: list, start: int, end: int) -> int:
    """Return the sum of arr[start..end] (inclusive) using binary recursion.

    An empty range (start > end) sums to 0.  NOTE: the name intentionally
    matches the original API and therefore shadows the builtin sum().
    """
    if start > end:
        return 0
    if start == end:
        return arr[start]
    # Integer floor division is exact; the original int(start + (end-start)/2)
    # went through float arithmetic, which can lose precision for huge indices.
    midpoint: int = start + (end - start) // 2
    return sum(arr, start, midpoint) + sum(arr, midpoint + 1, end)
# Demo: empty list, single element, and a multi-element list.
numbers1: list = []
numbers2: list = [1]
numbers3: list = [1, 2, 3]
print(sum(numbers1, 0, len(numbers1) - 1))  # expected: 0 (empty range)
print(sum(numbers2, 0, len(numbers2) - 1))  # expected: 1
print(sum(numbers3, 0, len(numbers3) - 1)) | 3.953125 | 4 |
marketplace/middleware/debug_mode_logging.py | prior/hspy | 0 | 12757847 | import traceback
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from marketplace import logger
class DebugModeLoggingMiddleware(object):
    """
    Use this middleware to force logging of errors even when Debug = True. You'll
    find this useful in the case that you have QA in DEBUG mode, and you'd still
    like to log exceptions that show up there (not just to the screen, which is the
    default behavior in DEBUG mode)

    If you don't want to fiddle with your middlewares list in different
    environments, you can just add this permanently, and then easily turn it off by
    using this setting:

    DEBUG_MODE_LOGGING = False

    You might find it useful to do that for local development, since it may get
    annoying to wade through exception logs on your console when you're already
    seeing every error on the screen.
    """
    def __init__(self):
        super(DebugModeLoggingMiddleware, self).__init__()
        self.log = logger.get_log(__name__)
        # Raising MiddlewareNotUsed tells Django to drop this middleware from
        # the chain for the lifetime of the process.
        if not getattr(settings, 'DEBUG', False):
            self.log.info('DebugModeLoggingMiddleware has been turned off for all requests cuz we\'re not in debug mode')
            raise MiddlewareNotUsed
        if not getattr(settings, 'DEBUG_MODE_LOGGING', True):
            self.log.info('DebugModeLoggingMiddleware has been explicitly turned off for all requests')
            raise MiddlewareNotUsed
        self.log.info('DebugModeLoggingMiddleware has been activated')

    def process_exception(self, request, exception):
        """Log the traceback of an unhandled view exception in DEBUG mode."""
        if settings.DEBUG:
            # BUG FIX: traceback.format_exc() takes an optional `limit`, not an
            # exception object; the original passed `exception` as `limit`.
            # Django calls process_exception while the exception is being
            # handled, so format_exc() picks up the active traceback.
            self.log.error(traceback.format_exc())
| 2.265625 | 2 |
src/web/browser.py | thirdshelf/qa-kom-framework | 0 | 12757848 | from abc import ABC
from selenium.webdriver import ActionChains
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.remote.switch_to import SwitchTo
from selenium.webdriver.remote.webelement import WebElement
from ..drivers.driver_manager import DriverManager
from ..general import Log
from ..mixins.javascript import JSBrowserMixin
from ..mixins.wait import WaitBrowserMixin
from ..support.driver_aware import DriverAware
class Browser(DriverAware, ABC):
    """Abstract base for browser wrappers around a Selenium WebDriver session.

    The concrete driver instance is owned by DriverManager and looked up per
    call via get_driver(); before/after hooks registered with add_before /
    add_after run around session creation and teardown.
    """

    def __new__(cls, *args, **kwargs):
        obj = super(Browser, cls).__new__(cls)
        # Hook lists are created in __new__ so subclasses that override
        # __init__ still get them.  Note: the double-underscore names are
        # mangled to _Browser__before_instance / _Browser__after_instance.
        obj.__before_instance = list()
        obj.__after_instance = list()
        return obj

    def execute_script(self, script: str, element: WebElement, *args):
        """Run JavaScript in the page context with *element* as an argument."""
        return self.get_driver().execute_script(script, element, *args)

    @property
    def action_chains(self) -> ActionChains:
        # Fresh ActionChains per access; chains are single-use in Selenium.
        return ActionChains(self.get_driver())

    def get_driver(self, wait_time: int = 0):
        """Return the WebDriver session for this browser (None if not started).

        NOTE(review): the wait_time parameter is currently ignored.
        """
        return DriverManager.get_session(self)

    def add_before(self, func):
        """Register a callable to run before a new driver session is created."""
        self.__before_instance.append(func)

    def add_after(self, func):
        """Register a callable to run after the driver session is destroyed."""
        self.__after_instance.append(func)

    @property
    def wait_for(self) -> WaitBrowserMixin:
        """Explicit-wait helpers bound to this browser."""
        return WaitBrowserMixin(self)

    @property
    def switch_to(self) -> SwitchTo:
        """Selenium SwitchTo helper for frames/windows."""
        return SwitchTo(self.get_driver())

    @property
    def alert(self) -> Alert:
        """Handle to the currently displayed JavaScript alert."""
        return Alert(self.get_driver())

    @property
    def js(self) -> JSBrowserMixin:
        """JavaScript execution helpers bound to the current driver."""
        return JSBrowserMixin(self.get_driver())

    def get(self, url: str, extensions: list = ()):
        """Open *url*, lazily creating the driver session on first use."""
        Log.info("Opening %s url" % url)
        if not self.get_driver():
            # First navigation: run before-hooks, then create the session.
            for func in self.__before_instance:
                func()
            DriverManager.create_session(self, extensions)
        self.get_driver().get(url)

    def refresh(self):
        """Reload the current page and wait until it finished loading."""
        Log.info("Refreshing the browser")
        self.get_driver().refresh()
        self.wait_for.page_is_loaded()

    def current_url(self):
        """Return the URL of the current page."""
        return self.get_driver().current_url

    def delete_all_cookies(self):
        """Clear all cookies for the current session."""
        self.get_driver().delete_all_cookies()

    def window_handles(self):
        """Return the driver's open window handles."""
        return self.get_driver().window_handles

    def close(self):
        """Close the current browser window (session keeps running)."""
        self.get_driver().close()

    def quit(self):
        """Tear down the driver session (best effort) and run after-hooks."""
        if self.get_driver():
            Log.info("Closing the browser")
            try:
                self.get_driver().quit()
            except Exception:
                # Best effort: the session is destroyed below regardless.
                pass
            finally:
                DriverManager.destroy_session(self)
            for func in self.__after_instance:
                func()

    def get_browser_log(self):
        """Return the browser console log as a list of formatted strings."""
        Log.info("Getting browser log")
        logs = self.get_driver().get_log('browser')
        list_logs = list()
        for log_entry in logs:
            log_str = ''
            for key in log_entry.keys():
                log_str += "%s: %s, " % (key, log_entry[key])
            list_logs.append(log_str)
        return list_logs
| 2.25 | 2 |
notebooks/128.1-BDP-path-cluster-again.py | zeou1/maggot_models | 0 | 12757849 | # %% [markdown]
# # THE MIND OF A MAGGOT
# %% [markdown]
# ## Imports
import os
import time
import warnings
from itertools import chain
import colorcet as cc
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, NodeMixin
from mpl_toolkits.mplot3d import Axes3D
from scipy.linalg import orthogonal_procrustes
from scipy.optimize import linear_sum_assignment
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import adjusted_rand_score
from sklearn.utils.testing import ignore_warnings
from tqdm import tqdm
import pymaid
from graspy.cluster import GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed, selectSVD
from graspy.models import DCSBMEstimator, RDPGEstimator, SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.simulations import rdpg
from graspy.utils import augment_diagonal, binarize, pass_to_ranks
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.pymaid import start_instance
from src.traverse import Cascade, TraverseDispatcher, to_transmission_matrix
from src.visualization import (
CLASS_COLOR_DICT,
adjplot,
barplot_text,
gridmap,
matrixplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
    """Save ``df`` as a CSV via the project helper.

    NOTE(review): ``**kws`` is accepted but silently ignored — confirm
    whether it should be forwarded to ``savecsv``.
    """
    savecsv(df, name)
# Load the 2020-04-01 weighted connectome and apply standard preprocessing.
mg = load_metagraph("G", version="2020-04-01")
mg = preprocess(
    mg,
    threshold=0,
    sym_threshold=False,
    remove_pdiff=True,
    binarize=False,
    weight="weight",
)
meta = mg.meta
# plot where we are cutting out nodes based on degree
degrees = mg.calculate_degrees()
fig, ax = plt.subplots(1, 1, figsize=(5, 2.5))
sns.distplot(np.log10(degrees["Total edgesum"]), ax=ax)
q = np.quantile(degrees["Total edgesum"], 0.05)  # 5th-percentile synapse cutoff
ax.axvline(np.log10(q), linestyle="--", color="r")
ax.set_xlabel("log10(total synapses)")
# remove low degree neurons
idx = meta[degrees["Total edgesum"] > q].index
mg = mg.reindex(idx, use_ids=True)
# remove center neurons # FIXME
idx = mg.meta[mg.meta["hemisphere"].isin(["L", "R"])].index
mg = mg.reindex(idx, use_ids=True)
mg = mg.make_lcc()  # keep only the largest connected component
mg.calculate_degrees(inplace=True)
meta = mg.meta
meta["inds"] = range(len(meta))  # positional indices used throughout below
adj = mg.adj
# %% [markdown]
# ## Setup for paths
# Output (descending / terminal) neuron classes, grouped by broad type.
out_groups = [
    ("dVNC", "dVNC;CN", "dVNC;RG", "dSEZ;dVNC"),
    ("dSEZ", "dSEZ;CN", "dSEZ;LHN", "dSEZ;dVNC"),
    ("motor-PaN", "motor-MN", "motor-VAN", "motor-AN"),
    ("RG", "RG-IPC", "RG-ITP", "RG-CA-LP", "dVNC;RG"),
    ("dUnk",),
]
# BUG FIX: a missing comma ("SEZ" "motor") concatenated two names into
# "SEZmotor", leaving only 4 names for the 5 groups above.
out_group_names = ["VNC", "SEZ", "motor", "RG", "dUnk"]
# Sensory (input) neuron classes, grouped by modality.
source_groups = [
    ("sens-ORN",),
    ("sens-MN",),
    ("sens-photoRh5", "sens-photoRh6"),
    ("sens-thermo",),
    ("sens-vtd",),
    ("sens-AN",),
]
source_group_names = ["Odor", "MN", "Photo", "Temp", "VTD", "AN"]
class_key = "merge_class"  # metadata column holding the class labels
# Flatten the grouped tuples into single lists of class labels.
sg = list(chain.from_iterable(source_groups))
og = list(chain.from_iterable(out_groups))
sg_name = "All"
og_name = "All"
from src.traverse import to_markov_matrix
np.random.seed(888)
max_hops = 10  # maximum path length for traversals
n_init = 100
p = 0.05  # transmission probability (only used by the cascade matrix below)
traverse = Cascade  # NOTE(review): unused after this point
simultaneous = True
transition_probs = to_transmission_matrix(adj, p)
# NOTE(review): the cascade transition matrix above is immediately
# overwritten by the random-walk (Markov) matrix — confirm this is intended.
transition_probs = to_markov_matrix(adj)
# Positional indices of source (sensory) and stop (output) neurons.
source_inds = meta[meta[class_key].isin(sg)]["inds"].values
out_inds = meta[meta[class_key].isin(og)]["inds"].values
# %% [markdown]
# ## Run paths
from src.traverse import RandomWalk
n_init = 1000  # number of random walks started per source neuron
paths = []
path_lens = []
for s in source_inds:
    # Walks stop on reaching an output neuron or after max_hops steps;
    # nodes are not revisited within a walk (allow_loops=False).
    rw = RandomWalk(
        transition_probs, stop_nodes=out_inds, max_hops=10, allow_loops=False
    )
    for n in range(n_init):
        rw.start(s)
        paths.append(rw.traversal_)
        path_lens.append(len(rw.traversal_))
# %% [markdown]
# ## Look at distribution of path lengths
# NOTE(review): `path_lens` was already filled inside the walk loop above,
# so this appends every length a second time. The plotted distribution
# shape is unchanged (all counts doubled) — confirm intended.
# Also note this loop variable `p` shadows the earlier probability `p`.
for p in paths:
    path_lens.append(len(p))
sns.distplot(path_lens)
# Bucket paths by their length (number of nodes visited).
paths_by_len = {i: [] for i in range(1, max_hops + 1)}
for p in paths:
    paths_by_len[len(p)].append(p)
# %% [markdown]
# ## Embed for a dissimilarity measure
from src.cluster import get_paired_inds
# ASE on the pass-to-ranks adjacency; concatenate out/in latent positions.
embedder = AdjacencySpectralEmbed(n_components=None, n_elbows=2)
embed = embedder.fit_transform(pass_to_ranks(adj))
embed = np.concatenate(embed, axis=-1)
# Align left-hemisphere embeddings to the right via Procrustes on known pairs.
lp_inds, rp_inds = get_paired_inds(meta)
R, _, = orthogonal_procrustes(embed[lp_inds], embed[rp_inds])
left_inds = meta[meta["left"]]["inds"]
right_inds = meta[meta["right"]]["inds"]  # NOTE(review): unused below
embed[left_inds] = embed[left_inds] @ R
from sklearn.metrics import pairwise_distances
# Node-to-node dissimilarity in the aligned embedding space.
pdist = pairwise_distances(embed, metric="cosine")
# %% [markdown]
# ##
# Keep only length-6 paths that actually terminate at an output neuron,
# then subsample to keep the O(n^2) path-distance matrix tractable.
subsample = 2 ** 11
paths = [p for p in paths_by_len[6] if p[-1] in out_inds]
print(len(paths))
if subsample != -1:
    chosen = np.random.choice(len(paths), size=subsample, replace=False)
    keep = set(chosen)
    paths = [p for i, p in enumerate(paths) if i in keep]
print(len(paths))
# %% [markdown]
# ##
# Pairwise path dissimilarity: all surviving paths have the same length, so
# for two paths we sum the embedding (cosine) distances between the nodes
# visited at each hop.
path_len = len(paths[0])
n_paths = len(paths)
path_dist_mat = np.zeros((n_paths, n_paths))
for i, p1 in enumerate(paths):
    for j, p2 in enumerate(paths):
        path_dist_mat[i, j] = sum(pdist[a, b] for a, b in zip(p1, p2))
# Path-by-node indicator: entry (i, v) is 1 + the hop at which path i visits
# node v, and 0 if path i never visits v.
path_indicator_mat = np.zeros((n_paths, len(adj)), dtype=int)
for i, p in enumerate(paths):
    for hop, node in enumerate(p):
        path_indicator_mat[i, node] = hop + 1
# %% [markdown]
# ## Cluster and look at distance mat
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
# Average-linkage hierarchical clustering on the condensed path distances;
# the same linkage orders both rows and columns of the clustermap.
Z = linkage(squareform(path_dist_mat), method="average")
sns.clustermap(
    path_dist_mat,
    figsize=(20, 20),
    row_linkage=Z,
    col_linkage=Z,
    xticklabels=False,
    yticklabels=False,
)
stashfig("clustermap")
# %% [markdown]
# ##
from graspy.embed import ClassicalMDS
from src.visualization import screeplot
# Classical MDS on the precomputed path-distance matrix; the singular-value
# plot is a scree plot for choosing how many dimensions to keep.
cmds = ClassicalMDS(dissimilarity="precomputed", n_components=10)
path_embed = cmds.fit_transform(path_dist_mat)
plt.plot(cmds.singular_values_, "o")
# %% [markdown]
# ##
from graspy.plot import pairplot
n_components = 5
# Scatter matrix of the first 5 CMDS dimensions of the path embedding.
pairplot(path_embed[:, :n_components], alpha=0.1)
# %% [markdown]
# ##
from graspy.cluster import AutoGMMCluster
n_components = 4  # number of CMDS dimensions fed to the clusterer
# Model selection over 1..20 mixture components (n_jobs=-2: all CPUs but one).
agmm = AutoGMMCluster(max_components=20, n_jobs=-2)
pred = agmm.fit_predict(path_embed[:, :n_components])
print(agmm.n_components_)
pairplot(path_embed[:, :n_components], alpha=0.1, labels=pred, palette=cc.glasbey_light)
# %% [markdown]
# ##
# Path-distance matrix sorted and colored by the GMM cluster assignments.
color_dict = dict(zip(np.unique(pred), cc.glasbey_light))
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
adjplot(
    path_dist_mat,
    sort_class=pred,
    cmap=None,
    center=None,
    ax=ax,
    gridline_kws=dict(linewidth=0.5, color="grey", linestyle="--"),
    ticks=False,
    colors=pred,
    palette=color_dict,
    cbar=False,
)
stashfig("adjplot-GMMoCMDSoPathDist")
# %% [markdown]
# ##
from sklearn.cluster import AgglomerativeClustering
# Alternative: flat agglomerative clustering (60 clusters, average linkage)
# directly on the precomputed path-distance matrix; overwrites `pred`.
ag = AgglomerativeClustering(n_clusters=60, affinity="precomputed", linkage="average")
pred = ag.fit_predict(path_dist_mat)
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
color_dict = dict(zip(np.unique(pred), cc.glasbey_light))
adjplot(
    path_dist_mat,
    sort_class=pred,
    cmap=None,
    center=None,
    ax=ax,
    gridline_kws=dict(linewidth=0.5, color="grey", linestyle="--"),
    ticks=False,
    colors=pred,
    palette=color_dict,
)
# %% [markdown]
# ##
# Signal flow gives a feedforward ordering; negate so sources sort first.
meta["signal_flow"] = -signal_flow(adj)
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
meta["class2"].fillna(" ", inplace=True)  # avoid NaN in the column sort keys
# Scattermap of the path-indicator matrix: columns are neurons (sorted by
# class then signal flow), rows are paths (sorted by cluster), point color
# encodes the hop ("weight") at which the path visits the neuron.
matrixplot(
    path_indicator_mat,
    ax=ax,
    plot_type="scattermap",
    col_sort_class=["class1", "class2"],
    col_class_order="signal_flow",
    col_ticks=False,
    col_meta=meta,
    col_colors="merge_class",
    col_palette=CLASS_COLOR_DICT,
    # col_ticks=False,
    row_sort_class=pred,
    row_ticks=False,
    sizes=(1, 1),
    hue="weight",
    palette="tab10",
    gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
# %% [markdown]
# ##
from sklearn.manifold import MDS
n_components = 8
metric = True  # NOTE(review): unused — the literal True is passed below
# Metric MDS on rank-transformed path distances (multiple restarts, parallel).
mds = MDS(
    n_components=n_components,
    metric=True,
    n_init=16,
    n_jobs=-1,
    dissimilarity="precomputed",
)
embed = mds.fit_transform(pass_to_ranks(path_dist_mat))
pairplot(embed, alpha=0.1)
# %%
# Load cluster labels stashed by a previous notebook, then build
# hierarchical label strings (levels 0/1/2, with and without hemisphere).
name = "122.1-BDP-silly-model-testing"
load = True
loc = f"maggot_models/notebooks/outs/{name}/csvs/stash-label-meta.csv"
if load:
    meta = pd.read_csv(loc, index_col=0)
    # The prediction columns were saved as floats; render them as clean
    # integer strings ("3.0" -> "3"), with missing values as "".
    for col in ["0_pred", "1_pred", "2_pred", "hemisphere"]:
        meta[col] = meta[col].astype(str)
        meta[col] = meta[col].replace("nan", "")
        # BUG FIX: Series.str.replace treats the pattern as a regex by
        # default, so ".0" matched ANY character followed by "0"
        # (e.g. "10.0" -> ""). Anchor and escape it so only a literal
        # trailing ".0" is stripped.
        meta[col] = meta[col].str.replace(r"\.0$", "", regex=True)
    meta["lvl0_labels"] = meta["0_pred"]
    meta["lvl1_labels"] = meta["0_pred"] + "-" + meta["1_pred"]
    meta["lvl2_labels"] = meta["0_pred"] + "-" + meta["1_pred"] + "-" + meta["2_pred"]
    meta["lvl0_labels_side"] = meta["lvl0_labels"] + meta["hemisphere"]
    meta["lvl1_labels_side"] = meta["lvl1_labels"] + meta["hemisphere"]
    meta["lvl2_labels_side"] = meta["lvl2_labels"] + meta["hemisphere"]
# %%
# %% [markdown]
# ##
# %% [markdown]
# ##
# inds = np.random.choice(len(path_dist_mat), replace=False, size=16000)
# sub_path_indicator_mat = path_indicator_mat[inds]
# %% [markdown]
# ##
# Same path-indicator scattermap as above, but with neurons grouped by the
# loaded level-2 cluster labels instead of the annotated classes.
fig, ax = plt.subplots(1, 1, figsize=(30, 20))
matrixplot(
    path_indicator_mat,
    ax=ax,
    plot_type="scattermap",
    col_sort_class=["lvl2_labels"],
    col_class_order="signal_flow",
    col_meta=meta,
    col_colors="merge_class",
    col_item_order=["merge_class", "signal_flow"],
    col_palette=CLASS_COLOR_DICT,
    col_ticks=False,
    row_sort_class=pred,
    # row_class_order="size",
    row_ticks=False,
    sizes=(1, 1),
    hue="weight",
    palette="Set1",
    gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
stashfig("path-indicator-map")
# %% [markdown]
# ## compute orders
# For each neuron, the average hop (1-based visit order) at which the paths
# passing through it do so; NaN for neurons visited by no path.
# BUG FIX: the original np.mean(np.nonzero(col)) averaged the *row indices*
# (arbitrary path IDs) of the paths visiting the neuron, not the visit-order
# values stored in the indicator matrix; it also emitted a RuntimeWarning
# (mean of empty slice) for unvisited neurons.
mean_orders = []
for n in range(path_indicator_mat.shape[1]):
    col = path_indicator_mat[:, n]
    visits = col[col > 0]  # nonzero entries are 1 + hop of each visit
    mean_order = visits.mean() if visits.size else np.nan
    mean_orders.append(mean_order)
meta["mean_order"] = mean_orders
# %% [markdown]
# ##
from src.visualization import palplot
# Final figure: the path-indicator scattermap with neurons ordered within
# class by mean visit order, plus a small legend panel for the hop colors.
fig, axs = plt.subplots(
    1, 2, figsize=(30, 20), gridspec_kw=dict(width_ratios=[0.95, 0.02], wspace=0.02)
)
# Take 6 of the 7 Set1 colors, skipping index 5 (one color per hop 1-6).
pal = sns.color_palette("Set1", n_colors=7)
pal = pal[:5] + pal[6:]
ax = axs[0]
matrixplot(
    path_indicator_mat,
    ax=ax,
    plot_type="scattermap",
    col_sort_class=["lvl2_labels"],
    col_class_order="signal_flow",
    col_meta=meta,
    col_colors="merge_class",
    col_item_order=["merge_class", "mean_order"],
    col_palette=CLASS_COLOR_DICT,
    col_ticks=True,
    tick_rot=90,
    row_sort_class=pred,
    # row_class_order="size",
    row_ticks=True,
    sizes=(1, 1),
    hue="weight",
    palette=pal,
    gridline_kws=dict(linewidth=0.3, color="grey", linestyle="--"),
)
ax = axs[1]
palplot(pal, cmap="Set1", ax=ax)
ax.set_title("Visit order")
stashfig("path-indicator-map")
| 1.695313 | 2 |
14/main.py | effie-0/Advent-of-Code-2018 | 0 | 12757850 | <gh_stars>0
# 12.14
import re
def _mix(recipes, w1, w2):
    """Combine the two current recipes, append the digit(s) of their sum to
    the scoreboard, and return the elves' new positions."""
    total = recipes[w1] + recipes[w2]
    if total >= 10:
        recipes.append(total // 10)
    recipes.append(total % 10)
    # Each elf moves forward 1 + their current recipe score, wrapping around.
    w1 = (w1 + recipes[w1] + 1) % len(recipes)
    w2 = (w2 + recipes[w2] + 1) % len(recipes)
    return w1, w2


def scores_after(n):
    """Part 1: the ten recipe scores immediately after the first n recipes."""
    recipes = [3, 7]
    w1, w2 = 0, 1
    while len(recipes) < n + 10:
        w1, w2 = _mix(recipes, w1, w2)
    return recipes[n:n + 10]


def recipes_before(pattern):
    """Part 2: number of recipes to the left of the first occurrence of
    `pattern` (a string of digits) on the scoreboard."""
    target = [int(c) for c in pattern]
    k = len(target)
    recipes = [3, 7]
    w1, w2 = 0, 1
    # The pattern could already be contained in the seed scoreboard.
    for start in range(len(recipes) - k + 1):
        if recipes[start:start + k] == target:
            return start
    while True:
        before = len(recipes)
        w1, w2 = _mix(recipes, w1, w2)
        # One or two digits were just appended: only windows ending past the
        # old scoreboard length can contain a new match.
        for end in range(max(before + 1, k), len(recipes) + 1):
            if recipes[end - k:end] == target:
                return end - k


if __name__ == '__main__':
    # BUG FIX: the original printed the raw re.Match object (not the answer
    # index) when the pattern was found within the part-1 scoreboard, and it
    # duplicated the recipe-generation loop; both parts now share _mix().
    input_num = 990941
    print(scores_after(input_num))
    print(recipes_before(str(input_num)))
| 3.09375 | 3 |