max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
servo/drv/sflag.py | neverware-mirrors/hdctools | 0 | 12767651 | # Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Software flag module to store and report binary status."""
import hw_driver
# pylint: disable=invalid-name
# This follows servod drv naming convention
class sflag(hw_driver.HwDriver):
  """A driver to store and report an on/off flag."""

  # This is a binary flag: only these values may be stored.
  VALID_VALUES = [0, 1]
  # This is not a constant but at the class level so that
  # it can be shared between set and get.
  # We use a list to allow for sharing across as the class will not make it
  # an instance variable if we write into the lists' 0th element.
  vstore = [None]

  def set(self, value):
    """Set the value to |value|.

    Args:
      value: int()-convertible flag value; must be 0 or 1.

    Raises:
      hw_driver.HwDriverError: if |value| is not 0 or 1.
    """
    # While these controls _should_ be using a map so that the values
    # are converted to on/off, we still need to make sure.
    value = int(value)
    if value not in self.VALID_VALUES:
      # BUG FIX: the message previously formatted self.vstore[0] (the old
      # stored value, which may still be None and would crash '%d') instead
      # of the rejected |value|.
      raise hw_driver.HwDriverError('Invalid value: %d' % value)
    self.vstore[0] = value

  def get(self):
    """Return the |self.vstore| for this flag, initializing on first read."""
    if self.vstore[0] is None:
      # Initialize with a 0 unless a default is provided
      self.vstore[0] = int(self._params.get('default_value', 0))
      if self.vstore[0] not in self.VALID_VALUES:
        # The default must have been invalid. This is because set() guards
        # against invalid values already - the only other source of values.
        raise hw_driver.HwDriverError('Invalid default: %d' %
                                      self.vstore[0])
    return self.vstore[0]
| 2.65625 | 3 |
High School/9th Grade APCSP (Python)/Unit 4/04.01.01.py | SomewhereOutInSpace/Computer-Science-Class | 0 | 12767652 | <reponame>SomewhereOutInSpace/Computer-Science-Class
def bin_value(num):
    """Return the binary digits of *num*: bin() output minus its '0b' prefix."""
    text = bin(num)
    return text[2:]
def remove0b(num):
    """Strip the first two characters (the '0b' marker) from the string *num*."""
    stripped = num[2:]
    return stripped
# Read two decimal integers, add them in base 2, and print the equation.
numberA = int(input(""))
numberB = int(input(""))

binaryA = bin_value(numberA)
binaryB = bin_value(numberB)

# Re-parse the binary strings as base-2 ints, add, then drop the '0b' prefix.
total = bin(int(binaryA, 2) + int(binaryB, 2))
cleaned = remove0b(total)

print(f"{binaryA} + {binaryB} = {cleaned}")
| 3.609375 | 4 |
src/camguard/generic_mail_client.py | matt-hires/camguard | 3 | 12767653 | <reponame>matt-hires/camguard
import logging
import ssl
from os import path
from email.message import EmailMessage
from smtplib import SMTP as Client
from smtplib import SMTPConnectError
from typing import ClassVar, List
from camguard.bridge_impl import MailClientImpl
from camguard.mail_client_settings import GenericMailClientSettings
LOGGER = logging.getLogger(__name__)
class GenericMailClient(MailClientImpl):
    """Mail client that delivers camguard notifications over SMTP/STARTTLS."""

    _PORT: ClassVar[int] = 587  # for starttls
    _MAIL_MSG: ClassVar[str] = "Camguard motion triggered, files where recorded and uploaded: {files}"
    _MAIL_SUBJECT: ClassVar[str] = "Camguard motion triggered"

    def __init__(self, settings: GenericMailClientSettings) -> None:
        self._settings = settings
        # NOTE(review): this context is created but never handed to starttls();
        # presumably intended for certificate validation — confirm before use.
        self._ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)

    def send_mail(self, files: List[str]) -> None:
        """Send one notification mail listing *files* via the configured host."""
        LOGGER.info(f"Sending mail with: {files}")
        sender = self._settings.sender_mail
        receiver = self._settings.receiver_mail
        try:
            with Client(host=self._settings.hostname, port=GenericMailClient._PORT) as client:
                client.starttls()
                client.login(self._settings.user, self._settings.password)
                client.send_message(self._create_msg(sender, receiver, files))
        except (SMTPConnectError, OSError) as client_err:
            LOGGER.error(f"Error while connecting to mail server: {self._settings.hostname}:{GenericMailClient._PORT}",
                         exc_info=client_err)

    def _create_msg(self, sender: str, receiver: str, files: List[str]) -> EmailMessage:
        """Build the notification message; only base names of *files* are listed."""
        msg = EmailMessage()
        msg["Subject"] = GenericMailClient._MAIL_SUBJECT
        msg["From"] = sender
        msg["To"] = receiver
        names = [path.basename(file) for file in files]
        msg.set_content(GenericMailClient._MAIL_MSG.format(files=names))
        return msg
| 2.1875 | 2 |
digital_land/datatype/address.py | digital-land/digital-land-python | 3 | 12767654 | <reponame>digital-land/digital-land-python<gh_stars>1-10
import re
from .datatype import DataType
comma_re = re.compile(r"(\s*,\s*){1,}")
hyphen_re = re.compile(r"(\s*-\s*){1,}")
class AddressDataType(DataType):
    """Normalise free-text address values into one comma-separated line."""

    def normalise(self, value, issues=None):
        """Return *value* with separators, spacing and quoting cleaned up."""
        # semi-colons and newlines both act as field separators -> commas
        value = value.replace(";", ",")
        value = ", ".join(value.split("\n"))
        # collapse runs of commas (with surrounding space) and trim the edges
        value = comma_re.sub(", ", value).strip(", ")
        # remove spaces around hyphen runs
        value = hyphen_re.sub("-", value)
        # squeeze whitespace, then drop straight and curly double-quotes
        value = " ".join(value.split())
        for quote in ('"', "”"):
            value = value.replace(quote, "")
        return value
| 3.21875 | 3 |
main.py | JuXinglong/WFLOP_SUGGA_Python | 23 | 12767655 | import numpy as np
import pandas as pd
import WindFarmGenetic # wind farm layout optimization using genetic algorithms classes
from datetime import datetime
import os
from sklearn.svm import SVR
import pickle
# Wind farm settings and algorithm settings
# parameters for the genetic algorithm
# parameters for the genetic algorithm (consumed by WindFarmGenetic below)
elite_rate = 0.2
cross_rate = 0.6
random_rate = 0.5
mutate_rate = 0.1

wt_N = 25  # number of wind turbines 15, 20, or 25

# NA_loc_array : not available location array, the index starting from 1
# L1 : wind farm, cells 121(inclusive) to 144(inclusive)
NA_loc_array = np.arange(121, 145, 1)
#
# # L2
# NA_loc_array = np.arange(61, 85, 1)
#
# # L3
# NA_loc_array = np.concatenate((np.arange(11, 144, 12), np.arange(12, 145, 12)))
#
# # L4
# NA_loc_array = np.concatenate((np.arange(6, 144, 12), np.arange(7, 145, 12)))
#
# # L5
# NA_loc_array = np.concatenate((np.arange(41, 105, 12), np.arange(42, 105, 12),
# np.arange(43, 105, 12),
# np.arange(44, 105, 12)))
#
# # L6
# NA_loc_array = np.concatenate((np.arange(1, 28, 12), np.arange(2, 28, 12),
# np.arange(12, 37, 12),
# np.arange(11, 37, 12),
# np.arange(109, 145, 12), np.arange(119, 145, 12),
# np.arange(110, 145, 12),
# np.arange(120, 145, 12),
# ))
#
# # L7
# NA_loc_array = np.arange(133, 145, 1)
#
# # L8
# NA_loc_array = np.arange(61, 73, 1)
#
# # L9
# NA_loc_array = np.arange(12, 145, 12)
#
# # L10
# NA_loc_array = np.arange(6, 145, 12)
#
# # L11
# NA_loc_array = np.concatenate((np.arange(42, 105, 12),
# np.arange(43, 105, 12)))
#
# # L12
# NA_loc_array = np.array((1, 2, 11, 12, 13, 24, 121, 132, 133, 134, 143, 144))
# convert numpy array to list, datatype convert
# convert numpy array to plain Python list (datatype convert)
NA_loc = NA_loc_array.tolist()
# L0 : no unavailable cells
# NA_loc = []

population_size = 120  # how many layouts in a population
iteration_times = 200  # how many iterations in a genetic algorithm run

n_inits = 100  # number of initial populations, n_inits >= run_times
run_times = 100  # number of different initial populations

# wind farm size, cells
cols_cells = 12  # number of cells each row
rows_cells = 12  # number of cells each column
cell_width = 77.0 * 3  # unit : m

# all data will be saved in the data folder
data_folder = "data"
if not os.path.exists(data_folder):
    os.makedirs(data_folder)

# Create a WindFarmGenetic object.
# rows/cols: wind farm land dimensions; N: number of wind turbines;
# NA_loc: locations excluded from the farm (landowners not participating);
# pop_size: individuals per population; iteration: GA iteration count.
wfg = WindFarmGenetic.WindFarmGenetic(rows=rows_cells, cols=cols_cells, N=wt_N, NA_loc=NA_loc, pop_size=population_size,
                                      iteration=iteration_times, cell_width=cell_width, elite_rate=elite_rate,
                                      cross_rate=cross_rate, random_rate=random_rate, mutate_rate=mutate_rate)
# Specify the wind distribution
# wind distribution is discrete (number of wind speeds) by (number of wind directions)
# wfg.init_1_direction_1_N_speed_13()
# file name to store the wind power distribution SVR model
# svr_model_filename = 'svr_1s1d_N_13.svr'
# wfg.init_4_direction_1_speed_13()
# svr_model_filename = 'svr_1s4d_13.svr'
# Wind distribution: discrete, (number of wind speeds) x (number of directions).
# Here: 6 directions, 1 speed, 13 m/s (alternatives are commented above).
wfg.init_6_direction_1_speed_13()
# svr_model_filename = 'svr_1s6d_13.svr'

################################################
# generate initial populations
################################################
# initial population saved folder
init_pops_data_folder = "{}/init_data".format(data_folder)
if not os.path.exists(init_pops_data_folder):
    os.makedirs(init_pops_data_folder)
# Generate initial populations to start with and store them, so every
# method starts from the same initial population and the final results
# are comparable fairly.
for i in range(n_inits):
    wfg.gen_init_pop_NA()
    wfg.save_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))
# Create results folder
# results folder
# adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9
# result_CGA_20190422213715.dat : run time and best eta for CGA method
# Results folders; e.g.
# adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9
# result_CGA_20190422213715.dat : run time and best eta for CGA method
results_data_folder = "data/results"
if not os.path.exists(results_data_folder):
    os.makedirs(results_data_folder)
# cg: conventional genetic algorithm
# ag: adaptive genetic algorithm
# sg: support vector regression guided genetic algorithm
cg_result_folder = "{}/cg".format(results_data_folder)
if not os.path.exists(cg_result_folder):
    os.makedirs(cg_result_folder)
ag_result_folder = "{}/ag".format(results_data_folder)
if not os.path.exists(ag_result_folder):
    os.makedirs(ag_result_folder)
sg_result_folder = "{}/sg".format(results_data_folder)
if not os.path.exists(sg_result_folder):
    os.makedirs(sg_result_folder)

# result_arr: run_times x 2; column 0 is run time in seconds per run and
# column 1 is the conversion efficiency for the run (reused by all methods).
result_arr = np.zeros((run_times, 2), dtype=np.float32)

# CGA: Conventional genetic algorithm
for i in range(0, run_times):  # run times
    print("run times {} ...".format(i))
    # load initial population
    wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))
    # run the conventional genetic algorithm and return run time and conversion efficiency
    run_time, eta = wfg.conventional_genetic_alg(i, result_folder=cg_result_folder)
    result_arr[i, 0] = run_time
    result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
# save the run times and etas to a file
filename = "{}/result_conventional_{}.dat".format(cg_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
# Run adaptive genetic algorithm (AGA)
# AGA: adaptive genetic algorithm
# AGA: adaptive genetic algorithm — same protocol as the CGA loop above.
for i in range(0, run_times):  # run times
    print("run times {} ...".format(i))
    wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))
    run_time, eta = wfg.adaptive_genetic_alg(i, result_folder=ag_result_folder)
    result_arr[i, 0] = run_time
    result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = "{}/result_adaptive_{}.dat".format(ag_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
# Run support vector regression guided genetic algorithm (SUGGA)
# Generate wind distribution surface
#############################################
# generate wind distribution surface
#############################################
n_mc_samples = 10000  # svr train data, number of layouts to average

wds_data_folder = "{}/wds".format(data_folder)
if not os.path.exists(wds_data_folder):
    os.makedirs(wds_data_folder)
# mc : monte-carlo
# Number of layouts to generate as the training data for regression
# to build the power distribution surface.
# mc_layout.dat stores layouts only with 0s and 1s (1 = turbine here).
# mc_layout_NA.dat additionally uses 2 for not-available cells.
# Each file has 10000 lines; each line is a layout.
train_mc_layouts, train_mc_layouts_NA = WindFarmGenetic.LayoutGridMCGenerator.gen_mc_grid_with_NA_loc(rows_cells,
                                                                                                      cols_cells,
                                                                                                      n_mc_samples,
                                                                                                      wt_N, NA_loc,
                                                                                                      "{}/mc_layout.dat".format(
                                                                                                          wds_data_folder),
                                                                                                      "{}/mc_layout_NA.dat".format(
                                                                                                          wds_data_folder))
# Wind distribution must match the one selected above for wfg:
# wfg.init_1_direction_1_N_speed_13()
# svr_model_filename = 'svr_1s1d_N_13.svr'
# wfg.init_4_direction_1_speed_13()
# svr_model_filename = 'svr_1s4d_13.svr'
# wfg.init_6_direction_1_speed_13()
svr_model_filename = 'svr_1s6d_13.svr'

# load Monte-Carlo layouts from a text file: 10000 random layouts
layouts = np.genfromtxt("{}/mc_layout.dat".format(wds_data_folder), delimiter=" ", dtype=np.int32)
# Generate the location index coordinates and the average power output at
# each one (cell 1 has location index (0,0), cell 2 has (1,0)); store the
# coordinates in x.dat and the average power in y.dat.
wfg.mc_gen_xy_NA(rows=rows_cells, cols=cols_cells, layouts=layouts, n=n_mc_samples, N=wt_N,
                 xfname="{}/x.dat".format(wds_data_folder),
                 yfname="{}/y.dat".format(wds_data_folder))

# read index location coordinates
x_original = pd.read_csv("{}/x.dat".format(wds_data_folder), header=None, nrows=rows_cells * cols_cells,
                         delim_whitespace=True, dtype=np.float32)
x_original = x_original.values
# read the power output of each index location coordinate
y_original = pd.read_csv("{}/y.dat".format(wds_data_folder), header=None, nrows=rows_cells * cols_cells,
                         delim_whitespace=True, dtype=np.float32)
y_original = y_original.values.flatten()

# create an SVR object, fit the power distribution surface and persist it
svr_model = SVR(kernel='rbf', C=2000.0, gamma=0.3, epsilon=.1)
svr_model.fit(x_original, y_original)
pickle.dump(svr_model, open("{}/{}".format(wds_data_folder, svr_model_filename), 'wb'))
# This is how to load the SVR model back from a file:
# svr_model = pickle.load(open("{}/{}".format(wds_data_folder,svr_model_filename), 'rb'))

# SUGGA: support vector regression guided genetic algorithm
for i in range(0, run_times):  # run times
    print("run times {} ...".format(i))
    wfg.load_init_pop_NA("{}/init_{}.dat".format(init_pops_data_folder, i),
                         "{}/init_{}_NA.dat".format(init_pops_data_folder, i))
    run_time, eta = wfg.sugga_genetic_alg(i, svr_model=svr_model, result_folder=sg_result_folder)
    result_arr[i, 0] = run_time
    result_arr[i, 1] = eta
time_stamp = datetime.now().strftime("%Y%m%d%H%M%S")
filename = "{}/result_sugga_{}.dat".format(sg_result_folder, time_stamp)
np.savetxt(filename, result_arr, fmt='%f', delimiter=" ")
| 2.40625 | 2 |
04 - Classes-inheritance-oops/17-classes-inheritance-setters-shallow.py | python-demo-codes/basics | 2 | 12767656 | # HEAD
# Classes - Setters are shallow
# DESCRIPTION
# Describes how setting of inherited attributes and values function
# RESOURCES
#
# Creating Parent class
class Parent:
    """Base class; instances shadow the class-level par_cent default."""

    # Class attribute: the default shared value before any instance overrides it.
    par_cent = "parent"

    def __init__(self, val):
        """Store *val* on the instance (shadowing the class attribute) and announce it."""
        self.par_cent = val
        print("Parent Instantiated with ", self.par_cent)
# Creating ParentTwo class
class Child(Parent):
    """Child that initialises Parent explicitly through a stored super() proxy."""

    def __init__(self, val_two):
        """Announce the child value, then delegate assignment to Parent.__init__."""
        print("Child Instantiated with ", val_two)
        # Keep the super() proxy on the instance so Parent attributes can be
        # reached through it later; then run Parent's initialiser once.
        parent_proxy = super()
        self.p = parent_proxy
        parent_proxy.__init__(val_two)
# Demo: instantiating Child runs both prints, then the attribute is readable
# either directly or through the stored super() proxy.
obj = Child(10)
print("""
Value of par_cent is assigned &
fetched from Child class &
default refs of parent remains
""")
print("obj.par_cent", obj.par_cent)
print("obj.p.par_cent, id(obj.p), id(obj)", obj.p.par_cent, id(obj.p), id(obj))
| 4.46875 | 4 |
PyMRFLSSVM/Utils/ReadMat.py | yirencaifu/sample_python_trading_algo | 0 | 12767657 | <filename>PyMRFLSSVM/Utils/ReadMat.py
# -*- coding: utf-8 -*-
__author__ = 'spacegoing'
from scipy.io import loadmat
import numpy as np
def loadMatPairwise():
    """Load the pairwise-learning fixture from tmpData/learnPairwise.mat.

    Returns:
        (unary_observed, pairwise): a C-contiguous (128, 128, 2) double array
        of unary potentials, and a C-contiguous double edge array whose first
        two columns are converted from MATLAB 1-based to 0-based indices.
    """
    mat = loadmat('tmpData/learnPairwise.mat')
    observed_unary = mat['instance']['unary'][0][0]
    observed_unary = np.reshape(observed_unary, [128, 128, 2], order='F')
    pairwise_raw = mat['instance']['pairwise'][0][0]

    # Vectorized C-order copies (previously element-by-element Python loops).
    unary_observed = np.ascontiguousarray(observed_unary[:128, :128, :2], dtype=np.double)

    pairwise = np.zeros(pairwise_raw.shape, order='C', dtype=np.double)
    pairwise[:, :3] = pairwise_raw[:, :3]
    # MATLAB indices are 1-based; shift the two endpoint columns to 0-based.
    pairwise[:, 0] -= 1
    pairwise[:, 1] -= 1
    return unary_observed, pairwise
def loadTestInf():
    """Load inference test fixtures from tmpData/testInf.mat.

    Returns:
        (unary_observed, pairwise, theta, y_inferred, z_inferred, e) where
        unary_observed is a C-contiguous (128, 128, 2) double array, pairwise
        is a C-contiguous double copy of the first three edge columns, and
        theta packs w as [w[0,0], diff(w[:,0]), diff(w[:,1])] (length 19).
    """
    mat = loadmat('tmpData/testInf.mat')
    observed_unary = mat['unary_observed'].astype(np.double)
    pairwise_raw = mat['pairwise'].astype(np.double)
    w = mat['w']
    y_inferred = mat['y_inferred'].reshape([128, 128], order='F')
    z_inferred = mat['z_inferred'].reshape([9, 64], order='F')
    e = mat['e']

    # Vectorized C-order copies (previously element-by-element Python loops).
    unary_observed = np.ascontiguousarray(observed_unary[:128, :128, :2], dtype=np.double)

    pairwise = np.zeros(pairwise_raw.shape, order='C', dtype=np.double)
    pairwise[:, :3] = pairwise_raw[:, :3]

    # theta[0] = w[0,0]; theta[1:10] / theta[10:19] are the first differences
    # of w's two columns (identical to the original index loop).
    theta = np.zeros(1 + 2 * 9, order='C', dtype=np.double)
    theta[0] = w[0, 0]
    theta[1:10] = np.diff(w[:10, 0])
    theta[10:19] = np.diff(w[:10, 1])
    return unary_observed, pairwise, theta, y_inferred, z_inferred, e
| 2.390625 | 2 |
main.py | nazariinyzhnyk/kaggle_fraud_detection | 0 | 12767658 | <reponame>nazariinyzhnyk/kaggle_fraud_detection
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator
pd.set_option('display.max_columns', None)

# Raw competition data: identity and transaction tables share TransactionID.
identity_train = pd.read_csv('data/train_identity.csv')
transactions_train = pd.read_csv('data/train_transaction.csv')
identity_test = pd.read_csv('data/test_identity.csv')
transactions_test = pd.read_csv('data/test_transaction.csv')

# Left join keeps every transaction, even ones without an identity record.
train = pd.merge(transactions_train, identity_train, on='TransactionID', how='left')
test = pd.merge(transactions_test, identity_test, on='TransactionID', how='left')
del identity_train, transactions_train, identity_test, transactions_test
print('Data loaded and merged.')
print('Num of columns train: %i' % train.shape[1])
print('Num of rows train: %i' % train.shape[0])
print('Num of columns test: %i' % test.shape[1])
print('Num of rows test: %i' % test.shape[0])
# Ratio features: each numeric column divided by its group-wise mean/std.
# Refactored from ~32 copy-pasted assignments into data-driven loops; the
# produced column names, values and creation order are unchanged (the
# D15/card4 columns are recomputed twice, exactly as before).
_RATIO_SPECS = [
    ('TransactionAmt', ('card1', 'card4')),
    ('id_02', ('card1', 'card4')),
    ('D15', ('card1', 'card4')),
    ('D15', ('addr1', 'card4')),
]
for df in (train, test):
    for num_col, group_cols in _RATIO_SPECS:
        for agg in ('mean', 'std'):
            for group_col in group_cols:
                name = '{}_to_{}_{}'.format(num_col, agg, group_col)
                df[name] = df[num_col] / df.groupby([group_col])[num_col].transform(agg)

# Split email domains into up to three dot-separated components.
for df in (train, test):
    for col in ('P_emaildomain', 'R_emaildomain'):
        parts = df[col].str.split('.', expand=True)
        df[['{}_1'.format(col), '{}_2'.format(col), '{}_3'.format(col)]] = parts

print('Features extracted.')
# Drop low-signal columns in either split: >90% missing, >90% one value,
# or constant.
many_null_cols = [col for col in train.columns if train[col].isnull().sum() / train.shape[0] > 0.9]
many_null_cols_test = [col for col in test.columns if test[col].isnull().sum() / test.shape[0] > 0.9]
big_top_value_cols = [col for col in train.columns if train[col].value_counts(dropna=False, normalize=True).values[0] > 0.9]
big_top_value_cols_test = [col for col in test.columns if test[col].value_counts(dropna=False, normalize=True).values[0] > 0.9]
one_value_cols = [col for col in train.columns if train[col].nunique() <= 1]
one_value_cols_test = [col for col in test.columns if test[col].nunique() <= 1]
cols_to_drop = list(set(many_null_cols + many_null_cols_test + big_top_value_cols + big_top_value_cols_test + one_value_cols + one_value_cols_test))
# Never drop the label column.
cols_to_drop.remove('isFraud')
train = train.drop(cols_to_drop, axis=1)
test = test.drop(cols_to_drop, axis=1)
print('Redundant columns dropped.')

# Label-encode categoricals over the union of train+test values so both
# splits share one mapping.
cat_cols = ['id_12', 'id_13', 'id_14', 'id_15', 'id_16', 'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_23', 'id_24', 'id_25', 'id_26', 'id_27', 'id_28', 'id_29',
            'id_30', 'id_31', 'id_32', 'id_33', 'id_34', 'id_35', 'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'ProductCD', 'card4', 'card6', 'M4', 'P_emaildomain',
            'R_emaildomain', 'card1', 'card2', 'card3', 'card5', 'addr1', 'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8', 'M9',
            'P_emaildomain_1', 'P_emaildomain_2', 'P_emaildomain_3', 'R_emaildomain_1', 'R_emaildomain_2', 'R_emaildomain_3']
for col in cat_cols:
    if col in train.columns:
        le = LabelEncoder()
        le.fit(list(train[col].astype(str).values) + list(test[col].astype(str).values))
        train[col] = le.transform(list(train[col].astype(str).values))
        test[col] = le.transform(list(test[col].astype(str).values))

# Features: sort by time, drop identifiers. Note X keeps the isFraud column
# (H2O is given the label name at train time below).
X = train.sort_values('TransactionDT').drop(['TransactionDT', 'TransactionID'], axis=1)
# y = train.sort_values('TransactionDT')['isFraud']
# X_test = test.sort_values('TransactionDT').drop(['TransactionDT', 'TransactionID'], axis=1)
X_test = test.drop(['TransactionDT', 'TransactionID'], axis=1)
del train
test = test[["TransactionDT", 'TransactionID']]
def clean_inf_nan(df):
    """Return a copy of *df* with +inf/-inf entries replaced by NaN."""
    return df.replace({np.inf: np.nan, -np.inf: np.nan})
X = clean_inf_nan(X)
X_test = clean_inf_nan(X_test)

# Mark low-cardinality (<4 unique values) columns as categorical for H2O.
types_dict = {}
for (columnName, columnData) in X.iteritems():
    if len(np.unique(columnData.values)) < 4:
        types_dict[columnName] = "categorical"
    else:
        types_dict[columnName] = "numeric"

h2o.init()
data_train_h2o = h2o.H2OFrame(X, column_types=types_dict)
# The test frame has no label column, so drop it from the type mapping.
del types_dict['isFraud']
data_test_h2o = h2o.H2OFrame(X_test, column_types=types_dict)
# Classification requires the label to be a factor.
data_train_h2o['isFraud'] = data_train_h2o['isFraud'].asfactor()

model = H2OGradientBoostingEstimator(
    ntrees=1000,
    learn_rate=0.01,
    stopping_rounds=5, stopping_tolerance=1e-4, stopping_metric="AUC",
    sample_rate=0.8,
    col_sample_rate=0.8,
    seed=1234,
    score_tree_interval=10, nfolds=5, max_depth=10)

# NOTE(review): validation_frame is the training frame itself, so validation
# metrics will be optimistic; the 5-fold CV summary below is the honest one.
model.train(y='isFraud', training_frame=data_train_h2o, model_id="GBM_Fraud_depth10",
            validation_frame=data_train_h2o)
print(model.cross_validation_metrics_summary())

f = model.predict(test_data=data_test_h2o)
f = f.as_data_frame()
h2o.save_model(model=model, path="notebooks/models/", force=True)
h2o.cluster().shutdown()
print(f.head())
print(f.predict.value_counts())

# '1.0' is the predicted probability column for the positive (fraud) class.
submission_frame = pd.DataFrame(data={'TransactionID': test.TransactionID, 'isFraud': f['1.0']})
submission_frame.to_csv('submit/submission.csv', index=False)
| 2.40625 | 2 |
inventory/migrations/0002_auto_20200919_1121.py | Kgford/TCLI | 0 | 12767659 | <filename>inventory/migrations/0002_auto_20200919_1121.py
# Generated by Django 3.0.8 on 2020-09-19 15:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drops quantity/date tracking from Inventory,
    adds an ``rtv`` field to Events, and relaxes several Inventory fields to
    nullable CharFields defaulting to 'N/A'."""

    dependencies = [
        ('inventory', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='inventory',
            name='quantity',
        ),
        migrations.RemoveField(
            model_name='inventory',
            # [sic] misspelling matches the model field; do not "fix" it here.
            name='recieved_date',
        ),
        migrations.RemoveField(
            model_name='inventory',
            name='shipped_date',
        ),
        migrations.AddField(
            model_name='events',
            name='rtv',
            # NOTE(review): verbose_name 'Item Serial number' on a field named
            # rtv looks copy-pasted — confirm the intended label.
            field=models.CharField(default='N/A', max_length=20, verbose_name='Item Serial number'),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='category',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='category'),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='description',
            field=models.CharField(default='N/A', max_length=200, null=True, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='modelname',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='modelname'),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='serial_number',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='serial number'),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='shelf',
            field=models.CharField(default='N/A', max_length=10, null=True, verbose_name='shelf'),
        ),
        migrations.AlterField(
            model_name='inventory',
            name='status',
            field=models.CharField(default='N/A', max_length=50, null=True, verbose_name='status'),
        ),
    ]
| 1.75 | 2 |
ChocoBallDetector/image_preprocess.py | tok41/choco-ball-statistics | 0 | 12767660 |
# -*- coding: utf-8 -*-
"""
目的
- アノテーション作業の前の一番最初の画像データの前処理
- 画像サイズを小さくする & 画像サイズを揃える
"""
import os
import glob
import numpy as np
from PIL import Image
import argparse
def main(args):
    """Resize every image matching args.img_filter in args.img_dir and save
    the results into args.out_dir.

    Portrait images are rotated to landscape, then each image is scaled to a
    fixed height of 302 px with its aspect ratio preserved.
    """
    img_files = glob.glob(os.path.join(args.img_dir, args.img_filter))
    print('image_dir : ', args.img_dir, ', filter : ', args.img_filter)
    print('image file number : ', len(img_files))
    # Images may differ in size but are assumed to share the same aspect
    # ratio, so fixing the height to 302 is enough; if that assumption ever
    # breaks, force-resize to (402, 302) instead.
    height_size = 302
    for img_file in img_files:
        org_img = Image.open(img_file)
        img = org_img.copy()
        if img.height > img.width:  # normalise orientation to landscape
            img = img.rotate(90, expand=True)
        scale = float(height_size) / img.height
        res_img = img.resize((int(img.width * scale), height_size))
        # BUG FIX: use os.path.basename() instead of img_file.split('/')[-1],
        # which broke on Windows-style path separators.
        res_img.save(os.path.join(args.out_dir, os.path.basename(img_file)))
        print(img_file, np.array(org_img).shape, '->', np.array(res_img).shape)
    return 0
if __name__ == '__main__':
    # CLI entry point: source dir, output dir, and a glob filter for images.
    parser = argparse.ArgumentParser(description='argparser')
    parser.add_argument('--img_dir', type=str, default='data/org_images')
    parser.add_argument('--out_dir', type=str, default='data/res_images')
    parser.add_argument('--img_filter', type=str, default='*.JPG')
    args = parser.parse_args()
    main(args)
| 3.234375 | 3 |
python/25-solution/actions.py | gras/SensorWorkshop | 0 | 12767661 | <filename>python/25-solution/actions.py
# Making Sense of Sensors Workshop
# Educators Edition 2015
#
# 25-FindAndGrabTribble
# actions.py
'''
@author: Dead Robot Society
'''
from time import sleep
import movement as move
import sensor
##################################
# public routines
##################################
'''
rotate to the left, until a red pom is seen
'''
def findTribble() :
    # Spin left in place, sampling the red-pom position until it becomes
    # positive, then stop the spin.
    print "findTribble()"
    tribbleFound = 0
    move.spinLeftNoStop()
    while not tribbleFound :
        sleep(0.2)  # short pause between position samples
        pos = sensor.getRedPosition()
        print "Pos: ", pos
        if pos > 0:
            move.stop()
            tribbleFound = 1
    print "found Tribble..."
'''
rotate until the pom is centered
then move a bit closer
repeat until the pom is close enough to grab
'''
def gotoTribble():
print "gotoTribble()"
tribbleNear = False
while not tribbleNear:
move.forward(80, 0.2)
centered = False
while not centered:
pos = sensor.getRedPosition()
print "Pos: ", pos
if pos > -10 & pos < 10:
centered = True
elif pos > 0:
move.spinRight()
else:
move.spinLeft()
tribbleNear = sensor.pomInRange()
print "near tribble.."
'''
rotate until the pom is centered
then move a bit closer
repeat until the pom is close enough to grab
'''
def grabTribble():
    # Lower the arm, drive forward onto the pom, close the claw around it,
    # lift the arm, then open the claw again.
    print "grabTribble()"
    move.armDown()
    move.forward(50, 1.0)
    move.clawClosed()
    move.armUp()
    move.clawOpen()
    print "grabbed tribble..."
##################################
# helper routines
##################################
'''
This function allows us to stop the code
for testing
'''
def DEBUG() :
    # Testing hook: announce, shut everything down cleanly, and exit.
    print "DEBUG"
    shutdown()
    exit()
##################################
# initialization/shutdown routines
##################################
'''
This function announces the program and
initializes the movement and sensor classes
'''
def init(codeBase) :
    # Announce the program (codeBase) and initialise the movement and
    # sensor subsystems.
    print "starting ", codeBase
    move.init()
    sensor.init()
    print "Initialized"
'''
This function shuts everything down
'''
def shutdown() :
    # Shut down the movement and sensor subsystems.
    print "shutting down..."
    move.shutdown()
    sensor.shutdown()
    print "finished"
| 3.546875 | 4 |
Deep Learning models training/BOW_ann.py | kwstas94/Disaster-Detection-Thesis | 0 | 12767662 | <reponame>kwstas94/Disaster-Detection-Thesis<gh_stars>0
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import nltk
import sklearn
from sklearn.preprocessing import Imputer
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.callbacks import History,EarlyStopping,ModelCheckpoint
#import sys
#sys.setrecursionlimit(1000)
# Number of tweets (rows) used from the cleaned disaster dataset.
Tweets_number = 79187

def read_dataset():
    """Load the cleaned disaster-tweet CSV; the 'text' column is forced to str."""
    dataset = pd.read_csv('Clean_Disasters_T_79187_.csv',delimiter = ',' ,converters={'text': str}, encoding = "ISO-8859-1")
    return dataset
corpus = []

def make_corpus():
    """Return the first Tweets_number tweet texts as a plain list.

    NOTE(review): relies on a module-level ``dataset`` (presumably assigned
    from read_dataset() elsewhere in the file) — confirm it is set before
    this is called.
    """
    # List comprehension instead of the original append loop; same result.
    return [dataset.text[i] for i in range(Tweets_number)]
def Bow_Split(corpus, dataset, max_features):
    """Build a bag-of-words matrix from *corpus* and integer-encode the labels.

    Args:
        corpus: list of tweet strings.
        dataset: DataFrame with a 'choose_one' label column.
        max_features: vocabulary size passed to CountVectorizer.

    Returns:
        (X, y): dense document-term matrix and 0/1 label array.
    """
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.preprocessing import LabelEncoder  # unused OneHotEncoder import removed

    # Count Vectorizer limited to the most frequent terms.
    cv = CountVectorizer(max_features=max_features)
    X = cv.fit_transform(corpus).toarray()
    # Encode the string labels to integers.
    y = dataset.loc[:, 'choose_one'].values
    labelencoder_y = LabelEncoder()
    y = labelencoder_y.fit_transform(y)
    return X, y
def Test_Train_Split(X, y, test_size):
    """Split X/y into stratified train/test sets and standard-scale features.

    The scaler is fit on the training portion only and applied to both
    splits, avoiding test-set leakage.  Returns X_train, X_test, y_train,
    y_test.
    """
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    # BUG FIX: the original stratified on the module-level `dataset` global
    # instead of the `y` argument, silently coupling this helper to script
    # globals.  Stratifying on `y` is equivalent and self-contained.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=42, shuffle=True, stratify=y)
    # Feature scaling (fit on train only).
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)
    return X_train, X_test, y_train, y_test
def build_ANN(X_train, X_test, y_train, y_test):
    """Build, compile and fit a 3-hidden-layer binary classifier.

    NOTE(review): despite the name `classifier`, this returns the Keras
    History object produced by fit() (the model itself is discarded) -- the
    caller binds the result as `history` and plots its curves.
    """
    history = History()
    # history = model.fit(X_train, y_train, batch_size=150, validation_data=(X_test, y_test),
    #           epochs=10, verbose=1,callbacks=[history])
    ### Initialising the ANN
    classifier = Sequential()
    # Input layer + first hidden layer.  (The comment in the original
    # described an 11-input layout, chosen empirically as (11+1)/2 output
    # nodes, but the actual input_dim here is 500 BOW features.)
    classifier.add(Dense(output_dim = 32, init = 'uniform', activation = 'relu', input_dim = 500))
    classifier.add(Dropout(p = 0.5))
    # Second and third hidden layers.
    classifier.add(Dense(output_dim = 64, init = 'uniform', activation = 'relu'))
    classifier.add(Dropout(p = 0.5))
    classifier.add(Dense(output_dim = 128, init = 'uniform', activation = 'relu'))
    classifier.add(Dropout(p = 0.5))
    # Add more layers if needed
    #classifier.add(Dense(output_dim = 256, init = 'uniform', activation = 'relu'))
    #classifier.add(Dropout(p = 0.5))
    #classifier.add(Dense(output_dim = 512, init = 'uniform', activation = 'relu'))
    #classifier.add(Dropout(p = 0.5))
    #classifier.add(Dense(output_dim = 1024, init = 'uniform', activation = 'relu'))
    #classifier.add(Dropout(p = 0.5))
    #classifier.add(Dense(output_dim = 2048, init = 'uniform', activation = 'relu'))
    #classifier.add(Dropout(p = 0.5))
    # Output layer: one sigmoid node for the binary decision.
    classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
    # Compiling the ANN
    classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
    # Fitting the ANN to the training set.  fit() returns a History object,
    # which is what this function actually hands back to the caller.
    classifier = classifier.fit(X_train, y_train,validation_data=(X_test,y_test), batch_size = 64, nb_epoch = 120,callbacks=[history])
    return classifier
def predict(classifier, X_test):
    """Run the trained classifier on X_test and binarise scores at 0.5."""
    probabilities = classifier.predict(X_test)
    return probabilities > 0.5
from sklearn.metrics import confusion_matrix,accuracy_score
from keras.models import load_model
## Making the Confusion Matrix
##cm = confusion_matrix(y_test, y_pred)
def save_model(save_filepath):
    """Persist the module-level `classifier` model to save_filepath.

    NOTE(review): relies on the global `classifier` rather than a parameter.
    """
    classifier.save(save_filepath)
    #classifier.summary()
def neural_print(title):
    """Render a diagram of the global `classifier` with ann_visualizer."""
    from ann_visualizer.visualize import ann_viz;
    ann_viz(classifier, title=title)
def load_my_model(save_filepath):
    """Load a previously saved Keras model from disk."""
    return load_model(save_filepath)
##ANN with cross validation
def Build_CV_ANN(X_train, X_test, y_train, y_test):
    """Evaluate the ANN with 10-fold cross-validation on the training set.

    Prints the mean and std of the fold accuracies and returns the
    (unfitted) KerasClassifier wrapper.
    """
    from keras.wrappers.scikit_learn import KerasClassifier
    from sklearn.model_selection import cross_val_score
    from keras.models import Sequential
    from keras.layers import Dense
    history = History()
    def build_classifier():
        # Same topology as build_ANN but with lighter (0.1) dropout.
        classifier = Sequential()
        classifier.add(Dense(output_dim = 32, init = 'uniform', activation = 'relu', input_dim = 500))
        classifier.add(Dropout(p = 0.1))
        # Second and third hidden layers.
        classifier.add(Dense(output_dim = 64, init = 'uniform', activation = 'relu'))
        classifier.add(Dropout(p = 0.1))
        classifier.add(Dense(output_dim = 128, init = 'uniform', activation = 'relu'))
        classifier.add(Dropout(p = 0.1))
        # classifier.add(Dense(output_dim = 256, init = 'uniform', activation = 'relu'))
        # classifier.add(Dropout(p = 0.1))
        # classifier.add(Dense(output_dim = 512, init = 'uniform', activation = 'relu'))
        # classifier.add(Dropout(p = 0.1))
        # classifier.add(Dense(output_dim = 1024, init = 'uniform', activation = 'relu'))
        # classifier.add(Dropout(p = 0.1))
        # Output layer: a single sigmoid node.
        classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
        classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
        return classifier
    classifier = KerasClassifier(build_fn = build_classifier, batch_size = 64, epochs = 120,callbacks=[history])
    accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1)
    mean = accuracies.mean()
    variance = accuracies.std()
    print('CV Mean:',mean)
    print('CV Variance',variance)
    return classifier
def make_curve(history):
    """Plot training/validation accuracy and loss curves from a Keras History.

    NOTE(review): uses the 'acc'/'val_acc' keys of older Keras; newer
    releases name them 'accuracy'/'val_accuracy' -- confirm against the
    installed version.
    """
    _plot_history_metric(history, 'acc', 'val_acc', 'model accuracy', 'accuracy')
    _plot_history_metric(history, 'loss', 'val_loss', 'model loss', 'loss')

def _plot_history_metric(history, train_key, val_key, title, ylabel):
    """Plot one train/validation metric pair against epoch and show it."""
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
###### MAIN ##########
save_filepath = '**path**/Ann_BOW_clf.h5'
# Read the cleaned tweet dataset.
dataset = read_dataset()
# Build the corpus of tweet texts.
corpus = make_corpus()
# Vectorise (bag of words, 500 features) and split into train/test.
X,y= Bow_Split(corpus,dataset,max_features=500)
X_train, X_test, y_train, y_test = Test_Train_Split(X,y,test_size = 0.3)
# Build and fit the ANN model (returns the training History).
history = build_ANN(X_train, X_test, y_train, y_test)
# Optionally evaluate with 10-fold cross-validation instead:
#classifier = Build_CV_ANN(X_train, X_test, y_train, y_test)
# Load a previously compiled/saved model from disk.
classifier = load_my_model(save_filepath)
#y_pred = predict(classifier,X_test)
#print(accuracy_score(y_test, y_pred))
# Plot accuracy/loss curves.
make_curve(history)
### Save Keras model (disabled by default).
#save_model(save_filepath)
| 3.0625 | 3 |
app/routes.py | WebDevSand/python-mysql-panel | 1 | 12767663 | from flask import render_template, flash, redirect, url_for, request
import pymysql
import json
from app import *
from app.form import ServerInfo
from app.handler import *
@app.route('/', methods=['GET', 'POST'])
def login():
    """Render the connection form; on valid submit redirect to /databases."""
    form = ServerInfo()
    if form.validate_on_submit():
        flash('Login requested for user {}, remember_me={}'.format(
            form.username.data, form.remember_me.data))
        return redirect(url_for('databases'))
    return render_template('index.html', form=form)
@app.route("/connect", methods=['GET', 'POST'])
def connect():
try:
mysql = pymysql.connect(
request.form["host"],
request.form["username"],
request.form["password"],
charset='utf8mb4'
)
data = connect_to_db(mysql, "SHOW DATABASES")
result = ''
for database in data:
result += "<option value=\""+database[0]+"\">"+database[0]+"</option>"
json_data = {"status": "success", "result": result}
return json.dumps(json_data)
except:
json_data = {"status": "error", "message": "Couldn't connect to MySQL, check your credentials!"}
return json_data
@app.route("/generate", methods=['GET', 'POST'])
def generate():
mysql = pymysql.connect(
request.form["host"],
request.form["username"],
request.form["password"],
request.form["database"],
charset='utf8mb4')
data = generate_mage(mysql, request.form["createUsers"])
return data
| 2.578125 | 3 |
practica1/software/FUENTES/algorithms/relief.py | antoniomdk/practicas_mh_ugr | 0 | 12767664 | <filename>practica1/software/FUENTES/algorithms/relief.py
import numpy as np
"""
In order to use the module pykdtree the system must have
OpenMP support. If there is any problem during the installation
process, we can use instead the KDTree implementation from
scipy.spatial module.
"""
try:
from pykdtree.kdtree import KDTree
except ImportError:
from scipy.spatial import cKDTree as KDTree
def _relief(X, Y):
W = np.zeros(X.shape[1])
for i in range(X.shape[0]):
x, y = X[i, :], Y[i]
X_same_class = X[Y == y]
X_other_class = X[Y != y]
# Calculate the second nearest neighbor with the same class.
ally = KDTree(X_same_class).query(x.reshape(1, -1), k=2)[1][0][1]
# Calculate the nearest neighbor with a different class.
enemy = KDTree(X_other_class).query(x.reshape(1, -1), k=1)[1][0]
ally = X_same_class[ally]
enemy = X_other_class[enemy]
# Update the weights using the L1 distance.
W += np.abs(x - enemy) - np.abs(x - ally)
W[W < 0] = 0
W /= np.max(W)
return W
class Relief():
    """Sklearn-style wrapper around the Relief feature-selection algorithm.

    After fit(), `feature_importances` holds the normalised Relief weights
    and `reduction` the fraction of features falling below `threshold`.
    """

    def __init__(self, threshold=0.2):
        self.feature_importances = []
        self.reduction = 0
        self.threshold = threshold

    def fit(self, X, Y):
        """Learn feature weights from X/Y and record the reduction rate."""
        weights = _relief(X, Y)
        self.feature_importances = weights
        below = np.sum(weights < self.threshold)
        self.reduction = below / len(weights)

    def transform(self, X):
        """Scale X by the learned weights and keep columns above threshold."""
        keep = self.feature_importances > self.threshold
        return (X * self.feature_importances)[:, keep]

    def fit_transform(self, X, y):
        """Convenience: fit on (X, y) then transform X."""
        self.fit(X, y)
        return self.transform(X)
| 3.421875 | 3 |
Ops/Timecode.py | davidsoncolin/IMS | 0 | 12767665 | import math
from datetime import datetime, timedelta
from Ops import Op
def splitTc(tc):
    """Break an 'HH:MM:SS:FF' timecode string into its four components."""
    hours, minutes, seconds, frame_part = tc.split(":")
    return hours, minutes, seconds, frame_part
def TCFtoInt(tc, fps):
    """Convert an 'HH:MM:SS:FF' timecode to an absolute frame count.

    fps is rounded *up* to a whole number of frames per second.  Returns
    None when any timecode component is empty (preserving the original
    contract).
    """
    hrs, mins, secs, frames = splitTc(tc)
    fps = math.ceil(float(fps))
    # BUG FIX: the original also tested `fps != ""`, which is always true
    # once fps has been converted to a number; the dead check is dropped.
    if "" in (hrs, mins, secs, frames):
        return None
    total_mins = (int(hrs) * 60) + int(mins)
    total_secs = (total_mins * 60) + int(secs)
    return (total_secs * int(fps)) + int(frames)
def TCSub(tc1, tc2, fps):
    """Return tc1 minus tc2 as an 'HH:MM:SS:FF' timecode string.

    Assumes tc1 >= tc2.  A negative frame difference borrows one second at
    the given fps.
    """
    h1, m1, s1, f1 = splitTc(tc1)
    h2, m2, s2, f2 = splitTc(tc2)
    # BUG FIX: the original converted the difference with
    # datetime.fromtimestamp(), which interprets the value in the machine's
    # local timezone and therefore shifted the hour field on non-UTC hosts.
    # Plain integer arithmetic is timezone independent.
    total_secs = (int(h1) * 3600 + int(m1) * 60 + int(s1)) \
               - (int(h2) * 3600 + int(m2) * 60 + int(s2))
    total_frames = int(f1) - int(f2)
    if total_frames < 0:
        total_frames += fps
        total_secs -= 1
    hrs, rem = divmod(total_secs, 3600)
    mins, secs = divmod(rem, 60)
    return "%02d:%02d:%02d:%02d" % (hrs, mins, secs, int(total_frames))
class SetFrameRange(Op.Op):
    """Op that reads a 2-element 'frameRange' attribute from a location and
    applies it to the global timeline."""

    def __init__(self, name='/SetFrameRange', locations=''):
        fields = [
            ('name', 'name', 'name', 'string', name, {}),
            ('locations', 'locations', 'Location containing a frame range', 'string', locations, {}),
        ]
        # BUG FIX: super(self.__class__, ...) recurses forever if this class
        # is ever subclassed; name the class explicitly instead.
        super(SetFrameRange, self).__init__(name, fields)

    def cook(self, location, interface, attrs):
        """Apply the location's frame range; no-op unless params are dirty
        and the attribute is a well-formed 2-element range."""
        if not interface.opParamsDirty(): return
        # Get frame range from location and set if we find any
        frameRange = interface.attr('frameRange')
        if not frameRange: return
        if len(frameRange) != 2: return
        interface.setFrameRange(frameRange[0], frameRange[1])
        interface.updateTimeline()
        self.logger.info('Set range to [%d, %d]' % (frameRange[0], frameRange[1]))
# Register Ops
import Registry
Registry.registerOp('Set Frame Range', SetFrameRange)
| 2.953125 | 3 |
tests/dockerfile/test_runner.py | tkishel/checkov | 1 | 12767666 | <reponame>tkishel/checkov<filename>tests/dockerfile/test_runner.py
import unittest
import os
from checkov.dockerfile.runner import Runner
from checkov.runner_filter import RunnerFilter
class TestRunnerValid(unittest.TestCase):
    """Exercises the Dockerfile runner against small fixture directories."""

    def _scan(self, relative_path, checks=None, as_file=False):
        """Run the Dockerfile runner over a fixture and return its report.

        relative_path is resolved against this test file's directory.  When
        as_file is True the target is passed via files=[...] instead of
        root_folder=...
        """
        current_dir = os.path.dirname(os.path.realpath(__file__))
        target = current_dir + relative_path
        runner_filter = RunnerFilter(framework='all', checks=checks)
        if as_file:
            return Runner().run(files=[target], external_checks_dir=None,
                                runner_filter=runner_filter)
        return Runner().run(root_folder=target, external_checks_dir=None,
                            runner_filter=runner_filter)

    def test_runner_empty_dockerfile(self):
        report = self._scan("/resources/empty_dockerfile")
        self.assertEqual(report.failed_checks, [])
        self.assertEqual(report.parsing_errors, [])
        self.assertEqual(report.passed_checks, [])
        self.assertEqual(report.skipped_checks, [])
        report.print_console()

    def test_runner_failing_check(self):
        report = self._scan("/resources/expose_port/fail", checks=['CKV_DOCKER_1'])
        self.assertEqual(len(report.failed_checks), 1)
        self.assertEqual(report.parsing_errors, [])
        self.assertEqual(report.passed_checks, [])
        self.assertEqual(report.skipped_checks, [])
        report.print_console()

    def test_runner_failing_check_with_file_path(self):
        report = self._scan("/resources/expose_port/fail/Dockerfile",
                            checks=["CKV_DOCKER_1"], as_file=True)
        self.assertEqual(len(report.failed_checks), 1)
        self.assertEqual(report.parsing_errors, [])
        self.assertEqual(report.passed_checks, [])
        self.assertEqual(report.skipped_checks, [])
        report.print_console()

    def test_runner_passing_check(self):
        report = self._scan("/resources/expose_port/pass", checks=['CKV_DOCKER_1'])
        self.assertEqual(len(report.passed_checks), 1)
        self.assertEqual(report.parsing_errors, [])
        self.assertEqual(report.failed_checks, [])
        self.assertEqual(report.skipped_checks, [])
        report.print_console()

    def test_runner_skip_check(self):
        report = self._scan("/resources/expose_port/skip", checks=['CKV_DOCKER_1'])
        self.assertEqual(len(report.skipped_checks), 1)
        self.assertEqual(report.parsing_errors, [])
        self.assertEqual(report.failed_checks, [])
        self.assertEqual(report.passed_checks, [])
        report.print_console()
if __name__ == '__main__':
unittest.main()
| 2.40625 | 2 |
model_training_and_evaluation/simple_scripts/plot_bootstrapped_f1s.py | skdreier/NIreland_NLP | 1 | 12767667 | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib
matplotlib.use('Agg')
# True indicates it's for 10-class multiway version
test_f1s_filename = '/Users/sofiaserrano/Downloads/paperResults/binaryTEST_withcontext_bootstrappedf1s.csv'
dev_f1s_filename = '/Users/sofiaserrano/Downloads/paperResults/binaryDEV_withcontext_bootstrappedf1s.csv'
tag = 'binary'
sns.set()
def read_in_data(fname):
    """Parse a two-column bootstrapped-F1 CSV into (roberta_f1s, baseline_f1s).

    The header's first field decides which column holds the RoBERTa scores;
    the other column is treated as the baseline.
    """
    roberta_scores = []
    baseline_scores = []
    with open(fname, 'r') as handle:
        header = handle.readline().strip().split(',')
        if header[0] == 'bootstrapped_f1_roberta':
            roberta_col = 0
        else:
            assert header[0] == 'bootstrapped_f1_baseline'
            roberta_col = 1
        for row in handle:
            cells = row.strip().split(',')
            roberta_scores.append(float(cells[roberta_col]))
            baseline_scores.append(float(cells[1 - roberta_col]))
    return roberta_scores, baseline_scores
# Load the bootstrapped scores for both held-out splits.
dev_roberta_f1s, dev_baseline_f1s = read_in_data(dev_f1s_filename)
test_roberta_f1s, test_baseline_f1s = read_in_data(test_f1s_filename)

# Flatten the four score lists into tidy rows for seaborn.  The iteration
# order below matches the original construction (Dev/RoBERTa, Test/RoBERTa,
# Dev/Baseline, Test/Baseline) so category ordering in the plot is unchanged.
# (Replaces four copy-pasted loops in the original.)
list_of_row_dicts = []
for split, model, scores in [('Dev', 'RoBERTa', dev_roberta_f1s),
                             ('Test', 'RoBERTa', test_roberta_f1s),
                             ('Dev', 'Baseline', dev_baseline_f1s),
                             ('Test', 'Baseline', test_baseline_f1s)]:
    for data_point in scores:
        list_of_row_dicts.append({'Data split': split,
                                  'Model': model,
                                  'Bootstrapped F1 score': data_point})

data_to_plot = pd.DataFrame(list_of_row_dicts)

fig = plt.figure(figsize=(12, 4))
#plt.ylim(0, 1)
ax = sns.boxplot(x="Data split", y="Bootstrapped F1 score", hue="Model", data=data_to_plot, palette='PuOr')
plt.title('Bootstrapped F1 scores for ' + tag + ' held-out data')
plt.savefig('/Users/sofiaserrano/Downloads/paperResults/BootstrappedF1s' + tag + '.png', bbox_inches='tight')
plt.close(fig)
| 2.59375 | 3 |
drive-contents/boot_options/low_battery.py | TG-Techie/TG-Watch-Software | 1 | 12767668 | import time
import microcontroller
from hardware import drivers
# Only hold the boot when external power is present (we can charge while
# waiting); otherwise fall through and boot normally.
if drivers.vbus_detect.value:
    from . import low_battery_splash  # shows the "charging" screen as a side effect of import
    # Poll the fuel gauge every 5 s until the battery reaches 4%.
    while drivers._read_bat_percent() < 4:
        time.sleep(5)
        print(f"[time: {time.monotonic()}] battery too low, waiting to boot")
    # while/else: this branch runs once the loop condition goes false
    # (there is no break), i.e. when the battery is charged enough.
    else:
        print("battery charged to 4+ percent, restarting")
        microcontroller.reset()
| 3.046875 | 3 |
SRL/main.py | NarenKul/Biaffine-Parsing-Pytorch | 10 | 12767669 | <gh_stars>1-10
import torch
from torchtext import data
import numpy as np
from pbase import app, logger
from model import SRL
import os
print(os.getpid())
WORD = data.Field(batch_first=True, lower=True)
PLEMMA = data.Field(batch_first=True, lower=True)
PPOS = data.Field(batch_first=True)
DEP = data.Field(batch_first=True, use_vocab=False, preprocessing=lambda x: [int(y) for y in x], pad_token=-1)
LABEL = data.Field(batch_first=True)
INDICATOR = data.Field(batch_first=True)
SLABEL = data.Field(batch_first=True)
fields = [('WORD', WORD), ('PLEMMA', PLEMMA), ('PPOS', PPOS), ('DEP', DEP), ('LABEL', LABEL),
('INDICATOR', INDICATOR), ('SLABEL', SLABEL)]
include_test = [False, False, False, False, False, False, False]
params = [{"min_freq":2}, {}, {}, {}, {}, {}, {}]
class Args(app.ArgParser):
    """Command-line arguments for the SRL trainer (extends app.ArgParser)."""

    def __init__(self):
        super(Args, self).__init__(description="SRL", patience=10000)
        self.parser.add_argument('--word_dim', type=int, default=100)
        self.parser.add_argument('--pos_dim', type=int, default=100)
        self.parser.add_argument('--lem_dim', type=int, default=100)
        self.parser.add_argument('--srl_dim', type=int, default=100)
        self.parser.add_argument('--is_verb_dim', type=int, default=10)
        self.parser.add_argument('--lstm_hidden', type=int, default=400)
        self.parser.add_argument('--num_lstm_layer', type=int, default=4)
        self.parser.add_argument('--lstm_dropout', type=float, default=0.25)
        self.parser.add_argument('--vector_cache', type=str,
                                 default="/mnt/collections/p8shi/dev/biaffine/Biaffine/data/glove.100d.conll09.pt")
        # BUG FIX: the default was the *string* '1e-2'.  argparse does not
        # run `type` on defaults, so args.lr stayed a str whenever --lr was
        # omitted, and Adam(lr='1e-2') would fail downstream.
        self.parser.add_argument('--lr', type=float, default=1e-2)
        self.parser.add_argument('--tensorboard', type=str, default='logs')
class Trainer(app.TrainAPP):
    """SRL training driver: derives vocab sizes and loads pretrained vectors."""

    def __init__(self, **kwargs):
        super(Trainer, self).__init__(**kwargs)
        # Record vocabulary sizes on the config for model construction.
        self.config.word_num = len(self.WORD.vocab)
        self.config.pos_num = len(self.PPOS.vocab)
        self.config.lem_num = len(self.PLEMMA.vocab)
        self.config.is_verb_num = len(self.INDICATOR.vocab)
        self.config.srl_num = len(self.SLABEL.vocab)
        self.config.PAD = WORD.vocab.stoi['<pad>']
        self.config.ISVERB = INDICATOR.vocab.stoi['1']
        # vector_cache is a (stoi, vectors, dim) triple saved with torch.save.
        stoi, vectors, dim = torch.load(self.config.vector_cache)
        match_embedding = 0
        self.WORD.vocab.vectors = torch.Tensor(len(self.WORD.vocab), dim)
        for i, token in enumerate(self.WORD.vocab.itos):
            wv_index = stoi.get(token, None)
            if wv_index is not None:
                # Token present in the pretrained vectors: copy its row.
                self.WORD.vocab.vectors[i] = vectors[wv_index]
                match_embedding += 1
            else:
                # OOV token: random uniform initialisation in [-0.5, 0.5].
                self.WORD.vocab.vectors[i] = torch.FloatTensor(self.config.word_dim).uniform_(-0.5, 0.5)
        print("Matching {} out of {}".format(match_embedding, len(self.WORD.vocab)))

    def prepare(self, **kwargs):
        """After base preparation, copy pretrained vectors into the model."""
        super(Trainer, self).prepare(**kwargs)
        self.model.pre_embed.weight.data.copy_(self.WORD.vocab.vectors)
        print(self.model)
        print(self.SLABEL.vocab.itos)
        print(self.config)
class optimizer:
    """Thin wrapper bundling an Adam optimizer with an exponential LR decay.

    The learning rate is multiplied by 0.75 after every schedule() call.
    """

    def __init__(self, parameter, config):
        self.optim = torch.optim.Adam(parameter, lr=config.lr)
        decay = lambda epoch: 0.75 ** (epoch // 1)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=decay)

    def zero_grad(self):
        """Clear accumulated gradients."""
        self.optim.zero_grad()

    def step(self):
        """Apply one optimisation step."""
        self.optim.step()

    def schedule(self):
        """Advance the LR schedule by one epoch and report the current rate."""
        self.scheduler.step()
        print("learning rate : ", self.scheduler.get_lr(), self.scheduler.base_lrs)
class criterion:
    """Token-level cross-entropy loss over flattened SRL label predictions.

    (An earlier, commented-out variant weighted the first two classes; the
    current loss is unweighted.)
    """

    def __init__(self):
        self.crit = torch.nn.CrossEntropyLoss()

    def __call__(self, output, batch):
        """Flatten (batch, seq, labels) logits and (batch, seq) gold labels,
        then return the mean cross-entropy."""
        num_labels = output.size(2)
        flat_logits = output.view(-1, num_labels)
        flat_targets = batch.SLABEL.view(-1, 1)[:, 0]
        return self.crit(flat_logits, flat_targets)
def evaluator(name, pairs):
    """Compute (accuracy, precision, recall, f1) over (output, batch) pairs.

    `pairs` may be a single (batch_output, batch_example) tuple or a list of
    them.  Accuracy counts every token position (including padding); the
    P/R/F1 counts skip <pad> gold tokens and treat '_' (no role) as the
    negative class.  Relies on the module-level SLABEL field's vocab.
    """
    # pair = (batch_output, batch_example)
    if type(pairs) != list and type(pairs) == tuple:
        pairs = [pairs]
    n_correct = 0
    n_total = 0
    n_predicted = 0
    acc_total = 0
    acc_right = 0
    for (output, batch) in pairs:
        # Argmax over the label dimension; compare against gold labels.
        # NOTE(review): acc_right accumulates a tensor, so `acc` below is a
        # tensor as well -- confirm downstream consumers expect that.
        acc_right += (torch.max(output, 2)[1].view(batch.SLABEL.size()).data == batch.SLABEL.data).sum(dim=1).sum()
        size_list = list(batch.SLABEL.size())
        acc_total += size_list[0] * size_list[1]
        pred = torch.max(output, 2)[1].view(batch.SLABEL.size()).cpu().data.numpy()
        gold = batch.SLABEL.cpu().data.numpy()
        for pred_sent, gold_sent in zip(pred, gold):
            for pred_token, gold_token in zip(pred_sent, gold_sent):
                # Padding positions are excluded from P/R/F1 entirely.
                if gold_token == SLABEL.vocab.stoi['<pad>']:
                    continue
                if pred_token == gold_token and gold_token != SLABEL.vocab.stoi['_']:
                    n_correct += 1
                if pred_token != SLABEL.vocab.stoi['_']:
                    n_predicted += 1
                if gold_token != SLABEL.vocab.stoi['_']:
                    n_total += 1
    # Guard all the ratios against empty denominators.
    if n_predicted == 0:
        precision = 0
    else:
        precision = n_correct / n_predicted
    if n_total == 0:
        recall = 0
    else:
        recall = n_correct / n_total
    if precision + recall == 0:
        f1 = 0
    else:
        f1 = 2 * precision * recall / (precision + recall)
    if acc_total == 0:
        acc = 0
    else:
        acc = acc_right / acc_total
    return acc, precision, recall, f1
def metrics_comparison(new_metrics, best_metrics):
    """Return True when new_metrics beats best_metrics on F1 (index 3),
    or when there is no best yet."""
    return best_metrics is None or new_metrics[3] > best_metrics[3]
def log_printer(name, metrics, loss, epoch=None, iters=None):
    """Print a metrics line and push loss to the TensorBoard `log` global.

    metrics is (accuracy, precision, recall, f1).  For name == 'train',
    epoch and iters (a 'current/total' string) are required; for other
    names they are optional.
    """
    if name == 'train':
        print("{}\tEPOCH : {}\tITER : {}\tACC : {}\tP : {}\tR : {}\tF : {}\tNearest batch training LOSS : {}".format(
            name, epoch, iters, metrics[0], metrics[1], metrics[2], metrics[3], loss
        ))
        step = int(iters.split('/')[0]) + int(iters.split('/')[1]) * (epoch-1)
        log.scalar_summary(tag="loss", value=loss, step=step)
    else:
        if loss is None:
            print("{}\tACC : {}\tP : {}\tR : {}\tF : {}".format(name, metrics[0], metrics[1], metrics[2], metrics[3]))
        else:
            # BUG FIX: the original passed `loss` to format() without a
            # matching placeholder, so it was silently dropped from output.
            print("{}\tACC : {}\tP : {}\tR : {}\tF : {}\tLOSS : {}".format(name, metrics[0], metrics[1], metrics[2], metrics[3], loss))
        if iters is not None and epoch is not None and loss is not None:
            step = int(iters.split('/')[0]) + int(iters.split('/')[1]) * (epoch - 1)
            log.scalar_summary(tag="valid_loss", value=loss, step=step)
if __name__=='__main__':
    # Parse CLI args and open the TensorBoard log directory.
    arg_parser = Args()
    args = arg_parser.get_args()
    log = logger.Logger(args.tensorboard)
    crit = criterion()
    # Build the trainer (constructs vocabularies and loads embeddings) ...
    trainer = Trainer(args=args, fields=fields, include_test=include_test,
                      build_vocab_params=params)
    # ... wire in the model, optimizer, loss, and reporting hooks ...
    trainer.prepare(model=SRL, optimizer=optimizer, criterion=crit,
                    evaluator=evaluator, metrics_comparison=metrics_comparison,
                    log_printer=log_printer)
    # ... and run the training loop.
    trainer.train()
| 2.125 | 2 |
tensorflow_graphics/projects/cvxnet/train.py | Liang813/graphics | 2,759 | 12767670 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training Loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_graphics.projects.cvxnet.lib import datasets
from tensorflow_graphics.projects.cvxnet.lib import models
from tensorflow_graphics.projects.cvxnet.lib import utils
tf.disable_eager_execution()
flags = tf.app.flags
logging = tf.logging
tf.logging.set_verbosity(tf.logging.INFO)
utils.define_flags()
FLAGS = flags.FLAGS
def main(unused_argv):
  """Build the TF1 training graph for CvxNet and run the monitored loop."""
  tf.set_random_seed(2191997)
  np.random.seed(6281996)
  logging.info("=> Starting ...")
  # Select dataset.
  logging.info("=> Preparing datasets ...")
  data = datasets.get_dataset(FLAGS.dataset, "train", FLAGS)
  batch = tf.data.make_one_shot_iterator(data).get_next()
  # Select model.
  logging.info("=> Creating {} model".format(FLAGS.model))
  model = models.get_model(FLAGS.model, FLAGS)
  optimizer = tf.train.AdamOptimizer(FLAGS.lr)
  # Set up the graph: loss, train op and the global step counter.
  train_loss, train_op, global_step = model.compute_loss(
      batch, training=True, optimizer=optimizer)
  # Training hooks: stop at max_steps, write summaries every 100 steps,
  # and report steps/sec.
  stop_hook = tf.train.StopAtStepHook(last_step=FLAGS.max_steps)
  summary_writer = tf.summary.FileWriter(FLAGS.train_dir)
  ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
  summary_hook = tf.train.SummarySaverHook(
      save_steps=100, summary_writer=summary_writer, summary_op=ops)
  step_counter_hook = tf.train.StepCounterHook(summary_writer=summary_writer)
  hooks = [stop_hook, step_counter_hook, summary_hook]
  logging.info("=> Start training loop ...")
  # MonitoredTrainingSession owns checkpointing; explicit summary saving is
  # handled by the hook above, so the built-in savers are disabled.
  with tf.train.MonitoredTrainingSession(
      checkpoint_dir=FLAGS.train_dir,
      hooks=hooks,
      scaffold=None,
      save_checkpoint_steps=FLAGS.save_every,
      save_checkpoint_secs=None,
      save_summaries_steps=None,
      save_summaries_secs=None,
      log_step_count_steps=None,
      max_wait_secs=3600) as mon_sess:
    while not mon_sess.should_stop():
      mon_sess.run([batch, train_loss, global_step, train_op])
if __name__ == "__main__":
tf.app.run(main)
| 1.976563 | 2 |
proc.py | riceissa/fred-processing | 0 | 12767671 | <reponame>riceissa/fred-processing<filename>proc.py
#!/usr/bin/env python3
import requests
import os
import sys
from devec_sql_common import *
with open("apikey.txt", "r") as f:
API_KEY = next(f).strip()
def get_tags_from_file():
    """Return cached FRED tag names from 'fred_tags', or [] when absent."""
    tags = []
    try:
        with open("fred_tags", "r") as cache:
            for line in cache:
                tags.append(line.strip())
    except FileNotFoundError:
        pass
    return tags
def get_tags():
    """Fetch all FRED tag names from the API and cache them to 'fred_tags'.

    Pages through the endpoint using len(tags) as the offset until the
    reported total count is reached.  Requires the module-level API_KEY.
    """
    tags_endpoint = "https://api.stlouisfed.org/fred/tags"
    tags = []
    count = 1 # This just has to be greater than 0 to enter the loop
    # The API limits to 1000 tags per query, so keep bumping up the offset
    # until we get them all
    while len(tags) < count:
        r = requests.get(tags_endpoint, params={
            "api_key": API_KEY,
            "file_type": "json",
            "offset": len(tags),
        })
        j = r.json()
        tags.extend([x["name"] for x in j["tags"]])
        count = j["count"]
    # Cache the tag list for later runs.
    with open("fred_tags", "w") as f:
        for tag in tags:
            f.write(tag + "\n")
    return tags
def get_series_names_for_tag(tag):
    """Return the ids of all FRED series carrying the given tag.

    Pages through the API using len(result) as the offset; logs the running
    total to stderr.
    """
    tags_series_endpoint = "https://api.stlouisfed.org/fred/tags/series"
    result = []
    count = 1 # This just has to be greater than 0 to enter the loop
    while len(result) < count:
        r = requests.get(tags_series_endpoint, params={
            "api_key": API_KEY,
            "file_type": "json",
            "tag_names": tag,
            "offset": len(result),
        })
        j = r.json()
        result.extend([x["id"] for x in j["seriess"]])
        count = j["count"]
    print(len(result), file=sys.stderr)
    return result
def get_all_series_names_from_file():
    """Return cached (tag, series_id) pairs from 'fred_series_names', or []."""
    try:
        with open("fred_series_names", "r") as cache:
            pairs = []
            for line in cache:
                fields = line.strip().split("\t")
                pairs.append((fields[0], fields[1]))
            return pairs
    except FileNotFoundError:
        return []
def get_all_series_names(tags):
    """Collect unique (tag, series_id) pairs for every tag, caching as we go.

    Each new pair is appended to 'fred_series_names' and flushed+fsynced
    immediately so an interrupted run can resume from the cache.
    """
    result = []
    seen = set()
    with open("fred_series_names", "w") as f:
        for tag in tags:
            print("DOING", tag, file=sys.stderr)
            for s in get_series_names_for_tag(tag):
                # A series can carry many tags; keep only its first sighting.
                if s not in seen:
                    result.append((tag, s))
                    f.write("{}\t{}\n".format(tag, s))
                    seen.add(s)
                    f.flush()
                    os.fsync(f.fileno())
    return result
def get_series_observations(series_name):
    """Yield observation dicts for a FRED series, paging through the API.

    Fetches the series title/units once, then streams observations as
    {"date", "value", "database_version", "units", "title"} dicts.  `n`
    doubles as the count of yielded items and the next request offset.
    """
    r = requests.get("https://api.stlouisfed.org/fred/series", params={
        "api_key": API_KEY,
        "file_type": "json",
        "series_id": series_name,
    })
    j = r.json()
    title = j["seriess"][0]["title"]
    units = j["seriess"][0]["units"]
    endpoint = "https://api.stlouisfed.org/fred/series/observations"
    n = 0 # Track how many observations we have gotten
    count = 1 # This just has to be greater than 0 to enter the loop
    while n < count:
        r = requests.get(endpoint, params={
            "api_key": API_KEY,
            "file_type": "json",
            "series_id": series_name,
            "offset": n,
        })
        j = r.json()
        for x in j["observations"]:
            n += 1
            yield {"date": x["date"],
                    "value": x["value"],
                    "database_version": x["realtime_start"],
                    "units": units,
                    "title": title}
        count = j["count"]
def print_sql_rows(series_name):
    """Print SQL INSERT statements for all observations of a FRED series.

    Skips missing values ("."), and splits the output into a new INSERT
    statement after every 5000 rows to keep statements a manageable size.
    NOTE(review): the region is hard-coded to "United States?" (normalised
    by region_normalized) -- confirm that placeholder is intended.
    """
    print("# " + series_name)
    insert_line = "insert into data(region, odate, database_url, database_version, data_retrieval_method, metric, units, value, notes) values"
    count = 0
    first = True
    for ob in get_series_observations(series_name):
        if ob["value"] not in ["."]:
            # A fresh statement header is printed before the first row of
            # each batch; subsequent rows are comma-prefixed continuations.
            if first:
                print(insert_line)
            print(" " + ("" if first else ",") + "(" + ",".join([
                mysql_quote(region_normalized("United States?")), # region
                mysql_string_date(ob["date"]), # odate
                mysql_quote("https://research.stlouisfed.org/docs/api/fred/"), # database_url
                mysql_quote(ob["database_version"]), # database_version
                mysql_quote(""), # data_retrieval_method
                mysql_quote(ob["title"]), # metric
                mysql_quote(ob["units"]), # units
                mysql_float(ob["value"]), # value
                mysql_quote(""), # notes
            ]) + ")")
            first = False
            count += 1
            # Close the current statement after 5000 rows and start another.
            if count > 5000:
                count = 0
                first = True
                print(";")
    if not first:
        print(";")
if __name__ == "__main__":
# tags = get_tags_from_file()
# if not tags:
# tags = get_tags()
series_names = get_all_series_names_from_file()
if not series_names:
series_names = get_all_series_names(tags)
print_insert_header()
for s in series_names:
try:
print_sql_rows(s[1])
except:
print("# failed for {}, skipping".format(s))
print_insert_footer()
| 2.734375 | 3 |
tests/test_issues/test_issue_332.py | bpow/linkml | 0 | 12767672 | <filename>tests/test_issues/test_issue_332.py
import unittest
from linkml.generators.jsonldgen import JSONLDGenerator
from tests.test_issues.environment import env
class JSONContextTestCase(unittest.TestCase):
    """Checks that JSONLDGenerator honours the `context` serialization arg
    (issue #332): default, single-string, and list-of-strings forms."""

    def test_context(self):
        """ Test no context in the argument"""
        self.assertIn('''"@context": [
   [
      "https://w3id.org/linkml/meta.context.jsonld"
   ],''', JSONLDGenerator(env.input_path('issue_332.yaml')).serialize())

    def test_context_2(self):
        """ Test a single context argument """
        self.assertIn('''"@context": [
   [
      "http://some.org/nice/meta.context.jsonld"
   ],''', JSONLDGenerator(env.input_path('issue_332.yaml')).serialize(context="http://some.org/nice/meta.context.jsonld"))

    def test_context_3(self):
        """ Test multi context arguments """
        self.assertIn('''"@context": [
   [
      "http://some.org/nice/meta.context.jsonld",
      "http://that.org/meta.context.jsonld"
   ],''', JSONLDGenerator(env.input_path('issue_332.yaml')).
               serialize(context=["http://some.org/nice/meta.context.jsonld", "http://that.org/meta.context.jsonld"]))
if __name__ == '__main__':
unittest.main()
| 2.65625 | 3 |
firm-founding-wiki-search.py | merpaker/firm-founding-wiki-search | 0 | 12767673 | <reponame>merpaker/firm-founding-wiki-search<gh_stars>0
#################################################################
# Wikipedia Search for Historical Firm Founding Dates
# firm-founding-wiki-search.py
# Author: <NAME>, <EMAIL>
# Last edited: 17 November, 2017
# This script uses python's Wikipedia package search functionality
# to link a historical firm with its suggested Wikipedia information
# in order to predict the correct founding date for the firm.
#################################################################
import wikipedia
import re
### IMPORT COMPANY LIST ###
# Read the newline-separated company list (UTF-16 export).  A `with` block
# replaces the manual open/read/close so the handle is released even if
# decoding fails.
with open('CompanyList.txt', encoding='utf-16') as company_list:
    company_list_text = company_list.read()
company_set = company_list_text.split('\n')

# Output stays open for the whole run; append mode lets an interrupted run
# be resumed without losing earlier results.
wiki_output = open('WikipediaFoundingDates.txt', mode='a', encoding='utf-8')
### CYCLE THROUGH EACH COMPANY ###
for company in company_set:
# Search for company on wikipedia
isWikiPage = 1
try:
search_results = wikipedia.search(company)[0]
except IndexError as f:
print("Error: {0}".format(f))
search_results = ""
isWikiPage = 0
# Make new wikipedia object for the company
if isWikiPage == 1:
try:
company_wiki = wikipedia.page(search_results)
except wikipedia.exceptions.DisambiguationError as e:
print("Error: {0}".format(e))
isWikiPage = 0
# Save the plain text of the wiki page
if isWikiPage == 1:
wiki_text = company_wiki.content
else:
wiki_text = ""
# Parse plain text paragraphs into sentences
from nltk.tokenize import sent_tokenize
wiki_sentences = sent_tokenize(wiki_text)
# Store full set of sentences
wiki_sentences_full = wiki_sentences*1
# Delete sentences without the company name
for i in reversed(range(len(wiki_sentences))):
if company not in wiki_sentences[i]:
del wiki_sentences[i]
# Store set of sentences with company name
wiki_sentences_company = wiki_sentences*1
# Delete sentences without "founded" or "established" from
# set of sentences with company name
for i in reversed(range(len(wiki_sentences))):
if ("founded" or "established") not in wiki_sentences[i]:
del wiki_sentences[i]
# Store set of sentences with company name and founding
wiki_sentences_founded = wiki_sentences*1
# Generate output string
company_output = company
company_output += "; "
company_output += search_results
company_output += "; "
# Search for years in all three scopes: full page, company sentences, founded/established
# Put all years in output string
# Update best-guess as appropriate
# Update flag if best guess is from full page
best_guess = 9000
conf = 0
# Full wikipedia page search
for i in range(len(wiki_sentences_full)):
d = re.findall('\d{4}', wiki_sentences_full[i])
for j in range(len(d)):
if int(d[j]) > 1600:
if int(d[j]) < 1913:
company_output += d[j]
company_output += ", "
if not wiki_sentences_company:
if int(d[j]) < best_guess:
best_guess = int(d[j])
conf = 1
company_output += "; "
# Search in sentences with company name
for i in range(len(wiki_sentences_company)):
d = re.findall('\d{4}', wiki_sentences_company[i])
for j in range(len(d)):
if int(d[j]) > 1600:
if int(d[j]) < 1913:
company_output += d[j]
company_output += ", "
if not wiki_sentences_founded:
if int(d[j]) < best_guess:
best_guess = int(d[j])
conf = 2
company_output += "; "
# Search in sentences with company name & "founded" or "established"
for i in range(len(wiki_sentences_founded)):
d = re.findall('\d{4}', wiki_sentences_founded[i])
for j in range(len(d)):
if int(d[j]) > 1600:
if int(d[j]) < 1913:
company_output += d[j]
company_output += ", "
if int(d[j]) < best_guess:
best_guess = int(d[j])
conf = 3
company_output += "; "
company_output += str(conf)
company_output += "; "
company_output += str(best_guess)
print(company_output)
wiki_output.write(company_output)
# So final company_output string will be as follows:
# company name, wikipedia page or blank, date(s) from full page,
# dates from sentences with company name, dates from sentences with
# company name and "founded" or "established", confidence in predicted
# founded year, best guess at founded year
wiki_output.close()
| 3.1875 | 3 |
tests/api_resources/test_subscription_item.py | bhch/async-stripe | 8 | 12767674 | from __future__ import absolute_import, division, print_function
import stripe
import pytest
pytestmark = pytest.mark.asyncio
TEST_RESOURCE_ID = "si_123"
class TestSubscriptionItem(object):
    """Exercise list/retrieve/create/save/modify/delete on SubscriptionItem."""

    async def test_is_listable(self, request_mock):
        page = await stripe.SubscriptionItem.list(subscription="sub_123")
        request_mock.assert_requested(
            "get", "/v1/subscription_items", {"subscription": "sub_123"}
        )
        assert isinstance(page.data, list)
        assert isinstance(page.data[0], stripe.SubscriptionItem)

    async def test_is_retrievable(self, request_mock):
        item = await stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/subscription_items/{}".format(TEST_RESOURCE_ID)
        )
        assert isinstance(item, stripe.SubscriptionItem)

    async def test_is_creatable(self, request_mock):
        item = await stripe.SubscriptionItem.create(
            price="price_123", subscription="sub_123"
        )
        request_mock.assert_requested("post", "/v1/subscription_items")
        assert isinstance(item, stripe.SubscriptionItem)

    async def test_is_saveable(self, request_mock):
        item = await stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
        item.price = "price_123"
        await item.save()
        request_mock.assert_requested(
            "post",
            "/v1/subscription_items/{}".format(TEST_RESOURCE_ID),
            {"price": "price_123"},
        )

    async def test_is_modifiable(self, request_mock):
        item = await stripe.SubscriptionItem.modify(
            TEST_RESOURCE_ID, price="price_123"
        )
        request_mock.assert_requested(
            "post",
            "/v1/subscription_items/{}".format(TEST_RESOURCE_ID),
            {"price": "price_123"},
        )
        assert isinstance(item, stripe.SubscriptionItem)

    async def test_is_deletable(self, request_mock):
        item = await stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
        await item.delete()
        request_mock.assert_requested(
            "delete", "/v1/subscription_items/{}".format(TEST_RESOURCE_ID)
        )
        assert item.deleted is True

    async def test_can_delete(self, request_mock):
        item = await stripe.SubscriptionItem.delete(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "delete", "/v1/subscription_items/{}".format(TEST_RESOURCE_ID)
        )
        assert item.deleted is True
class TestUsageRecords(object):
    """Exercise creation of usage records for a subscription item."""

    async def test_is_creatable(self, request_mock):
        record = await stripe.SubscriptionItem.create_usage_record(
            TEST_RESOURCE_ID,
            quantity=5000,
            timestamp=1524182400,
            action="increment",
        )
        request_mock.assert_requested(
            "post",
            "/v1/subscription_items/{}/usage_records".format(TEST_RESOURCE_ID),
        )
        assert isinstance(record, stripe.UsageRecord)
class TestUsageRecordSummaries(object):
    """Exercise listing of usage record summaries for a subscription item."""

    async def test_is_listable(self, request_mock):
        page = await stripe.SubscriptionItem.list_usage_record_summaries(
            TEST_RESOURCE_ID
        )
        request_mock.assert_requested(
            "get",
            "/v1/subscription_items/{}/usage_record_summaries".format(
                TEST_RESOURCE_ID
            ),
        )
        assert isinstance(page.data, list)
        assert isinstance(page.data[0], stripe.UsageRecordSummary)
| 2.28125 | 2 |
MYgames_package/MYgames/__main__.py | MandiYang/MYgames | 1 | 12767675 | <filename>MYgames_package/MYgames/__main__.py
# Greet the user and announce the package name when run as a module.
for message in ('Hello', 'Package name: MYgames'):
    print(message)
| 1.1875 | 1 |
bptf/pretraining.py | antonyms/AntonymPipeline | 4 | 12767676 | <gh_stars>1-10
#!/usr/bin/python
from pretraining import mm
from pretraining.utils import *
# Python 2 pipeline script: builds a vocabulary from antonym/synonym lists,
# computes pair-wise morpho-vector similarities, then emits a MatrixMarket
# file for BPTF training.  Runs top to bottom with side effects at each step.

# Input/output locations, relative to this script's working directory.
rootPath = '../data/'
thPath = rootPath + 'combine/'
antPath = thPath + 'antonym.txt'
synPath = thPath + 'synonym.txt'
grePath = rootPath + 'test-data/gre_wordlist.txt'
commonPath = rootPath + 'test-data/test_wordlist.txt'
topPath = '../data/Rogets/top' #TODO
outPath = thPath + 'combine_mm'
vocPath = outPath + '-voc'
# Ratios passed through to mm.sim2mm (semantics defined there).
zero_proportion = 3
sim_proportion = 1#TODO
# Load word lists (helpers come from pretraining.utils).
gre = load_dic(grePath)
common = load_dic(commonPath)
top = load_dic(topPath)
top = set(top)
print 'common before: %d' % len(common)
# Keep only common words that also appear in the Rogets "top" list.
common = [x for x in common if x in top]
print 'common after: %d' % len(common)
gre = set(gre)
print 'gre before: %d' % len(gre)
# Seed vocabulary: GRE words plus the filtered common words.
gre = gre.union(common)
print 'gre after: %d' % len(gre)
# Expand the seed vocabulary with antonym-linked words.
voc = get_more_voc_from_ant(antPath, gre)
#voc = get_more_voc_from_synant(antPath, synPath, gre)
#voc = get_voc_from_synant(antPath, synPath, gre)
print 'finally get vocabulary: %d' % len(voc)
save_dic(vocPath, voc)
# NOTE(review): word2id and N are never used below — confirm whether
# get_voc_dic has needed side effects or this line can be dropped.
word2id, N = get_voc_dic(voc)
"""
Calculate pair-wise cosine product of morpho vectors.
"""
savePath = thPath + "sim.txt"
mm.vec2sim(vocPath, savePath, sample_rate=.05,tag=False)
"""
Generate MatrixMarket format file for training.
"""
simPath = thPath + 'sim.txt'
mm.sim2mm(antPath, synPath, vocPath, simPath, outPath,
          zero_proportion, sim_proportion)
| 2.484375 | 2 |
testcontainers/oracle.py | martykube/testcontainers-python | 0 | 12767677 | from testcontainers.core.generic import DbContainer
class OracleDbContainer(DbContainer):
    """Testcontainer for an Oracle XE 11g R2 database.

    Exposes Oracle's default listener port (1521) and sets the env var the
    image requires to accept remote (non-local) connections.
    """

    def __init__(self, image="wnameless/oracle-xe-11g-r2:latest"):
        # Consistency fix: use zero-argument super() here as well, matching
        # get_connection_url below (the original mixed the two forms).
        super().__init__(image=image)
        self.container_port = 1521
        self.with_exposed_ports(self.container_port)

    def _configure(self):
        # Required by the wnameless image to allow connections from outside
        # the container.
        self.with_env("ORACLE_ALLOW_REMOTE", "true")

    def get_connection_url(self):
        """Return a SQLAlchemy-style URL for the XE instance.

        "system" is the default administrative account of the image; the
        password placeholder was redacted in the source.
        """
        return super()._create_connection_url(dialect="oracle",
                                              username="system",
                                              password="<PASSWORD>",
                                              port=self.container_port,
                                              db_name="xe")
| 2.109375 | 2 |
scripts/isqrt.py | Marlon-Lazo-Coronado/tiny-bignum-c | 331 | 12767678 | <reponame>Marlon-Lazo-Coronado/tiny-bignum-c
#isqrt.py
import math
def isqrt(n):
    """Return the integer square root of *n* (floor of sqrt(n)).

    Binary search over [0, n].
    BUGFIX: uses floor division (//) for the midpoint so the search stays in
    integers on Python 3 as well; the original '/' became float division
    there, losing precision for large n.
    """
    if n == 0:
        return 0
    high = n
    low = 0
    # Bias the midpoint upward so `low = mid` always makes progress and the
    # loop terminates when high == low.
    calc_mid = lambda: (high - low) // 2 + low + 1
    mid = calc_mid()
    while high > low:
        sq = mid ** 2
        if sq > n:
            high = mid - 1
        else:
            low = mid
        mid = calc_mid()
    return low
if __name__ == "__main__":
for i in range(10000000):
sq = isqrt(i)
if sq != int(math.sqrt(i)):
print "Failed on {}: {}".format(i, sq)
elif i % 100000==0: print i
| 3.390625 | 3 |
tests/servers/server_302.py | jun-kai-xin/douban | 0 | 12767679 | <gh_stars>0
from http.server import BaseHTTPRequestHandler, HTTPServer
class Handler(BaseHTTPRequestHandler):
    """HTTP handler answering every GET with a 302 redirect.

    Requests whose path contains 'sorry' are redirected to douban's
    anti-robot page; everything else goes to the movie front page.
    """

    def do_GET(self):
        # Log all request headers as one space-separated "key:value" line.
        self.log_message(' '.join(['{:s}:{:s}'.format(key, value) for (key, value) in self.headers.items()]))
        # Protocol order matters: status line first, then headers, then end.
        self.send_response(302)
        # find() > 0 is safe here: an HTTP path always starts with '/', so
        # 'sorry' can never occur at index 0.
        if self.path.find('sorry') > 0:
            self.send_header('Location', 'https://movie.douban.com/sorry?')
        else:
            self.send_header('Location', 'https://movie.douban.com/')
        self.end_headers()
# Serve forever on all interfaces, port 5000, when run as a script.
if __name__ == '__main__':
    httpd = HTTPServer(('', 5000), Handler)
    print('Starting httpd...')
    httpd.serve_forever()
| 2.921875 | 3 |
artifacts/models/seq2seq/seq2seq_inference.py | LittleQili/nnfusion | 0 | 12767680 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import sys
import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
import seq2seq_model
from tensorflow.python.framework import graph_util
# TF1-style command-line flags controlling model shape and benchmark mode.
flags = tf.flags
logging = tf.logging
logging.set_verbosity(tf.logging.ERROR)
flags.DEFINE_integer("encoder_step", 100, "sequence length")
flags.DEFINE_integer("encoder_layer", 8, "num layer")
flags.DEFINE_integer("decoder_step", 30, "sequence length")
flags.DEFINE_integer("decoder_layer", 4, "num layer")
flags.DEFINE_integer("hidden_size", 128, "hidden size")
flags.DEFINE_integer("batch_size", 1, "mini batch size")
flags.DEFINE_boolean('profile', False, 'profile kernel runtime')
flags.DEFINE_string('backend', 'tf', 'tf or wolong or ngraph')
# NOTE(review): the help strings below say "mini batch size" but the flags
# actually control iteration counts (copy-paste artifact).
flags.DEFINE_integer("num_iter", 10, "mini batch size")
flags.DEFINE_integer("warmup", 5, "mini batch size")
flags.DEFINE_boolean('xla', False, 'enable xla')
flags.DEFINE_string('frozen_file', '', 'output path for the frozen pb file')
flags.DEFINE_integer("parallel", 0, "tf.ConfigProto.inter_op_parallelism_threads")
FLAGS = flags.FLAGS

import ctypes
# Load the CUDA runtime so the profiler can be toggled around timed regions.
_cudart = ctypes.CDLL('libcudart.so')
def profile_start():
    """Enable CUDA profiling via the runtime API; raise on a non-zero status."""
    status = _cudart.cudaProfilerStart()
    if status != 0:
        raise Exception("cudaProfilerStart() returned %d" % status)
def profile_stop():
    """Disable CUDA profiling via the runtime API; raise on a non-zero status."""
    status = _cudart.cudaProfilerStop()
    if status != 0:
        raise Exception("cudaProfilerStop() returned %d" % status)
def main(_):
    """Build the seq2seq inference graph, then benchmark or profile it.

    Flag-driven behaviour:
      * default: run ``warmup`` untimed iterations, then time ``num_iter``
        runs (CUDA profiler enabled only around the timed region);
      * --profile: trace 5 runs with full TF tracing and write Chrome
        timeline JSONs under timelines/;
      * --xla enables the XLA JIT; --frozen_file dumps a frozen GraphDef.
    """
    profile_stop()
    session_conf = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        graph_options=tf.GraphOptions(infer_shapes=True),
        inter_op_parallelism_threads=FLAGS.parallel
    )
    if FLAGS.xla:
        session_conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    with tf.Graph().as_default(), tf.Session(config=session_conf) as session:
        profile_stop()
        batch_size = FLAGS.batch_size
        model = seq2seq_model.Seq2SeqModel(
            batch_size, FLAGS.hidden_size, FLAGS.encoder_layer, FLAGS.encoder_step, FLAGS.decoder_layer, FLAGS.decoder_step)
        # Placeholder shaped (encoder_step, batch, hidden); the model takes a
        # per-timestep list, so split on axis 0 and drop the singleton dim.
        eval_inputs = tf.placeholder(
            tf.float32, [FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size], 'eval_input')
        eval_inputs_list = tf.split(value=eval_inputs, axis=0, num_or_size_splits=FLAGS.encoder_step)
        for i in range(len(eval_inputs_list)):
            eval_inputs_list[i] = tf.squeeze(eval_inputs_list[i],axis=[0])
        logits = model(eval_inputs_list)
        # Constant all-ones input: contents don't matter for timing.
        lstm_inputs = np.ones(
            (FLAGS.encoder_step, FLAGS.batch_size, FLAGS.hidden_size))
        session.run(tf.global_variables_initializer())
        if FLAGS.frozen_file != '':
            # Freeze variables into constants and dump a serialized GraphDef.
            constant_graph = graph_util.convert_variables_to_constants(session, session.graph_def, [logits.name.split(':')[0]])
            with tf.gfile.GFile(FLAGS.frozen_file, "wb") as f:
                f.write(constant_graph.SerializeToString())
        if not FLAGS.profile:
            # warm up
            for i in range(FLAGS.warmup):
                res = session.run(logits, {
                    eval_inputs: lstm_inputs})
            # Print a short prefix of the output as a sanity check.
            out_flat = res.flat
            if (len(out_flat) > 0):
                max_len = min(10, len(out_flat))
                print(logits.name)
                print(out_flat[:max_len], "...(size=", len(out_flat), "end with", out_flat[-1], ")")
            # Timed benchmark loop (milliseconds per iteration).
            iter_times = []
            profile_start()
            for i in range(FLAGS.num_iter):
                start_time = time.time()
                res = session.run(logits, {
                    eval_inputs: lstm_inputs})
                iter_time = (time.time() - start_time) * 1000
                iter_times.append(iter_time)
                print("Iteration time %f ms" % (iter_time))
            profile_stop()
            print("Summary: [min, max, mean] = [%f, %f, %f] ms" % (
                min(iter_times), max(iter_times), sum(iter_times) / len(iter_times)))
        else:
            # Tracing mode: capture full run metadata and emit Chrome traces.
            profile_start()
            options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            for i in range(5):
                start_time = time.time()
                res = session.run(logits, {
                    eval_inputs: lstm_inputs},
                    options=options,
                    run_metadata=run_metadata)
                end_time = (time.time() - start_time) * 1000
                print("iteration time %f ms" % (end_time))
                fetched_timeline = timeline.Timeline(run_metadata.step_stats)
                chrome_trace = fetched_timeline.generate_chrome_trace_format()
                # Assumes the timelines/ directory exists — TODO confirm.
                with open('timelines/timeline_step_%d.json' % i, 'w') as f:
                    f.write(chrome_trace)
            profile_stop()
if __name__ == "__main__":
tf.app.run()
| 2 | 2 |
def make_string(s):
    """Return a string made of the first character of every word in *s*."""
    first_chars = []
    for word in s.split():
        first_chars.append(word[0])
    return "".join(first_chars)
| 3.140625 | 3 |
Algorithm/Implementation/Grading Students.py | rahamath2009/git-github.com-nishant-sethi-HackerRank | 76 | 12767682 |
import sys
def solve(grades):
    """Round each grade per HackerRank's grading rules.

    A grade below 38 is a fail and stays unchanged.  Otherwise, if the grade
    is within 2 of the next multiple of 5 it is rounded up to that multiple;
    anything further away is left as-is.

    BUGFIX: use floor division (//) so the rounded grade stays an int on
    Python 3 as well (the original '/' would produce a float there).
    """
    new_grades = []
    for grade in grades:
        if grade < 38 or grade % 5 < 3:
            # Failing grades, and grades more than 2 below the next
            # multiple of 5, are unchanged.
            new_grades.append(grade)
        else:
            # Round up to the next multiple of 5.
            new_grades.append((grade // 5 + 1) * 5)
    return new_grades
# Python 2 stdin driver (raw_input/xrange/print statement): read the count,
# then one grade per line, and print the rounded grades one per line.
n = int(raw_input().strip())
grades = []
grades_i = 0
for grades_i in xrange(n):
    grades_t = int(raw_input().strip())
    grades.append(grades_t)
result = solve(grades)
print "\n".join(map(str, result))
jianzhi_offer_31.py | JasonLeeSJTU/Algorithms_Python | 2 | 12767683 | <reponame>JasonLeeSJTU/Algorithms_Python<gh_stars>1-10
#!/usr/bin/env python
# encoding: utf-8
'''
@author: <NAME>
@license: (C) Copyright @ <NAME>
@contact: <EMAIL>
@file: jianzhi_offer_31.py
@time: 2019/4/23 16:09
@desc:
'''
class Solution:
    """Maximum contiguous-subarray sum (Jianzhi Offer 31, Kadane's algorithm)."""

    def FindGreatestSumOfSubArray(self, array):
        """Return the largest sum over all contiguous subarrays of *array*.

        Returns 0 for empty input (matching the original behaviour).

        BUGFIX: the original aliased the input (``f = array``) and then wrote
        into it, silently mutating the caller's list.  This version keeps a
        running best instead and leaves *array* untouched.
        """
        if not array:
            return 0
        best = current = array[0]
        for value in array[1:]:
            # Extend the previous run only if it contributes positively.
            current = value if current <= 0 else current + value
            if current > best:
                best = current
        return best
# Smoke test: the classic example array; expected output is 8 (from 1+2+2+... 
# actually the best run is [1, 2, 2] extended with the earlier 7? printed
# value is whatever FindGreatestSumOfSubArray returns for this input).
if __name__ == '__main__':
    res = Solution()
    array = [6,-3,-2,7,-15,1,2,2]
    b = res.FindGreatestSumOfSubArray(array)
    print(b)
| 3.25 | 3 |
spirecomm/ai/agent.py | ErinMyLungs/spirecomm | 1 | 12767684 | import time
import random
import os
from spirecomm.spire.game import Game
from spirecomm.spire.character import Intent, PlayerClass
import spirecomm.spire.card
from spirecomm.spire.screen import RestOption
from spirecomm.communication.action import *
from spirecomm.ai.priorities import *
from spirecomm.ai.drafter import IroncladDraftModel
import csv
class SimpleAgent:
    """Heuristic Slay the Spire agent for the spirecomm framework.

    Chooses combat plays, potions, map routes, shop/rest/event options and
    card drafts from per-class Priority tables, optionally delegating card
    drafting to an IroncladDraftModel, and logs run results to CSV when a
    game ends.
    """

    def __init__(self, chosen_class=PlayerClass.THE_SILENT, use_default_drafter=False, timestamp=None):
        """Set up per-run state and install priorities for *chosen_class*."""
        self.game = Game()
        self.errors = 0
        self.choose_good_card = False
        self.skipped_cards = False
        self.visited_shop = False
        self.map_route = []
        self.chosen_class = chosen_class
        self.priorities = Priority()
        self.drafter = IroncladDraftModel()
        self.change_class(chosen_class)
        self.use_default_drafter=use_default_drafter #if set to True, uses built in drafter from priorities module
        self.timestamp = timestamp

    def change_class(self, new_class):
        """Switch to *new_class* and install the matching Priority table.

        NOTE(review): the fallback branch assigns a random PlayerClass (not a
        Priority instance) to self.priorities — confirm this path is unused.
        """
        self.chosen_class = new_class
        if self.chosen_class == PlayerClass.THE_SILENT:
            self.priorities = SilentPriority()
        elif self.chosen_class == PlayerClass.IRONCLAD:
            self.priorities = IroncladPriority()
        elif self.chosen_class == PlayerClass.DEFECT:
            self.priorities = DefectPowerPriority()
        else:
            self.priorities = random.choice(list(PlayerClass))

    def handle_error(self, error):
        """Escalate any communication error as an exception."""
        raise Exception(error)

    def get_next_action_in_game(self, game_state):
        """Return the next Action to take for *game_state* during a run."""
        self.game = game_state
        #time.sleep(0.07)
        if self.game.choice_available or self.game.screen_type == ScreenType.GAME_OVER:
            return self.handle_screen()
        if self.game.proceed_available:
            return ProceedAction()
        if self.game.play_available:
            # Spend potions in boss rooms rather than hoarding them.
            if self.game.room_type == "MonsterRoomBoss" and len(self.game.get_real_potions()) > 0:
                potion_action = self.use_next_potion()
                if potion_action is not None:
                    return potion_action
            return self.get_play_card_action()
        if self.game.end_available:
            return EndTurnAction()
        # TODO: Possible fix for opening deck view on accident
        if self.game.screen_type == None:
            return ReturnAction()
        if self.game.cancel_available:
            return CancelAction()

    def get_next_action_out_of_game(self):
        """Start a new run with the configured class."""
        return StartGameAction(self.chosen_class)

    def is_monster_attacking(self):
        """Return True if any monster intends to attack (or intent is unknown)."""
        for monster in self.game.monsters:
            if monster.intent.is_attack() or monster.intent == Intent.NONE:
                return True
        return False

    def get_incoming_damage(self):
        """Estimate the total damage the player will take this turn."""
        incoming_damage = 0
        for monster in self.game.monsters:
            if not monster.is_gone and not monster.half_dead:
                if monster.move_adjusted_damage is not None:
                    incoming_damage += monster.move_adjusted_damage * monster.move_hits
                elif monster.intent == Intent.NONE:
                    # Unknown intent: rough guess that scales with the act.
                    incoming_damage += 5 * self.game.act
        return incoming_damage

    def get_low_hp_target(self):
        """Return the living monster with the least HP (attack target)."""
        available_monsters = [monster for monster in self.game.monsters if monster.current_hp > 0 and not monster.half_dead and not monster.is_gone]
        best_monster = min(available_monsters, key=lambda x: x.current_hp)
        return best_monster

    def get_high_hp_target(self):
        """Return the living monster with the most HP (non-attack target)."""
        available_monsters = [monster for monster in self.game.monsters if monster.current_hp > 0 and not monster.half_dead and not monster.is_gone]
        best_monster = max(available_monsters, key=lambda x: x.current_hp)
        return best_monster

    def many_monsters_alive(self):
        """Return True when more than one monster is still alive."""
        available_monsters = [monster for monster in self.game.monsters if monster.current_hp > 0 and not monster.half_dead and not monster.is_gone]
        return len(available_monsters) > 1

    def get_play_card_action(self):
        """Pick the best playable card (and a target if required) this turn."""
        playable_cards = [card for card in self.game.hand if card.is_playable]
        zero_cost_cards = [card for card in playable_cards if card.cost == 0]
        zero_cost_attacks = [card for card in zero_cost_cards if card.type == spirecomm.spire.card.CardType.ATTACK]
        zero_cost_non_attacks = [card for card in zero_cost_cards if card.type != spirecomm.spire.card.CardType.ATTACK]
        nonzero_cost_cards = [card for card in playable_cards if card.cost != 0]
        aoe_cards = [card for card in playable_cards if self.priorities.is_card_aoe(card)]
        # If current block already roughly covers incoming damage, prefer
        # offensive cards; otherwise at least avoid exhausting cards.
        if self.game.player.block > self.get_incoming_damage() - (self.game.act + 4):
            offensive_cards = [card for card in nonzero_cost_cards if not self.priorities.is_card_defensive(card)]
            if len(offensive_cards) > 0:
                nonzero_cost_cards = offensive_cards
            else:
                nonzero_cost_cards = [card for card in nonzero_cost_cards if not card.exhausts]
        if len(playable_cards) == 0:
            return EndTurnAction()
        # Preference order: free non-attacks, then paid cards (switching to
        # AoE when several monsters are alive), then free attacks.
        if len(zero_cost_non_attacks) > 0:
            card_to_play = self.priorities.get_best_card_to_play(zero_cost_non_attacks)
        elif len(nonzero_cost_cards) > 0:
            card_to_play = self.priorities.get_best_card_to_play(nonzero_cost_cards)
            if len(aoe_cards) > 0 and self.many_monsters_alive() and card_to_play.type == spirecomm.spire.card.CardType.ATTACK:
                card_to_play = self.priorities.get_best_card_to_play(aoe_cards)
        elif len(zero_cost_attacks) > 0:
            card_to_play = self.priorities.get_best_card_to_play(zero_cost_attacks)
        else:
            # This shouldn't happen!
            return EndTurnAction()
        if card_to_play.has_target:
            available_monsters = [monster for monster in self.game.monsters if monster.current_hp > 0 and not monster.half_dead and not monster.is_gone]
            if len(available_monsters) == 0:
                return EndTurnAction()
            if card_to_play.type == spirecomm.spire.card.CardType.ATTACK:
                target = self.get_low_hp_target()
            else:
                target = self.get_high_hp_target()
            return PlayCardAction(card=card_to_play, target_monster=target)
        else:
            return PlayCardAction(card=card_to_play)

    def use_next_potion(self):
        """Drink the first usable potion; target the weakest monster if needed.

        Returns None when no potion is usable.
        """
        for potion in self.game.get_real_potions():
            if potion.can_use:
                if potion.requires_target:
                    return PotionAction(True, potion=potion, target_monster=self.get_low_hp_target())
                else:
                    return PotionAction(True, potion=potion)

    def handle_screen(self):
        """Dispatch on the current screen type and return the chosen Action."""
        if self.game.screen_type == ScreenType.EVENT:
            if self.game.screen.event_id in ["Vampires", "Masked Bandits", "Knowing Skull", "Ghosts", "Liars Game", "Golden Idol", "Drug Dealer", "The Library"]:
                # For these risky events, always take the last option.
                return ChooseAction(len(self.game.screen.options) - 1)
            else:
                # NOTE: This looks like where Neow's blessing is chosen with the first option every time.
                return ChooseAction(0)
        elif self.game.screen_type == ScreenType.CHEST:
            return OpenChestAction()
        elif self.game.screen_type == ScreenType.SHOP_ROOM:
            if not self.visited_shop:
                self.visited_shop = True
                return ChooseShopkeeperAction()
            else:
                self.visited_shop = False
                return ProceedAction()
        elif self.game.screen_type == ScreenType.SHOP_SCREEN:
            # NOTE(review): when visited_shop is False this branch falls
            # through and the method returns None.  The purchase logic in the
            # second SHOP_SCREEN branch further down is unreachable because
            # this branch always matches first — confirm and merge them.
            if self.visited_shop:
                return LeaveAction()
        elif self.game.screen_type == ScreenType.REST:
            return self.choose_rest_option()
        elif self.game.screen_type == ScreenType.CARD_REWARD:
            if self.use_default_drafter:
                return self.default_choose_card_reward()
            else:
                return self.choose_card_reward()
        elif self.game.screen_type == ScreenType.COMBAT_REWARD:
            # Take rewards in order, skipping potions when full and cards
            # that were already deliberately skipped.
            for reward_item in self.game.screen.rewards:
                if reward_item.reward_type == RewardType.POTION and self.game.are_potions_full():
                    continue
                elif reward_item.reward_type == RewardType.CARD and self.skipped_cards:
                    continue
                else:
                    return CombatRewardAction(reward_item)
            self.skipped_cards = False
            return ProceedAction()
        elif self.game.screen_type == ScreenType.MAP:
            return self.make_map_choice()
        elif self.game.screen_type == ScreenType.BOSS_REWARD:
            relics = self.game.screen.relics
            best_boss_relic = self.priorities.get_best_boss_relic(relics)
            return BossRewardAction(best_boss_relic)
        elif self.game.screen_type == ScreenType.SHOP_SCREEN:
            # NOTE(review): unreachable — see the earlier SHOP_SCREEN branch.
            if self.game.screen.purge_available and self.game.gold >= self.game.screen.purge_cost:
                # TODO: This just purgest the first card in deck. Possibly hook into AI? Purity metrics? Purge card least like archetype?
                return ChooseAction(name="purge")
            for card in self.game.screen.cards:
                if self.game.gold >= card.price and not self.priorities.should_skip(card):
                    return BuyCardAction(card)
            for relic in self.game.screen.relics:
                if self.game.gold >= relic.price:
                    return BuyRelicAction(relic)
            return LeaveAction()
        elif self.game.screen_type == ScreenType.GRID:
            if not self.game.choice_available:
                return ProceedAction()
            # For upgrades pick the best cards; for purges pick the worst.
            if self.game.screen.for_upgrade or self.choose_good_card:
                available_cards = self.priorities.get_sorted_cards(self.game.screen.cards)
            else:
                available_cards = self.priorities.get_sorted_cards(self.game.screen.cards, reverse=True)
            num_cards = self.game.screen.num_cards
            return CardSelectAction(available_cards[:num_cards])
        elif self.game.screen_type == ScreenType.HAND_SELECT:
            if not self.game.choice_available:
                return ProceedAction()
            # Usually, we don't want to choose the whole hand for a hand select. 3 seems like a good compromise.
            num_cards = min(self.game.screen.num_cards, 3)
            return CardSelectAction(self.priorities.get_cards_for_action(self.game.current_action, self.game.screen.cards, num_cards))
        elif self.game.screen_type == ScreenType.GAME_OVER:
            # Record the finished run (score, seed, drafting history) to CSV.
            game_result = dict()
            game_result['score'] = self.game.screen.score
            if self.game.screen.victory == True:
                # Bonus so victories always outrank losses when sorting.
                game_result['score'] += 10000
            game_result['floor'] = self.game.floor
            game_result['seed'] = self.game.seed
            game_result['choices'] = self.drafter.deck_pick
            game_result['final_deck'] = self.drafter.deck
            game_result['deck_vector'] = self.drafter.vectorize_deck()
            game_result['time'] = time.time()
            if self.use_default_drafter:
                self.write_game_results(f'control_results_{self.timestamp}.csv', game_result)
            else:
                self.write_game_results(f'game_results_{self.timestamp}.csv', game_result)
            return ProceedAction()
        elif self.game.screen_type == None:
            return ReturnAction()
        else:
            return ProceedAction()

    def write_game_results(self, filepath:str, game_result:dict):
        """Append *game_result* as one CSV row, writing a header for new files.

        :param filepath: destination CSV path (relative to the game's
            working directory — presumably the SlayTheSpire folder; confirm)
        :param game_result: dictionary of run results (keys become columns)
        """
        mode = 'a'
        if not os.path.exists(os.path.abspath(filepath)):
            mode = 'w'
        with open(filepath, mode) as file:
            writer = csv.DictWriter(file, game_result.keys())
            if mode == 'w':
                writer.writeheader()
            writer.writerow(game_result)

    def choose_rest_option(self):
        """Pick a campfire action: heal when hurt, otherwise smith/lift/dig."""
        rest_options = self.game.screen.rest_options
        if len(rest_options) > 0 and not self.game.screen.has_rested:
            if RestOption.REST in rest_options and self.game.current_hp < self.game.max_hp / 2:
                return RestAction(RestOption.REST)
            # floor % 17 == 15 — presumably the last campfire before each
            # act boss, so top up HP there; confirm against the map layout.
            elif RestOption.REST in rest_options and self.game.act != 1 and self.game.floor % 17 == 15 and self.game.current_hp < self.game.max_hp * 0.9:
                return RestAction(RestOption.REST)
            elif RestOption.SMITH in rest_options:
                return RestAction(RestOption.SMITH)
            elif RestOption.LIFT in rest_options:
                return RestAction(RestOption.LIFT)
            elif RestOption.DIG in rest_options:
                return RestAction(RestOption.DIG)
            elif RestOption.REST in rest_options and self.game.current_hp < self.game.max_hp:
                return RestAction(RestOption.REST)
            else:
                return ChooseAction(0)
        else:
            return ProceedAction()

    def count_copies_in_deck(self, card):
        """Return how many copies of *card* (by card_id) the deck contains."""
        count = 0
        for deck_card in self.game.deck:
            if deck_card.card_id == card.card_id:
                count += 1
        return count

    def default_choose_card_reward(self):
        """Draft via the built-in priority tables (the control drafter)."""
        reward_cards = self.game.screen.cards
        # Outside combat, only consider cards we still want more copies of.
        if self.game.screen.can_skip and not self.game.in_combat:
            pickable_cards = [card for card in reward_cards if self.priorities.needs_more_copies(card, self.count_copies_in_deck(card))]
        else:
            pickable_cards = reward_cards
        if len(pickable_cards) > 0:
            potential_pick = self.priorities.get_best_card(pickable_cards)
            return CardRewardAction(potential_pick)
        elif self.game.screen.can_bowl:
            return CardRewardAction(bowl=True)
        else:
            self.skipped_cards = True
            return CancelAction()

    def choose_card_reward(self):
        """Choose a card reward using the neural-net drafter.

        :return: CardRewardAction with the selected card
        """
        reward_cards = self.game.screen.cards
        self.drafter.update_floor(self.game.floor)
        pick = self.drafter.choose_card(reward_cards)
        return CardRewardAction(pick)

    def generate_map_route(self):
        """Plan a full route through the act's map by dynamic programming.

        Layer by layer, tracks the best cumulative node reward reachable at
        each x position and the parent it came from, then backtracks from the
        best top-row node.  Stores the chosen x per floor in self.map_route.
        """
        node_rewards = self.priorities.MAP_NODE_PRIORITIES.get(self.game.act)
        best_rewards = {0: {node.x: node_rewards[node.symbol] for node in self.game.map.nodes[0].values()}}
        best_parents = {0: {node.x: 0 for node in self.game.map.nodes[0].values()}}
        min_reward = min(node_rewards.values())
        map_height = max(self.game.map.nodes.keys())
        for y in range(0, map_height):
            # Seed the next layer with a strongly negative baseline so any
            # reachable path overwrites it.
            best_rewards[y+1] = {node.x: min_reward * 20 for node in self.game.map.nodes[y+1].values()}
            best_parents[y+1] = {node.x: -1 for node in self.game.map.nodes[y+1].values()}
            for x in best_rewards[y]:
                node = self.game.map.get_node(x, y)
                best_node_reward = best_rewards[y][x]
                for child in node.children:
                    test_child_reward = best_node_reward + node_rewards[child.symbol]
                    if test_child_reward > best_rewards[y+1][child.x]:
                        best_rewards[y+1][child.x] = test_child_reward
                        best_parents[y+1][child.x] = node.x
        # Backtrack from the best final node to recover the route.
        best_path = [0] * (map_height + 1)
        best_path[map_height] = max(best_rewards[map_height].keys(), key=lambda x: best_rewards[map_height][x])
        for y in range(map_height, 0, -1):
            best_path[y - 1] = best_parents[y][best_path[y]]
        self.map_route = best_path

    def make_map_choice(self):
        """Follow the planned route, replanning at the start of each act."""
        if len(self.game.screen.next_nodes) > 0 and self.game.screen.next_nodes[0].y == 0:
            self.generate_map_route()
            self.game.screen.current_node.y = -1
        if self.game.screen.boss_available:
            return ChooseMapBossAction()
        chosen_x = self.map_route[self.game.screen.current_node.y + 1]
        for choice in self.game.screen.next_nodes:
            if choice.x == chosen_x:
                return ChooseMapNodeAction(choice)
        # This should never happen
        return ChooseAction(0)

    def reset_drafter(self, filepath=None):
        """Reset the drafter to its default configuration between runs.

        :param filepath: optional path to a weights.npy file to load
        """
        if not filepath:
            self.drafter = IroncladDraftModel()
        else:
            self.drafter = IroncladDraftModel(weights=filepath)

    def update_timestamp(self):
        """Set self.timestamp to the current whole-second epoch time.

        :return: the new timestamp string
        """
        self.timestamp = str(int(time.time()))
        return self.timestamp
make_instance.py | github-nakasho/Pokemon_opt | 0 | 12767685 | #!/usr/bin/env python3
import numpy as np
import sys
def make_instance():
    """Build the Pokemon type-effectiveness instance for the optimizer.

    Returns:
        type_matrix: 18x18 attack-type x defend-type damage multipliers;
            row/column order is the type list in the comment below.
        weak_matrix: 0/1 indicator of super-effective (x2.0) cells.
        resist_matrix: 0/1 indicator of resisted (x0.5) or immune (x0.0)
            cells.
        enemy: list of three one-hot enemy-type vectors.
        skill: list of, per enemy, four one-hot skill-type vectors.
    """
    # normal、fire、water、electric、grass、ice、fighting, poison, ground,
    # flying, psychic, bug, rock, ghost, dragon, dark, steel, fairy
    type_matrix = np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.0, 1.0, 1.0, 0.5, 1.0],
                            [1.0, 0.5, 0.5, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 0.5, 1.0, 2.0, 1.0],
                            [1.0, 2.0, 0.5, 1.0, 0.5, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0, 1.0],
                            [1.0, 1.0, 2.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0],
                            [1.0, 0.5, 2.0, 1.0, 0.5, 1.0, 1.0, 0.5, 2.0, 0.5, 1.0, 0.5, 2.0, 1.0, 0.5, 1.0, 0.5, 1.0],
                            [1.0, 0.5, 0.5, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0],
                            [2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5, 0.5, 0.5, 2.0, 0.0, 1.0, 2.0, 2.0, 0.5],
                            [1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 0.0, 2.0],
                            [1.0, 2.0, 1.0, 2.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.0, 1.0, 0.5, 2.0, 1.0, 1.0, 1.0, 2.0, 1.0],
                            [1.0, 1.0, 1.0, 0.5, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 0.5, 1.0],
                            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0, 0.0, 0.5, 1.0],
                            [1.0, 0.5, 1.0, 1.0, 2.0, 1.0, 0.5, 0.5, 1.0, 0.5, 2.0, 1.0, 1.0, 0.5, 1.0, 2.0, 0.5, 0.5],
                            [1.0, 2.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 0.5, 2.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0],
                            [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 1.0],
                            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 0.5, 0.0],
                            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 2.0, 1.0, 0.5, 1.0, 0.5],
                            [1.0, 0.5, 0.5, 0.5, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 0.5, 2.0],
                            [1.0, 0.5, 1.0, 1.0, 1.0, 1.0, 2.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 0.5, 1.0]])
    # make weak_matrix
    # 1.0 where the attack is super-effective (multiplier == 2.0).
    weak_matrix = np.where(type_matrix==2.0, 1.0, 0.0)
    # 1.0 where the attack is resisted (0.5) or has no effect (0.0).
    resist_matrix = np.where(type_matrix<1.0, 1.0, 0.0)
    # set enemy & skill
    # enemy1 — fire type with four fire skills (one-hot index 1).
    enemy1 = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    skill1 = [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # enemy2 — normal type with ice, fairy and two normal skills.
    enemy2 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    skill2 = [[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
              [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # enemy3 — grass type with four grass skills (one-hot index 4).
    enemy3 = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    skill3 = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
              [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    # combine enemy into one list
    enemy = [enemy1, enemy2, enemy3]
    # combine skill into one list
    skill = [skill1, skill2, skill3]
    return type_matrix, weak_matrix, resist_matrix, enemy, skill
| 2.5625 | 3 |
garble.py | dehall/data-owner-tools | 4 | 12767686 | #!/usr/bin/env python3
import argparse
import glob
import os
from pathlib import Path
import subprocess
import sys
from zipfile import ZipFile
def parse_arguments(argv=None):
    """Parse command line arguments.

    :param argv: Optional argument list (defaults to ``sys.argv[1:]``);
        accepting it as a parameter keeps the parser testable.
    :return: Parsed ``argparse.Namespace`` with validated paths.
    """
    parser = argparse.ArgumentParser(
        description="Tool for garbling PII for PPRL purposes in the CODI project"
    )
    parser.add_argument("sourcefile", help="Source PII CSV file")
    parser.add_argument("schemadir", help="Directory of linkage schema")
    parser.add_argument("secretfile", help="Location of de-identification secret file")
    parser.add_argument(
        '-z', '--outputzip', dest='outputzip', default="garbled.zip",
        help="Specify a name for the .zip file. Default is garbled.zip"
    )
    parser.add_argument(
        '-o', '--outputdir', dest='outputdir', default="output",
        help="Specify an output directory. Default is output/"
    )
    args = parser.parse_args(argv)
    # Fail fast on paths the rest of the pipeline depends on.
    if not Path(args.schemadir).exists():
        parser.error("Unable to find directory: " + args.schemadir)
    if not Path(args.secretfile).exists():
        parser.error("Unable to find secret file: " + args.secretfile)
    return args
def validate_secret_file(secret_file):
    """Read the de-identification secret and abort if it is too short.

    :param secret_file: Path to the file holding the secret.
    :return: The secret file's full contents.
    """
    with open(secret_file, "r") as secret_text:
        secret = secret_text.read()
    # A short secret weakens the hashing, so refuse to continue.
    if len(secret) < 256:
        sys.exit("Secret length not long enough to ensure proper de-identification")
    return secret
def garble_pii(args):
    """Hash the source PII once per schema file using ``anonlink hash``.

    :param args: Parsed namespace from :func:`parse_arguments`.
    :return: List of paths to the generated CLK files in ``args.outputdir``.
    """
    secret_file = Path(args.secretfile)
    source_file = args.sourcefile
    # Bug fix: previously hard-coded "output", silently ignoring --outputdir.
    os.makedirs(args.outputdir, exist_ok=True)
    secret = validate_secret_file(secret_file)
    clk_files = []
    schema = glob.glob(os.path.join(args.schemadir, "*.json"))
    for s in schema:
        with open(s, "r") as schema_file:
            file_contents = schema_file.read()
            if "doubleHash" in file_contents:
                sys.exit(
                    "The following schema uses doubleHash, which is insecure: "
                    + str(s)
                )
        # Path(s).name is portable; the old s.split('/')[-1] broke on Windows.
        output_file = Path(args.outputdir, Path(s).name)
        subprocess.run(
            ["anonlink", "hash", source_file, secret, str(s), str(output_file)],
            check=True
        )
        clk_files.append(output_file)
    return clk_files
def create_clk_zip(clk_files, args):
    """Bundle the generated CLK files into a single zip archive.

    :param clk_files: Paths of CLK files to include in the archive.
    :param args: Namespace providing ``outputdir`` and ``outputzip``.
    """
    zip_path = os.path.join(args.outputdir, args.outputzip)
    with ZipFile(zip_path, "w") as garbled_zip:
        for clk_file in clk_files:
            garbled_zip.write(clk_file)
    # Report the same path we actually wrote (portable separator), instead
    # of rebuilding it with a hard-coded '/'.
    print("Zip file created at: " + zip_path)
def main():
    """CLI entry point: parse arguments, garble the PII, zip the CLK files."""
    args = parse_arguments()
    clk_files = garble_pii(args)
    create_clk_zip(clk_files, args)


if __name__ == "__main__":
    main()
| 3.03125 | 3 |
database/inserts.py | retropleinad/DnD-Calculator | 0 | 12767687 | <filename>database/inserts.py<gh_stars>0
from . import util
path = util.PATH
"""
This file contains methods to insert data into different tables
"""
# Insert into the region table
def insert_region(name, description):
    """Store a new region row with the given name and description."""
    query = """
    INSERT INTO region(name, description)
    VALUES (?, ?)
    """
    util.commit_query(query, (name, description))
# Insert into the location table
def insert_location(name, description, region):
    """Store a new location row linked to the given region.

    The previous implementation built an unused ``region_name`` query via
    string formatting (an SQL-injection-prone pattern); it has been removed
    and ``region`` is bound as a parameter like everything else.
    """
    query = """
    INSERT INTO location(name, description, region)
    VALUES (?, ?, ?)
    """
    util.commit_query(query, (name, description, region))
# Insert into the organization table
def insert_organization(name, description, region, headquarters):
    """Store an organization; ``region``/``headquarters`` reference other tables."""
    args = (name, description, region, headquarters)
    query = """
    INSERT INTO organization(name, description, region, headquarters)
    VALUES (?, ?, ?, ?)
    """
    util.commit_query(query, args)
# Insert into the class table
def insert_class(name, description, source, page):
    """Store a character class with its source book and page number."""
    args = (name, description, source, page)
    query = """
    INSERT INTO class(name, description, source, page)
    VALUES (?, ?, ?, ?)
    """
    util.commit_query(query, args)
# Insert into the player characters table
def insert_pcs(player, name, description, alive, dnd_class, origin, area):
    """Store a player character; ``dnd_class`` maps to the SQL column ``class``."""
    args = (player, name, description, alive, dnd_class, origin, area)
    query = """
    INSERT INTO pcs(player, name, description, alive, class, origin, area)
    VALUES (?, ?, ?, ?, ?, ?, ?)
    """
    util.commit_query(query, args)
# Insert into the npcs table
def insert_npcs(name, description, region, headquarters):
    """Store an NPC row.

    NOTE(review): the parameter list mirrors ``insert_organization`` — confirm
    the ``npcs`` table really has ``region``/``headquarters`` columns.
    """
    args = (name, description, region, headquarters)
    query = """
    INSERT INTO npcs(name, description, region, headquarters)
    VALUES (?, ?, ?, ?)
    """
    util.commit_query(query, args)
# Insert into the items table
def insert_item(name, description):
    """Store an item row with the given name and description."""
    args = (name, description)
    query = """
    INSERT INTO items(name, description)
    VALUES (?, ?)
    """
    util.commit_query(query, args)
# Insert into the owner table
def insert_owner(item, pc, npc, organization):
    """Record the owner of an item (a PC, an NPC, or an organization).

    Fixes the original statement, which was missing the closing parenthesis
    and the fourth placeholder, and listed a ``description`` column where the
    ``organization`` argument is bound.
    """
    query = """
    INSERT INTO item_owner(item, pc, npc, organization)
    VALUES (?, ?, ?, ?)
    """
    util.commit_query(query, (item, pc, npc, organization))
client/python/GravityWebsocketApi.py | philoxnard/gravtest | 0 | 12767688 | <filename>client/python/GravityWebsocketApi.py
#!/usr/bin/python
"""
Client code that interacts with the web server over Websockets
Written by: <NAME>
Copyright 2019 Creative Collisions Technology, LLC
"""
import json
from websocket import create_connection
WEBSOCKET_URL_TEMPLATE = "ws://%s/ws1.ws"
class GravityWebsocketApi():
    # Thin wrapper around a single websocket connection to a Gravity server.
    # NOTE: this file is Python 2 (print statements).
    def __init__(self, gravityIP):
        # The connection is opened immediately and stays open until
        # closeWebsocket() is called.
        self.gravity_ip = gravityIP
        self.gravity_websocket_url = WEBSOCKET_URL_TEMPLATE % self.gravity_ip
        self.ws = create_connection( self.gravity_websocket_url )
    def sendMessage(self, message):
        # Serialize the message dict to JSON and send it as one frame.
        message_str = json.dumps( message )
        print "Sending: %s" % message_str
        self.ws.send(message_str)
    def receiveMessage(self):
        # Blocks until the server sends a frame; returns the raw payload.
        result = self.ws.recv()
        print "Received '%s'" % result
        return result
    def closeWebsocket(self):
        self.ws.close()
    def websocketTest(self):
        # Round-trip smoke test: send a fixed command and return the reply.
        message = {}
        message["command"] = "ajax_test"
        message["value"] = "TEST1234"
        self.sendMessage( message )
        result = self.receiveMessage()
        return result
if __name__ == "__main__":
    # Manual smoke test against a locally running Gravity server.
    gravity_api = GravityWebsocketApi( "localhost" )
    gravity_api.websocketTest()
test/test_codeTime.py | adarshtri/SE_Fall20_Project-1 | 0 | 12767689 | <gh_stars>0
import sublime
import sys
from datetime import datetime as dt
from unittest import TestCase
from unittest.mock import Mock, patch
version = sublime.version()
codeTime = sys.modules["CodeTime.code.SublimePlugin.codeTime"]
class TestFunctions(TestCase):
    """Unit tests for the codeTime plugin's view activation hooks."""
    @patch('time.time', return_value=100)
    def test_when_activated(self, mock_time):
        """when_activated should look up the window of the activated view."""
        view = Mock()
        # NOTE(review): Sublime views expose file_name(); 'filename' here is
        # probably a typo, but a Mock accepts any attribute so it passes.
        view.filename.return_value = "sample.txt"
        # datetime = Mock()
        codeTime.when_activated(view)
        view.window.assert_called_once()
    def test_when_deactivated(self):
        """when_deactivated should close out the timing entry for the view."""
        view = Mock()
        view.file_name.return_value = "sample.txt"
        curr_date = dt.now().strftime('%Y-%m-%d')
        # Seed the plugin's per-day timing table for today's date.
        codeTime.file_times_dict[curr_date] = {'sample.txt': ["1234", None]}
        # NOTE(review): the view Mock itself is never called, so this
        # assertion looks like it would always fail — and when_deactivated is
        # never actually invoked in this test. Confirm the intended target.
        view.assert_called_once()
| 2.703125 | 3 |
tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial2_Solution_0349af2c.py | liuxiaomiao123/NeuroMathAcademy | 2 | 12767690 | <filename>tutorials/W3D2_DynamicNetworks/solutions/W3D2_Tutorial2_Solution_0349af2c.py
def get_eig_Jacobian(pars, fp):
    """
    Compute the eigenvalues of the Jacobian of the Wilson-Cowan system
    evaluated at a fixed point.

    Args:
      pars : Parameter dictionary
      fp   : fixed point (E, I), array

    Returns:
      evals : 2x1 vector of eigenvalues of the Jacobian matrix
    """
    # Get the parameters
    tau_E, a_E, theta_E = pars['tau_E'], pars['a_E'], pars['theta_E']
    tau_I, a_I, theta_I = pars['tau_I'], pars['a_I'], pars['theta_I']
    wEE, wEI = pars['wEE'], pars['wEI']
    wIE, wII = pars['wIE'], pars['wII']
    I_ext_E, I_ext_I = pars['I_ext_E'], pars['I_ext_I']
    # Unpack the fixed point
    E = fp[0]
    I = fp[1]
    J = np.zeros((2, 2))
    # Total input to each population at the fixed point (hoisted so all four
    # entries use consistent arguments to dF).
    x_E = wEE * E - wEI * I + I_ext_E
    x_I = wIE * E - wII * I + I_ext_I
    # Jacobian matrix; dF is the derivative of the transfer function.
    J[0, 0] = (-1 + wEE * dF(x_E, a_E, theta_E)) / tau_E  # dGE_dE
    J[0, 1] = (-wEI * dF(x_E, a_E, theta_E)) / tau_E      # dGE_dI
    J[1, 0] = (wIE * dF(x_I, a_I, theta_I)) / tau_I       # dGI_dE
    # Bug fix: I_ext_I belongs inside dF's input argument, not added to the
    # gain a_I (the original J[1,1] disagreed with J[1,0] on both counts).
    J[1, 1] = (-1 - wII * dF(x_I, a_I, theta_I)) / tau_I  # dGI_dI
    # Eigenvalues
    evals = np.linalg.eig(J)[0]
    return evals
# Evaluate stability of each fixed point: all eigenvalues with negative real
# part => stable; any eigenvalue with positive real part => unstable.
eig_1 = get_eig_Jacobian(pars, x_fp_1)
eig_2 = get_eig_Jacobian(pars, x_fp_2)
eig_3 = get_eig_Jacobian(pars, x_fp_3)
print(eig_1, 'Stable point')
print(eig_2, 'Unstable point')
print(eig_3, 'Stable point')
forum/demo/06-demo.py | twiindan/api_lessons | 0 | 12767691 | <reponame>twiindan/api_lessons
__author__ = 'arobres'
import requests
### CUSTOM HEADERS ###
# Custom request headers are echoed back via response.request.headers.
headers = {'my_header': 'important_header', 'content-type': 'application/json'}
response = requests.get('https://api-testing-conference.herokuapp.com/v1.0', headers=headers)
print(response.request.headers)
### COOKIES ###
# First request obtains the session cookies; the second replays them, which
# should change the server's welcome response.
url = 'https://api-testing-conference.herokuapp.com/v1.0/welcome'
response = requests.get(url=url)
cookies = response.cookies
print(response.text)
response = requests.get(url=url, cookies=cookies)
print(response.text)
# File upload demo: POST a sibling script as multipart/form-data.
# NOTE(review): the file handle is never closed; consider a with-block.
url = 'http://httpbin.org/post'
files = {'file': open('01-demo.py', 'rb')}
response = requests.post(url, files=files)
print(response.status_code)
print(response.content)
tests/profiles/test_constructors.py | pyroll-project/pyroll-core | 0 | 12767692 | import logging
import matplotlib.pyplot as plt
import numpy as np
import pytest
from shapely.affinity import rotate
from pyroll.core import SquareGroove, Profile
# Shared fixture groove used by the from_groove tests below.
groove = SquareGroove(0, 3, tip_depth=20, tip_angle=91 / 180 * np.pi)


def test_from_groove():
    """Profiles can be built from a groove via width/height or filling/gap."""
    Profile.from_groove(groove, width=45, height=50)
    Profile.from_groove(groove, filling=0.9, gap=3)


def test_from_groove_errors():
    """Over-/under-specified size arguments raise TypeError; non-positive values raise ValueError."""
    with pytest.raises(TypeError):
        Profile.from_groove(groove, width=55, filling=0.9, height=50, gap=3)
    with pytest.raises(TypeError):
        Profile.from_groove(groove, width=55, height=50, gap=3)
    with pytest.raises(TypeError):
        Profile.from_groove(groove, width=55, filling=0.9, height=50)
    with pytest.raises(TypeError):
        Profile.from_groove(groove, height=50)
    with pytest.raises(TypeError):
        Profile.from_groove(groove, gap=3)
    with pytest.raises(TypeError):
        Profile.from_groove(groove, width=55)
    with pytest.raises(TypeError):
        Profile.from_groove(groove, filling=0.9)
    with pytest.raises(ValueError):
        Profile.from_groove(groove, height=-1, width=50)
    with pytest.raises(ValueError):
        Profile.from_groove(groove, gap=-1, width=50)
    with pytest.raises(ValueError):
        Profile.from_groove(groove, width=-1, height=50)
    with pytest.raises(ValueError):
        Profile.from_groove(groove, filling=0, height=50)


def test_from_groove_warnings(caplog):
    """Overfilled/oversized profiles should log warnings rather than fail."""
    logging.getLogger("pyroll").error("Marker Error")
    Profile.from_groove(groove, width=55, height=50)
    Profile.from_groove(groove, filling=1.1, gap=3)
    if not caplog.records:
        pytest.xfail("Expected to fail if ran together with CLI tests, since CLI is modifying logging, so pytest does not capture.")
    assert len([r for r in caplog.records if r.levelname == "WARNING" and r.msg.startswith("Encountered")]) > 1


def test_round():
    """radius and diameter=2*radius must produce identical round profiles."""
    p1 = Profile.round(radius=15)
    p2 = Profile.round(diameter=30)
    assert p1.cross_section == p2.cross_section


def test_round_errors():
    """Non-positive radius or diameter raises ValueError."""
    with pytest.raises(ValueError):
        Profile.round(radius=-1)
    with pytest.raises(ValueError):
        Profile.round(diameter=0)


def test_square():
    """side and diagonal=side*sqrt(2) must produce identical square profiles."""
    p1 = Profile.square(side=10, corner_radius=1)
    p2 = Profile.square(diagonal=10 * np.sqrt(2), corner_radius=1)
    assert p1.cross_section == p2.cross_section
    p3 = Profile.square(side=10)
    p4 = Profile.square(diagonal=10 * np.sqrt(2))
    assert p3.cross_section == p4.cross_section


def test_square_errors():
    """Conflicting, missing, or non-positive square arguments raise."""
    with pytest.raises(TypeError):
        Profile.square(side=10, diagonal=10)
    with pytest.raises(TypeError):
        Profile.square()
    with pytest.raises(ValueError):
        Profile.square(side=-1)
    with pytest.raises(ValueError):
        Profile.square(diagonal=0)
    with pytest.raises(ValueError):
        Profile.square(corner_radius=-1, side=10)
def test_box():
    """Box profiles construct with and without a corner radius."""
    Profile.box(height=10, width=20)
    Profile.box(height=10, width=20, corner_radius=1)


def test_box_errors():
    """Non-positive box dimensions or corner radius raise ValueError."""
    with pytest.raises(ValueError):
        Profile.box(height=-1, width=5)
    with pytest.raises(ValueError):
        Profile.box(height=10, width=-1)
    with pytest.raises(ValueError):
        Profile.box(corner_radius=-1, height=10, width=5)


def test_diamond():
    """Diamond profiles construct with and without a corner radius."""
    Profile.diamond(height=10, width=20)
    Profile.diamond(height=10, width=20, corner_radius=1)


def test_diamond_errors():
    """Non-positive diamond dimensions or corner radius raise ValueError."""
    with pytest.raises(ValueError):
        Profile.diamond(height=-1, width=5)
    with pytest.raises(ValueError):
        Profile.diamond(height=10, width=-1)
    with pytest.raises(ValueError):
        Profile.diamond(corner_radius=-1, height=10, width=5)


def test_square_box_equivalence():
    """A square equals a same-sized box rotated by 45 degrees (area difference ~ 0)."""
    p1 = Profile.square(side=10, corner_radius=0)
    p2 = Profile.box(height=10, width=10, corner_radius=0)
    assert np.isclose(p1.cross_section.symmetric_difference(rotate(p2.cross_section, angle=45, origin=(0, 0))).area, 0)
    p1 = Profile.square(side=10, corner_radius=2)
    p2 = Profile.box(height=10, width=10, corner_radius=2)
    assert np.isclose(p1.cross_section.symmetric_difference(rotate(p2.cross_section, angle=45, origin=(0, 0))).area, 0)
| 2.140625 | 2 |
tests.py | synapticarbors/fishers_exact_test | 47 | 12767693 | import numpy as np
import pytest
from fisher.cfisher import pvalue, pvalue_npy
# Computed by ``fisher.test`` in R 3.2.2 and printed with
# ``sprintf(".16f")``.
# Each case: 2x2 contingency table and the (left, right, two-tail) p-values.
@pytest.mark.parametrize("table,expected", [
    ([[100, 2], [1000, 5]],
     (0.1300759363430016, 0.9797904453147230, 0.1300759363430016)),
    ([[2, 100], [5, 1000]],
     (0.9797904453147230, 0.1300759363430016, 0.1300759363430016)),
    ([[2, 7], [8, 2]],
     (0.0185217259520665, 0.9990149169715733, 0.0230141375652212)),
    ([[5, 1], [10, 10]],
     (0.9782608695652173, 0.1652173913043478, 0.1973244147157191)),
    ([[5, 15], [20, 20]],
     (0.0562577507439996, 0.9849086665340765, 0.0958044001247763)),
    ([[5, 16], [20, 25]],
     (0.0891382278309642, 0.9723490195633506, 0.1725864953812995)),
    ([[10, 5], [10, 1]],
     (0.1652173913043479, 0.9782608695652174, 0.1973244147157192)),
    ([[10, 5], [10, 0]],
     (0.0565217391304348, 1.0000000000000000, 0.0612648221343874)),
    ([[5, 0], [1, 4]],
     (1.0000000000000000, 0.0238095238095238, 0.0476190476190476)),
    ([[0, 5], [1, 4]],
     (0.5000000000000000, 1.0000000000000000, 1.0000000000000000)),
    ([[5, 1], [0, 4]],
     (1.0000000000000000, 0.0238095238095238, 0.0476190476190476)),
    ([[0, 1], [3, 2]],
     (0.4999999999999999, 1.0000000000000000, 1.0000000000000000))
])
def test_against_r(table, expected):
    """Fisher exact p-values must agree with R's fisher.test to 1e-10."""
    epsilon = 1e-10
    p = pvalue(table[0][0], table[0][1], table[1][0], table[1][1])
    assert abs(p.left_tail - expected[0]) < epsilon
    assert abs(p.right_tail - expected[1]) < epsilon
    assert abs(p.two_tail - expected[2]) < epsilon
| 1.710938 | 2 |
# Codeforces "Soldier and Bananas": the i-th banana costs k*i dollars.
# Read k (base price), n (money on hand) and w (bananas wanted), then print
# how much must be borrowed (0 if n already covers the total cost).
k, n, w = map(int, input().split())
# Closed-form sum of k*1 + k*2 + ... + k*w; the original loop also carried a
# dead `money != -1` guard that could never trigger.
total_cost = k * w * (w + 1) // 2
print(max(total_cost - n, 0))
Python_ex110_ex111/uteis.py | gabrieldepaiva/Exercicios-CursoEmVideo | 0 | 12767695 | <filename>Python_ex110_ex111/uteis.py
def fatorial (n):
    """Return the factorial of n (1 when n < 2)."""
    produto = 1
    for fator in range(2, n + 1):
        produto *= fator
    return produto
def dobro (n):
    """Return the double of the given value."""
    return n * 2
def triplo (n):
    """Return the triple of the given value."""
    return n * 3
fem/gui/vtk_widget/vtk_graphics/pipelines/visible/__init__.py | mjredmond/FEMApp | 1 | 12767696 | from __future__ import print_function, absolute_import
from .visible_filter import VisibleFilter
from .visible_pipeline import VisiblePipeline
from .visible_selection import VisibleSelection
| 1.023438 | 1 |
tests/test_core.py | cjolowicz/retrocookie | 15 | 12767697 | """Tests for core module."""
from dataclasses import dataclass
from pathlib import Path
import pytest
from .helpers import append
from .helpers import branch
from .helpers import touch
from .helpers import write
from retrocookie import core
from retrocookie import git
from retrocookie import retrocookie
def in_template(path: Path) -> Path:
    """Prepend the template directory to the path."""
    template_root = Path("{{cookiecutter.project_slug}}")
    return template_root / path
@dataclass
class Example:
    """Example data for the test cases."""

    # File that the tests create/modify in the instance repository.
    path: Path = Path("README.md")
    # Text appended to that file.
    text: str = "Lorem Ipsum\n"


@pytest.fixture
def example() -> Example:
    """Fixture with example data."""
    return Example()
# Each case: text committed in the instance repo, and the expected rewritten
# text in the template (project name substituted, Jinja braces escaped).
@pytest.mark.parametrize(
    "text, expected",
    [
        ("Lorem Ipsum\n", "Lorem Ipsum\n"),
        (
            "This project is called example.\n",
            "This project is called {{cookiecutter.project_slug}}.\n",
        ),
        (
            "python-version: ${{ matrix.python-version }}",
            'python-version: ${{"{{"}} matrix.python-version {{"}}"}}',
        ),
    ],
)
def test_rewrite(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
    text: str,
    expected: str,
    example: Example,
) -> None:
    """It rewrites the file contents as expected."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    # Commit the text on a topic branch in the instance repository.
    with branch(instance, "topic", create=True):
        append(instance, example.path, text)
    # Import the branch into the template repository.
    retrocookie(
        instance.path,
        path=cookiecutter.path,
        branch="topic",
        create_branch="topic",
    )
    with branch(cookiecutter, "topic"):
        assert expected in cookiecutter.read_text(in_template(example.path))


def test_branch(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
    example: Example,
) -> None:
    """It creates the specified branch."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    with branch(instance, "topic", create=True):
        append(instance, example.path, example.text)
    # create_branch names the target branch in the template repository.
    retrocookie(
        instance.path,
        path=cookiecutter.path,
        branch="topic",
        create_branch="just-another-branch",
    )
    with branch(cookiecutter, "just-another-branch"):
        assert example.text in cookiecutter.read_text(in_template(example.path))


def test_upstream(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
    example: Example,
) -> None:
    """It does not apply changes from the upstream branch."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    another = Path("file.txt")
    # Commit `another` on upstream, then branch topic off it with one more
    # commit; only the topic-only commit should be imported.
    with branch(instance, "upstream", create=True):
        touch(instance, another)
        with branch(instance, "topic", create=True):
            append(instance, example.path, example.text)
    retrocookie(
        instance.path,
        path=cookiecutter.path,
        upstream="upstream",
        branch="topic",
        create_branch="topic",
    )
    with branch(cookiecutter, "topic"):
        assert not cookiecutter.exists(another)
        assert example.text in cookiecutter.read_text(in_template(example.path))
def test_single_commit(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
    example: Example,
) -> None:
    """It cherry-picks the specified commit."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    append(instance, example.path, example.text)
    # "HEAD" selects just the latest commit on the current branch.
    retrocookie(instance.path, ["HEAD"], path=cookiecutter.path)
    assert example.text in cookiecutter.read_text(in_template(example.path))


def test_multiple_commits_sequential(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
) -> None:
    """It cherry-picks the specified commits."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    names = "first", "second"
    for name in names:
        touch(instance, Path(name))
    # A revision range ("HEAD~2..") selects both commits at once.
    retrocookie(instance.path, ["HEAD~2.."], path=cookiecutter.path)
    for name in names:
        path = in_template(Path(name))
        assert cookiecutter.exists(path)


def test_multiple_commits_parallel(
    cookiecutter_repository: git.Repository,
    cookiecutter_instance_repository: git.Repository,
) -> None:
    """It cherry-picks the specified commits."""
    cookiecutter, instance = cookiecutter_repository, cookiecutter_instance_repository
    names = "first", "second"
    # One commit per branch; both branch tips are passed as commit refs.
    for name in names:
        with branch(instance, name, create=True):
            touch(instance, Path(name))
    retrocookie(instance.path, names, path=cookiecutter.path)
    for name in names:
        path = in_template(Path(name))
        assert cookiecutter.exists(path)


def test_find_template_directory_fails(tmp_path: Path) -> None:
    """It raises an exception when there is no template directory."""
    repository = git.Repository.init(tmp_path)
    with pytest.raises(Exception):
        core.find_template_directory(repository)


def test_load_context_error(cookiecutter_instance_repository: git.Repository) -> None:
    """It raises an exception when .cookiecutter.json is not JSON dictionary."""
    # A JSON list is valid JSON but not a valid context mapping.
    write(cookiecutter_instance_repository, Path(".cookiecutter.json"), "[]")
    with pytest.raises(TypeError):
        core.load_context(cookiecutter_instance_repository, "HEAD")
| 2.5 | 2 |
tomotopy/__init__.py | adbmd/tomotopy | 1 | 12767698 | <filename>tomotopy/__init__.py<gh_stars>1-10
"""
Python package `tomotopy` provides types and functions for various Topic Model
including LDA, DMR, HDP, MG-LDA, PA and HPA. It is written in C++ for speed and provides Python extension.
.. include:: ./documentation.rst
"""
from tomotopy._version import __version__
import tomotopy.utils as utils
from enum import IntEnum
# NOTE: the bare string literals after each member below are intentional —
# pdoc picks them up as per-member documentation. Do not remove them.
class TermWeight(IntEnum):
    """
    This enumeration is for Term Weighting Scheme and it is based on following paper:

    > * <NAME>., & <NAME>. (2010, June). Term weighting schemes for latent dirichlet allocation. In human language technologies: The 2010 annual conference of the North American Chapter of the Association for Computational Linguistics (pp. 465-473). Association for Computational Linguistics.

    There are three options for term weighting and the basic one is ONE. The others also can be applied for all topic models in `tomotopy`.
    """

    ONE = 0
    """ Consider every term equal (default)"""

    IDF = 1
    """
    Use Inverse Document Frequency term weighting.

    Thus, a term occurring at almost every document has very low weighting
    and a term occurring at a few document has high weighting.
    """

    PMI = 2
    """
    Use Pointwise Mutual Information term weighting.
    """

class ParallelScheme(IntEnum):
    """
    This enumeration is for Parallelizing Scheme:
    There are three options for parallelizing and the basic one is DEFAULT. Not all models supports all options.
    """

    DEFAULT = 0
    """tomotopy chooses the best available parallelism scheme for your model"""

    NONE = 1
    """
    Turn off multi-threading for Gibbs sampling at training or inference. Operations other than Gibbs sampling may use multithreading.
    """

    COPY_MERGE = 2
    """
    Use Copy and Merge algorithm from AD-LDA. It consumes RAM in proportion to the number of workers.
    This has advantages when you have a small number of workers and a small number of topics and vocabulary sizes in the model.
    Prior to version 0.5, all models used this algorithm by default.

    > * Newman, D., <NAME>., <NAME>., & <NAME>. (2009). Distributed algorithms for topic models. Journal of Machine Learning Research, 10(Aug), 1801-1828.
    """

    PARTITION = 3
    """
    Use Partitioning algorithm from PCGS. It consumes only twice as much RAM as a single-threaded algorithm, regardless of the number of workers.
    This has advantages when you have a large number of workers or a large number of topics and vocabulary sizes in the model.

    > * <NAME>., <NAME>., & <NAME>. (2009). Parallel inference for latent dirichlet allocation on graphics processing units. In Advances in neural information processing systems (pp. 2134-2142).
    """
isa = ''
"""
Indicate which SIMD instruction set is used for acceleration.
It can be one of `'avx2'`, `'avx'`, `'sse2'` and `'none'`.
"""

# This code is an autocomplete-hint for IDE.
# The object imported here will be overwritten by _load() function.
try: from _tomotopy import *
except: pass

def _load():
    # Import the best available SIMD-specific extension module
    # (avx2 > avx > sse2 > none) and splice its public names into this
    # package's namespace. The candidate list can be overridden with the
    # TOMOTOPY_ISA environment variable (comma-separated).
    import importlib, os
    from cpuinfo import get_cpu_info
    flags = get_cpu_info()['flags']
    env_setting = os.environ.get('TOMOTOPY_ISA', '').split(',')
    if not env_setting[0]: env_setting = []
    isas = ['avx2', 'avx', 'sse2', 'none']
    isas = [isa for isa in isas if (env_setting and isa in env_setting) or (not env_setting and (isa in flags or isa == 'none'))]
    if not isas: raise RuntimeError("No isa option for " + str(env_setting))
    for isa in isas:
        try:
            mod_name = '_tomotopy' + ('_' + isa if isa != 'none' else '')
            # Re-export every non-underscore name from the chosen module.
            globals().update({k:v for k, v in vars(importlib.import_module(mod_name)).items() if not k.startswith('_')})
            return
        except:
            # Only the last candidate's import failure is fatal.
            if isa == isas[-1]: raise
_load()
import os
# Korean localization: when TOMOTOPY_LANG=kr, replace the module docstring
# and pdoc per-member docs with their Korean translations. These strings are
# consumed by pdoc at documentation-build time.
if os.environ.get('TOMOTOPY_LANG') == 'kr':
    __doc__ = """`tomotopy` 패키지는 Python에서 사용가능한 다양한 토픽 모델링 타입과 함수를 제공합니다.
내부 모듈은 c++로 작성되었기 때문에 빠른 속도를 자랑합니다.

.. include:: ./documentation.kr.rst
"""
    __pdoc__ = {}
    __pdoc__['isa'] = """현재 로드된 모듈이 어떤 SIMD 명령어 세트를 사용하는지 표시합니다.
이 값은 `'avx2'`, `'avx'`, `'sse2'`, `'none'` 중 하나입니다."""
    __pdoc__['TermWeight'] = """용어 가중치 기법을 선택하는 데에 사용되는 열거형입니다. 여기에 제시된 용어 가중치 기법들은 다음 논문을 바탕으로 하였습니다:

> * <NAME>., & <NAME>. (2010, June). Term weighting schemes for latent dirichlet allocation. In human language technologies: The 2010 annual conference of the North American Chapter of the Association for Computational Linguistics (pp. 465-473). Association for Computational Linguistics.

총 3가지 가중치 기법을 사용할 수 있으며 기본값은 ONE입니다. 기본값뿐만 아니라 다른 모든 기법들도 `tomotopy`의 모든 토픽 모델에 사용할 수 있습니다. """
    __pdoc__['TermWeight.ONE'] = """모든 용어를 동일하게 간주합니다. (기본값)"""
    __pdoc__['TermWeight.IDF'] = """역문헌빈도(IDF)를 가중치로 사용합니다.

따라서 모든 문헌에 거의 골고루 등장하는 용어의 경우 낮은 가중치를 가지게 되며,
소수의 특정 문헌에만 집중적으로 등장하는 용어의 경우 높은 가중치를 가지게 됩니다."""
    __pdoc__['TermWeight.PMI'] = """점별 상호정보량(PMI)을 가중치로 사용합니다."""
    __pdoc__['ParallelScheme'] = """병렬화 기법을 선택하는 데에 사용되는 열거형입니다. 총 3가지 기법을 사용할 수 있으나, 모든 모델이 아래의 기법을 전부 지원하지는 않습니다."""
    __pdoc__['ParallelScheme.DEFAULT'] = """tomotopy가 모델에 따라 적합한 병럴화 기법을 선택하도록 합니다. 이 값이 기본값입니다."""
    __pdoc__['ParallelScheme.NONE'] = """깁스 샘플링에 병렬화 기법을 사용하지 않습니다. 깁스 샘플링을 제외한 다른 연산들은 여전히 병렬로 처리될 수 있습니다."""
    __pdoc__['ParallelScheme.COPY_MERGE'] = """
AD-LDA에서 제안된 복사 후 합치기 알고리즘을 사용합니다. 이는 작업자 수에 비례해 메모리를 소모합니다.
작업자 수가 적거나, 토픽 개수 혹은 어휘 집합의 크기가 작을 때 유리합니다.
0.5버전 이전까지는 모든 모델은 이 알고리즘을 기본으로 사용했습니다.

> * <NAME>., <NAME>., <NAME>., & <NAME>. (2009). Distributed algorithms for topic models. Journal of Machine Learning Research, 10(Aug), 1801-1828.
"""
    __pdoc__['ParallelScheme.PARTITION'] = """
PCGS에서 제안된 분할 샘플링 알고리즘을 사용합니다. 작업자 수에 관계없이 단일 스레드 알고리즘에 비해 2배의 메모리만 소모합니다.
작업자 수가 많거나, 토픽 개수 혹은 어휘 집합의 크기가 클 때 유리합니다.

> * <NAME>., <NAME>., & <NAME>. (2009). Parallel inference for latent dirichlet allocation on graphics processing units. In Advances in neural information processing systems (pp. 2134-2142).
"""
# Clean up module-private helpers so they don't leak into the public API.
del _load, IntEnum, os
| 2.25 | 2 |
app/recipe/test/test_tag_api.py | Webins/recipe-app-api | 1 | 12767699 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAG_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Test the publicly available tags API """
    def setUp(self):
        # Unauthenticated client: no force_authenticate here.
        self.client = APIClient()
    def test_login_required(self):
        """Test that login is required for retrieving tags"""
        res = self.client.get(TAG_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Test the authorized user tags API"""
    def setUp(self):
        # Every test in this class runs as an authenticated user.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            'test123'
        )
        self.client.force_authenticate(self.user)
    def test_retrieve_tags(self):
        """Test retrieving tags"""
        Tag.objects.create(user=self.user, name="TestTag1")
        Tag.objects.create(user=self.user, name="TestTag2")
        res = self.client.get(TAG_URL)
        # The API is expected to return tags sorted by name, descending.
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)
    def test_tags_limited_to_user(self):
        """Test that tags returned are for the authenticated user"""
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'test1234'
        )
        # user2's tag must not leak into the authenticated user's listing.
        Tag.objects.create(user=user2, name='TestTagUser2')
        tag = Tag.objects.create(user=self.user, name="TestTagUser1")
        res = self.client.get(TAG_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)
    # NOTE(review): method name typo ("succesfull") kept for test-history
    # stability; rename in a dedicated change if desired.
    def test_create_tag_succesfull(self):
        """Test creating a new tag"""
        payload = {'name': 'TestTag'}
        self.client.post(TAG_URL, payload)
        exists = Tag.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)
    def test_create_tag_invalid(self):
        """Test creating a new tag with invalid payload"""
        payload = {'name': ''}
        res = self.client.post(TAG_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_retrieve_tag_assign_to_recipe(self):
        """Test filtering tags by those assigned to recipes"""
        tag1 = Tag.objects.create(user=self.user, name='breakfast')
        tag2 = Tag.objects.create(user=self.user, name='lunch')
        recipe = Recipe.objects.create(
            title='coriander eggs on toast',
            time_minutes=10,
            price=5.00,
            user=self.user)
        recipe.tags.add(tag1)
        # assigned_only=1 should return only tags attached to some recipe.
        res = self.client.get(TAG_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
| 2.609375 | 3 |
src/Eukaryotic_Promoters_Classification/mouse_non_tata_deepromoter/DeePromoter.py | Shujun-He/Nucleic-Transformer | 12 | 12767700 | import random
import torch
import numpy as np
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms, utils
class ParallelCNN(nn.Module):
    def __init__(self, para_ker, pool_kernel=6, drop=0.5):
        """
        Apply several CNN branches (one per kernel size) to the same input
        and concatenate their outputs along the channel dimension.
        :param para_ker: List of kernel sizes, one convolution branch each
        :param pool_kernel: Max-pooling kernel applied after each convolution
        :param drop: Dropout probability applied after pooling
        """
        super(ParallelCNN, self).__init__()
        # Branches are built in the order given so parameter initialization
        # is reproducible under a fixed seed.
        self.lseq = nn.ModuleList(
            nn.Sequential(
                nn.Conv1d(4, 4, kernel_size=kernel, padding="same"),
                nn.ReLU(),
                nn.MaxPool1d(pool_kernel),
                nn.Dropout(drop),
            )
            for kernel in para_ker
        )

    def forward(self, inputs):
        """
        :param inputs: DNA onehot sequences [batch_size x 4 x length]
        :return: Stacked CNN features from the different kernel sizes,
            concatenated channel-wise [batch_size x 4*len(para_ker) x length']
        """
        branch_outputs = [branch(inputs) for branch in self.lseq]
        return torch.cat(branch_outputs, 1)
class BidirectionalLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        """Bidirectional LSTM followed by a linear projection to output_size."""
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)
        # Both directions are concatenated, hence the 2x input width.
        self.linear = nn.Linear(hidden_size * 2, output_size)

    def forward(self, inputs):
        """
        :param inputs: feature sequence [batch_size x T x input_size]
        :return: contextual feature [batch_size x T x output_size]
        """
        self.rnn.flatten_parameters()
        contextual, _ = self.rnn(inputs)  # -> batch_size x T x (2*hidden_size)
        return self.linear(contextual)
class DeePromoter(nn.Module):
    def __init__(self, para_ker, input_shape=(64, 300, 4), pool_kernel=6, drop=0.5):
        """
        Deepromoter
        :param para_ker: List of kernel size that will be used
        :param input_shape: Specifies the input shape for model(fixed)
        :param pool_kernel: Pooling parameter after CNN
        :param drop: Dropout parameter
        """
        super(DeePromoter, self).__init__()
        # Each parallel conv branch emits 4 channels, so the concatenated
        # feature has 4 * len(para_ker) channels.
        binode = len(para_ker) * 4
        self.pconv = ParallelCNN(para_ker, pool_kernel, drop)
        self.bilstm = BidirectionalLSTM(binode, binode, binode)
        self.flatten = nn.Flatten()
        # Dummy forward pass through the feature layers to size the fully
        # connected head for the fixed input_shape.
        x = torch.zeros(input_shape)
        shape = self.get_feature_shape(x)
        self.fc = nn.Sequential(
            nn.Linear(shape, shape),
            nn.ReLU(),
            nn.Linear(shape, 2),  # binary output: promoter / non-promoter
        )
    def get_feature_shape(self, x):
        """Pass a dummy input through to find the shape
        after flatten layer for Linear layer construction"""
        x = x.permute(0, 2, 1)  # (batch, length, 4) -> (batch, 4, length)
        x = self.pconv(x)
        x = x.permute(0, 2, 1)  # back to channels-last for the LSTM
        x = self.bilstm(x)
        x = self.flatten(x)
        return x.shape[1]
    def forward(self, x):
        # Conv layers expect channels-first; the LSTM expects channels-last.
        x = x.permute(0, 2, 1)
        x = self.pconv(x)
        x = x.permute(0, 2, 1)
        x = self.bilstm(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x
| 2.78125 | 3 |
catkin_ws/src/10-lane-control/line_detector2/include/line_detector2_tests/single_image_histogram.py | yxiao1996/dev | 2 | 12767701 | <reponame>yxiao1996/dev
from collections import OrderedDict
from comptests.registrar import comptest, run_module_tests
import os
import cv2
from duckietown_utils.download import download_if_not_exist
from duckietown_utils.jpg import image_cv_from_jpg_fn
from duckietown_utils.path_utils import get_ros_package_path
from line_detector2_tests.single_image import write_images_as_jpegs
import numpy as np
from reprep.graphics.filter_posneg import posneg_hinton, posneg
from scipy import stats
@comptest
def single_image_histograms():
    """Download a sample frame and write its HSV-histogram debug images to disk."""
    url = 'https://www.dropbox.com/s/bzezpw8ivlfu4b0/frame0002.jpg?dl=0'
    p = os.path.join(get_ros_package_path('line_detector2'),
                     'include', 'line_detector2_tests', 'frame0002.jpg')
    # Cached download: only fetched the first time the test runs.
    download_if_not_exist(url, p)
    image_cv = image_cv_from_jpg_fn(p)
    res = go(image_cv)
    write_images_as_jpegs('single_image_histograms', res)
def go(image_bgr):
    """Build HSV histogram visualizations for a BGR image.

    Returns an OrderedDict of named images: the input, a bottom crop, an
    HSV color map, and histogram renderings over that map.
    """
    res = OrderedDict()
    H, _W = image_bgr.shape[:2]
    # Drop the top 30% of the image (presumably sky/horizon -- TODO confirm).
    cut = 0.3
    image_bgr_cut = image_bgr[int(cut*H):,:,:]
    res['image_bgr'] = image_bgr
    res['image_bgr_cut'] = image_bgr_cut
    # Build a 180x256 lookup image: hue along rows, saturation along columns,
    # full value; converted to BGR for display.
    hsv_map = np.zeros((180, 256, 3), np.uint8)
    hsv_map_h, hsv_map_s = np.indices(hsv_map.shape[:2])
    hsv_map[:,:,0] = hsv_map_h
    hsv_map[:,:,1] = hsv_map_s
    hsv_map[:,:,2] = 255
    hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
    # cv2.imshow('hsv_map', hsv_map)
    res['hsv_map'] = hsv_map
    hist_scale = 10
    hsv = cv2.cvtColor(image_bgr_cut, cv2.COLOR_BGR2HSV)
    # dark = hsv[...,2] < 32
    # hsv[dark] = 0
    # 2D histogram over hue (0-180) and saturation (0-256) channels.
    h0 = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    res['another'] = posneg(h0)
    # hf = h0.flatten()
    # c = np.empty_like(h0)
    # for i in range(c.shape[0]):
    #     for j in range(c.shape[1]):
    #         c[i,j]=stats.percentileofscore(hf, h0[i,j])
    # res['another2'] = posneg(c)
    h = h0 * hist_scale
    # h = np.clip(h*0.005*hist_scale, 0, 1)
    # Weight the color map by the (scaled) histogram counts.
    vis = hsv_map*h[:,:,np.newaxis] / 255.0
    res['vis'] = vis
    # Binary mask of occupied histogram bins.
    used = h> 0
    res['vis2'] = hsv_map * used[:,:,np.newaxis]
    return res
if __name__ == '__main__':
    # Allow running this comptests module directly.
    run_module_tests()
src/main.py | yoshihidesawada/DCUF | 2 | 12767702 | ###
# python main.py ${lam} ${ICF_EPOCH} ${AE_EPOCH} ${DCUF_EPOCH}
###
import sys
import random
import numpy as np
import chainer
import environment as Env
import model as Model
# Require exactly four CLI arguments: lambda, ICF epochs, AE epochs, DCUF epochs.
# (Python 2 print-statement syntax; this file targets Python 2.)
if len(sys.argv) != 5:
    print 'error'
    print sys.argv
    exit(0)
# Unpack the arguments (still strings; converted to numbers in __main__).
_, lamb, icf_epoch, ae_epoch, dcuf_epoch = sys.argv
def disentangling_controllable_uncontrollable_factors(lamb, icf_epoch, ae_epoch, dcuf_epoch):
    """Train the DCUF model against the grid-world environment.

    lamb -- weight balancing disentangled objective vs. reconstruction error
    icf_epoch / ae_epoch / dcuf_epoch -- epochs of each training phase
    Saves the trained chainer model to 'dcuf.model'.
    """
    # Hyper-parameters setting
    # policy_num: the number of policies
    # action_num: the number of actions (e.g., up, down, left, right)
    # access_num: hyper-parameter for computing disentangled objective
    # max_epoch: iteration number
    policy_num = 4
    action_num = policy_num
    access_num = 20
    max_epoch = icf_epoch+ae_epoch+dcuf_epoch

    # Input state initialized settings
    state = np.zeros((Env.width,Env.height),dtype=np.float32)

    # chainer setup
    optimizer = chainer.optimizers.Adam()
    model_dcuf = Model.DCUF(Env.width, Env.height, access_num, policy_num, \
                            action_num, icf_epoch, ae_epoch, dcuf_epoch)
    optimizer.setup(model_dcuf)

    # Training iteration
    for t in range(0,max_epoch):

        # Get state from environment
        # state: t-th image
        # x, y: coordinate of the controllable object
        # x_u, y_u: coordinate of the uncontrollable object
        state, x, y, x_u, y_u = Env.environment(action_num,t)

        # Gradient initialization
        model_dcuf.zerograds()

        # next_state: t+1-th images to compute disentangled objective
        # s: t+1-th image computed from (x,y), (x_u,y_u), action
        restate = state.reshape((1, 1, Env.width, Env.height))
        next_state = []
        # Sample access_num candidate next states per action.
        for k in range(1,action_num+1):
            for j in range(0,access_num):
                s = Env.next_state_from_environment(x, y, x_u, y_u,\
                                                    k, action_num, t+1)
                next_state.append(s)

        next_state = np.array(next_state, dtype=np.float32)
        renext_state = next_state.reshape((action_num,access_num,Env.width,Env.height))

        # chainer optimization
        loss_dcuf = model_dcuf(restate, renext_state, x, y, lamb, t)
        loss_dcuf.backward()
        optimizer.update()

    # Save model
    outfile = "dcuf"+".model"
    chainer.serializers.save_npz(outfile,model_dcuf)
if __name__ == '__main__':

    # random seed initialization
    # This initialization is not so important
    random.seed(1)
    np.random.seed(1)

    # Hyper-parameters
    # lamb: lambda to balance between disentangled objective and reconstruction error
    # icf_epoch: epoch of ICF
    # ae_epoch: epoch of AE (by setting it, DCUF may more stably detect uncontrollable objects)
    # dcuf_epoch: epoch of DCUF
    # Convert the CLI strings (unpacked at module level) to numeric types.
    lamb = float(lamb)
    icf_epoch = int(icf_epoch)
    ae_epoch = int(ae_epoch)
    dcuf_epoch = int(dcuf_epoch)

    disentangling_controllable_uncontrollable_factors(lamb, icf_epoch, ae_epoch, dcuf_epoch)
| 2.5625 | 3 |
utils/wfuzzbasicauthbrute/wfuzz/plugins/scripts/robots.py | ismailbozkurt/kubebot | 171 | 12767703 | <reponame>ismailbozkurt/kubebot
import re
from urlparse import urlparse, urljoin
from framework.plugins.api import DiscoveryPlugin
from framework.plugins.api import url_filename
from externals.moduleman.plugin import moduleman_plugin
@moduleman_plugin
class robots(DiscoveryPlugin):
    """Discovery plugin: parse robots.txt responses for new paths to fuzz."""

    name = "robots"
    description = "Parses robots.txt looking for new content. Optional: discovery.bl=\".txt,.gif\""
    category = ["default", "active", "discovery"]
    priority = 99

    def validate(self, fuzzresult):
        # Only act on successfully fetched robots.txt files.
        return url_filename(fuzzresult) == "robots.txt" and fuzzresult.code == 200

    def process(self, fuzzresult):
        # Shamelessly (partially) copied from w3af's plugins/discovery/robotsReader.py
        # Extract the path from Allow/Disallow/Sitemap directives, strip the
        # wildcard characters, and queue it unless the extension is blacklisted.
        for line in fuzzresult.history.fr_content().split('\n'):
            line = line.strip()
            if len(line) > 0 and line[0] != '#' and (line.upper().find('ALLOW') == 0 or\
                    line.upper().find('DISALLOW') == 0 or line.upper().find('SITEMAP') == 0):
                url = line[ line.find(':') + 1 : ]
                url = url.strip(" *")
                if url and not self.blacklisted_extension(url):
                    self.queue_url(urljoin(fuzzresult.url, url))
tolteca/web/templates/userlog.py | dennis-l/tolteca | 2 | 12767704 | <reponame>dennis-l/tolteca
#! /usr/bin/env python
from dasha.web.templates import ComponentTemplate
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
from dasha.web.extensions.db import dataframe_from_db
from dasha.web.templates.common import LiveUpdateSection
from dash.dependencies import Input, Output, State
from dasha.web.templates.utils import partial_update_at
from dash_table import DataTable
import dash
from ..tasks.dbrt import dbrt
import cachetools.func
import sqlalchemy.sql.expression as se
from sqlalchemy.sql import func as sqla_func
from datetime import datetime, timedelta, timezone
import astropy.units as u
from tollan.utils.db.conventions import utcnow
from tollan.utils.log import get_logger
def _get_latest_scalar(table_name, column_name):
    """Return the maximum value of *column_name* in *table_name*.

    Shared helper extracted from the two near-identical "latest id/ObsNum"
    lookups below (they differed only in table and column).
    """
    dbrt.ensure_connection('toltec')
    t = dbrt['toltec'].tables[table_name]
    col = getattr(t.c, column_name)
    stmt = se.select([col]).order_by(se.desc(col)).limit(1)
    with dbrt['toltec'].session_context as session:
        return session.execute(stmt).scalar()


def _get_toltec_userlog_id_latest():
    """Return the id of the newest userlog row."""
    logger = get_logger()
    id_latest = _get_latest_scalar('userlog', 'id')
    logger.debug(f"latest id: {id_latest}")
    return id_latest


def _get_toltecdb_obsnum_latest():
    """Return the most recent ObsNum in the toltec table."""
    logger = get_logger()
    obsnum_latest = _get_latest_scalar('toltec', 'ObsNum')
    logger.debug(f"latest obsnum: {obsnum_latest}")
    return obsnum_latest
@cachetools.func.ttl_cache(maxsize=1, ttl=1)
def query_toltec_userlog(time_start=None, time_end=None, n_entries=None):
    """Return a DataFrame of userlog entries, optionally filtered.

    :param time_start: include entries at/after this timestamp (optional)
    :param time_end: include entries at/before this timestamp (optional)
    :param n_entries: if given, keep only the latest n_entries rows by id
    Results are cached for 1 s to debounce rapid dashboard polling.
    """
    dbrt.ensure_connection('toltec')
    t = dbrt['toltec'].tables
    session = dbrt['toltec'].session
    # Commit to end any stale transaction so fresh rows are visible.
    session.commit()
    conditions = []
    if time_start is not None:
        conditions.append(
            sqla_func.timestamp(
                t['userlog'].c.Date,
                t['userlog'].c.Time) >= time_start,
        )
    if time_end is not None:
        conditions.append(
            sqla_func.timestamp(
                t['userlog'].c.Date,
                t['userlog'].c.Time) <= time_end,
        )
    if n_entries is not None:
        # Translate "latest n" into an id window ending at the newest id.
        id_latest = _get_toltec_userlog_id_latest()
        id_since = id_latest - n_entries + 1
        conditions.extend(
            [
                t['userlog'].c.id <= id_latest,
                t['userlog'].c.id >= id_since
            ])
    df_userlog = dataframe_from_db(
        se.select(
            [
                t['userlog'].c.id,
                t['userlog'].c.ObsNum,
                # Combine the Date and Time columns into one timestamp.
                sqla_func.timestamp(
                    t['userlog'].c.Date,
                    t['userlog'].c.Time).label('DateTime'),
                t['userlog'].c.Entry,
                t['userlog'].c.User,
                # t['userlog'].c.Keyword,
            ]
        ).where(
            se.and_(*conditions)), session=session)
    return df_userlog
def insert_to_toltec_userlog(user, obsnum, entry):
    """Insert a new userlog row; Date/Time are set to DB-side UTC now."""
    logger = get_logger()
    logger.debug(f"insert to userlog obsnum={obsnum} entry={entry}")
    # This bind has write access to the userlog table.
    dbrt.ensure_connection('toltec_userlog_tool')
    bind = 'toltec_userlog_tool'
    t = dbrt[bind].tables
    stmt = (
        se.insert(t['userlog']).
        values(
            {
                'User': user,
                'ObsNum': obsnum,
                'Entry': entry,
                'Date': utcnow(),
                'Time': utcnow(),
            })
    )
    logger.debug(f"insert stmt: {stmt}")
    with dbrt[bind].session_context as session:
        session.execute(stmt)
    return
def make_labeled_drp(form, label, **kwargs):
    """Append a small labeled dropdown (dbc.Select) to *form* and return it."""
    group = form.child(dbc.InputGroup, size='sm', className='pr-2')
    group.child(dbc.InputGroupAddon(label, addon_type='prepend'))
    return group.child(dbc.Select, **kwargs)
def make_labeled_input(
        form, label, input_cls=dbc.Input, make_extra_container=False,
        **kwargs):
    """Append a labeled input row to *form* and return the input component.

    :param form: parent form template node
    :param label: label text shown next to the input
    :param input_cls: Dash component class to instantiate
    :param make_extra_container: if True, also return a flex container placed
        next to the input (for buttons/alerts)
    Extra kwargs are forwarded to *input_cls*.
    """
    igrp = form.child(dbc.InputGroup, size='sm', row=True)
    # Split the 12-column grid between label and input.
    width = kwargs.pop('width', 10)
    label_width = 12 - width
    lbl = igrp.child(dbc.Label, label, width=label_width)
    if make_extra_container:
        extra_container = igrp.child(dbc.Col, className='d-flex')
        inp_container = extra_container
    else:
        inp_container = igrp.child(dbc.Col)
        extra_container = None
    inp = inp_container.child(input_cls, width=width, **kwargs)
    # Associate the label with the generated input id.
    lbl.html_for = inp.id
    if make_extra_container:
        return inp, extra_container
    return inp
class UserLogTool(ComponentTemplate):
    """Dash page for browsing and appending TolTEC userlog entries."""

    _component_cls = html.Div
    logger = get_logger()

    def setup_layout(self, app):
        """Build the page layout (form, view controls, table) and register callbacks."""
        container = self
        header_container, body = container.grid(2, 1)
        header = header_container.child(
            LiveUpdateSection(
                title_component=html.H3("User Log Tool"),
                interval_options=[2000, 5000, 10000],
                interval_option_value=2000
            ))
        inputs_container, controls_container, view_container = body.grid(3, 1)
        inputs_container.className = 'mt-4 mb-4'
        # --- entry submission form -------------------------------------
        inputs_form = inputs_container.child(
            dbc.Form,
            style={'width': '50vw'}
        )
        input_user = make_labeled_input(
            inputs_form, "User",
            input_cls=dbc.Input,
            debounce=True,
            type='text',
            bs_size="sm",
            style={
                'width': '15em',
            }
        )
        input_obsnum, input_obsnum_extra_container = make_labeled_input(
            inputs_form, "ObsNum",
            input_cls=dbc.Input,
            make_extra_container=True,
            debounce=True,
            type='number',
            min=0, step=1,
            bs_size="sm",
            style={
                'width': '10em',
            }
        )
        latest_obsnum_btn = input_obsnum_extra_container.child(
            dbc.Button, 'Fill latest ObsNum',
            size='sm', color='link',
            className='ml-2 mb-2',
        )
        input_entry = make_labeled_input(
            inputs_form, "Entry",
            input_cls=dbc.Textarea,
            # debounce=True,
            bs_size="sm",
            className='mb-2',
            required=True,
            minLength=1,
        )
        # here we wrap the submit btn in a loading state to debounce
        submit_btn_loading, submit_btn_extra_container = make_labeled_input(
            inputs_form, "",
            input_cls=dcc.Loading,
            make_extra_container=True,
            # className='mb-2',
            type='dot',
            parent_style={
                'height': '38px'  # this matches the button
            }
        )
        submit_btn = submit_btn_loading.child(
            dbc.Button,
            # size="sm",
            color='primary',
            children='Submit',
            className='mb-2 mr-2'
        )
        # the button itself can't be the trigger so we need another
        # dummy div in the dcc.Loading
        on_submit_trigger = submit_btn_loading.child(html.Div)
        response_container = submit_btn_extra_container.child(html.Div)
        # --- view controls ---------------------------------------------
        controls_container.className = 'mt-2'
        controls_form = controls_container.child(dbc.Form, inline=True)
        view_latest_since_drp = make_labeled_drp(
            controls_form, 'Show entries of last',
            options=[
                {
                    'label': f'{n}',
                    'value': n,
                }
                for n in ['1 d', '7 d', '30 d']],
            value='1 d',
        )
        view_n_entries_max_drp = make_labeled_drp(
            controls_form, 'Show maximum',
            options=[
                {'label': f'{n} entries', 'value': n}
                for n in [50, 200, 1000]
            ],
            value=50,
        )
        # --- log table --------------------------------------------------
        view_container.className = 'mt-2'
        log_dt = view_container.child(
            DataTable,
            style_cell={
                'padding': '0.5em',
                'width': '0px',
            },
            css=[
                {
                    'selector': (
                        '.dash-spreadsheet-container '
                        '.dash-spreadsheet-inner *, '
                        '.dash-spreadsheet-container '
                        '.dash-spreadsheet-inner *:after, '
                        '.dash-spreadsheet-container '
                        '.dash-spreadsheet-inner *:before'),
                    'rule': 'box-sizing: inherit; width: 100%;'
                }
            ],
            style_cell_conditional=[
                {
                    'if': {'column_id': 'Entry'},
                    'textAlign': 'left',
                    'whiteSpace': 'normal',
                    'height': 'auto',
                },
            ],
            # style_data_conditional=[
            # ]
        )
        super().setup_layout(app)

        @app.callback(
            Output(input_obsnum.id, 'value'),
            [
                Input(latest_obsnum_btn.id, 'n_clicks')
            ]
        )
        def fill_latest_obsnum(n_clicks):
            # Look up the newest ObsNum and put it in the input box.
            latest_obsnum = _get_toltecdb_obsnum_latest()
            return int(latest_obsnum)

        @app.callback(
            [
                Output(response_container.id, 'children'),
                Output(on_submit_trigger.id, 'children'),
            ],
            [
                Input(submit_btn.id, 'n_clicks'),
            ],
            [
                State(input_user.id, 'value'),
                State(input_obsnum.id, 'value'),
                State(input_entry.id, 'value'),
            ],
            prevent_initial_call=True
        )
        def on_submit(n_clicks, user, obsnum, entry):
            # Validate the form, write the entry, and show a transient alert.
            def make_output(color, message):
                return [
                    dbc.Alert(
                        message, color=color,
                        duration=3000,
                        fade=True,
                        className='mx-0 my-0',
                        style={
                            # these matches the button
                            'height': '38px',
                            'padding-top': '0.375em',
                            'padding-bottom': '0.375em',
                        }
                    ),
                    ""]
            if obsnum is None or obsnum < 0 or entry in [None, '']:
                return make_output(
                    'danger',
                    'Error: incomplete form data.')
            # create entry and push to db
            try:
                insert_to_toltec_userlog(
                    user=user, obsnum=obsnum, entry=entry)
            except Exception:
                self.logger.error("failed create record in db", exc_info=True)
                return make_output(
                    'danger',
                    'Error: unable to update database.'
                )
            return make_output(
                'success',
                'Success.')

        @app.callback(
            [
                Output(log_dt.id, 'columns'),
                Output(log_dt.id, 'data'),
                Output(header.loading.id, 'children'),
                Output(header.banner.id, 'children'),
            ],
            header.timer.inputs + [
                Input(view_latest_since_drp.id, 'value'),
                Input(view_n_entries_max_drp.id, 'value'),
                # this is here to trigger update on submit
                Input(on_submit_trigger.id, 'children'),
            ]
        )
        def update_view(
                n_calls, view_latest_since_value, view_n_entries_max_value,
                submit_btn_loading_state):
            # Parse the "last N units" selection into a time window
            # (astropy units handle the 'd' -> hours conversion).
            latest_since_value, latest_since_unit = \
                view_latest_since_value.split()
            latest_since = latest_since_value << u.Unit(latest_since_unit)
            view_n_entries_max = int(view_n_entries_max_value)
            time_end = datetime.now(timezone.utc)
            time_start = time_end - timedelta(
                hours=latest_since.to_value('hr'))
            try:
                df_userlog = query_toltec_userlog(
                    time_start=time_start,
                    time_end=time_end,
                    n_entries=view_n_entries_max)
            except Exception as e:
                self.logger.debug(f'Error query db: {e}', exc_info=True)
                # Surface the error only in the banner slot of the header.
                return partial_update_at(
                    -1, dbc.Alert(
                        f'Error query db: {e}', color='danger'))
            df = df_userlog
            # Newest entries first.
            df = df.sort_values(by='DateTime', ascending=False)
            data = df.to_dict('record')
            columns = [
                {
                    'label': c,
                    'id': c
                }
                for c in df.columns
            ]
            return columns, data, '', dash.no_update
| 1.710938 | 2 |
src/vater/request_types.py | myslak71/vater | 2 | 12767705 | """This module contains logic for different API request types."""
import datetime
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Union
import requests
from requests import Response
from vater.errors import (
ERROR_CODE_MAPPING,
InvalidRequestData,
MaximumParameterNumberExceeded,
UnknownExternalApiError,
)
from vater.models import Subject, SubjectSchema
class RequestType(ABC):
    """Base class for all request types."""

    def __init__(self, url_pattern: str, *args, validators=None, **kwargs) -> None:
        """Initialize instance parameters."""
        self.params: Dict[str, Any] = {}
        self.url_pattern = url_pattern
        # Mapping of param name -> iterable of validator callables.
        self.validators = {} if validators is None else validators
        self.validated_params: dict = {}

    def _get_url(self) -> None:
        """Interpolate endpoint url."""
        url = self.url_pattern
        # Substitute each {placeholder} present in the pattern; non-scalar
        # values (sequences) are joined with commas for bulk endpoints.
        for key, value in self.validated_params.items():  # type: ignore
            if f"{{{key}}}" in self.url_pattern:
                if isinstance(value, (str, datetime.date)):
                    url = url.replace(f"{{{key}}}", str(value))
                else:
                    url = url.replace(f"{{{key}}}", ",".join(value))
        self.url = self.client.base_url + url  # type: ignore

    def register_params(self, **kwargs: Any) -> None:
        """Register parameters to the instance."""
        self.client = kwargs.pop("client")
        self.params = kwargs
        # The API requires a date; default to today when not supplied.
        if self.params["date"] is None:  # type: ignore
            self.params["date"] = datetime.date.today()  # type: ignore

    def validate(self) -> None:
        """Validate given parameters."""
        # Run each registered validator chain; params without validators
        # pass through unchanged (KeyError branch).
        for param, value in self.params.items():  # type: ignore
            try:
                for validator in self.validators[param]:
                    self.validated_params[param] = validator(value)
            except KeyError:
                self.validated_params[param] = value

    def send_request(self) -> Response:
        """Get response from the API.

        :raises InvalidRequestData: on HTTP 400 with a mapped error code
        :raises UnknownExternalApiError: on any other non-200 status
        """
        self._get_url()
        response = requests.get(self.url)
        if response.status_code == 400:
            raise InvalidRequestData(ERROR_CODE_MAPPING[response.json()["code"]])
        elif response.status_code != 200:
            raise UnknownExternalApiError(response.status_code, response.text)
        return response

    @abstractmethod
    def result(self):
        """Return request result."""
class CheckRequest(RequestType):
    """Class for check requests type."""

    def result(self) -> Union[dict, Tuple[bool, str]]:
        """Return check result if account is assigned to the subject and request id."""
        self.validate()
        payload = self.send_request().json()
        if self.params.get("raw"):  # type: ignore
            return payload
        check = payload["result"]
        return check["accountAssigned"] == "TAK", check["requestId"]
class SearchRequest(RequestType):
    """Class for search requests type."""

    # Maximum number of values the API accepts per bulk parameter.
    PARAM_LIMIT = 30

    def __init__(self, url_pattern: str, many: bool = False, *args, **kwargs) -> None:
        """Initialize additional `many` attribute."""
        super().__init__(url_pattern, *args, **kwargs)
        self.many = many

    def validate(self) -> None:
        """Validate given parameters."""
        super().validate()
        if not self.many:
            return
        # NOTE(review): assumes exactly one search parameter remains after
        # removing 'raw' and 'date'; set.pop() picks arbitrarily otherwise
        # -- confirm callers always register a single search param.
        param = ({*self.params} - {"raw", "date"}).pop()  # type: ignore
        if len(self.params[param]) > self.PARAM_LIMIT:  # type: ignore
            raise MaximumParameterNumberExceeded(param, self.PARAM_LIMIT)

    def result(
        self
    ) -> Union[dict, Tuple[Union[List[Subject], Optional[Subject]], str]]:
        """Return subject/subjects mapped to the specific parameter and request id."""
        self.validate()
        response = self.send_request()
        if self.params.get("raw"):  # type: ignore
            return response.json()
        result = response.json()["result"]
        # Single lookups may legitimately find no subject.
        if not self.many and result["subject"] is None:
            return None, result["requestId"]
        return (
            SubjectSchema().load(
                result["subjects" if self.many else "subject"], many=self.many
            ),
            result["requestId"],
        )
| 2.703125 | 3 |
app/models.py | Linyameng/alphadata-dev | 0 | 12767706 | # -*- coding: utf-8 -*-
"""
Created on 2018/5/24
@author: <NAME>
"""
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask_login import UserMixin, AnonymousUserMixin
from flask import current_app
from . import login_manager
from . import db
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: reload the user object from the session-stored id.
    return User.query.get(user_id)
class AnonymousUser(AnonymousUserMixin):
    """Stand-in for unauthenticated users: no permissions, not an admin."""

    def can(self, permissions):
        return False

    def is_administrator(self):
        return False


# Make Flask-Login use the permission-aware anonymous user class.
login_manager.anonymous_user = AnonymousUser
class Role(db.Model):
    """User role carrying a bitmask of Permission flags."""

    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # NOTE(review): column default is True, while insert_roles() marks only
    # 'User' as the default role -- confirm which is intended.
    default = db.Column(db.Boolean, default=True, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles in the database."""
        # name -> (permission bitmask, is-default-role)
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderate': (Permission.FOLLOW |
                         Permission.COMMENT |
                         Permission.WRITE_ARTICLES |
                         Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            role.permissions, role.default = roles.get(r)
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Application user account with auth, confirmation and profile data."""

    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)

    @property
    def password(self):
        """Write-only attribute; reading raises to avoid leaking hashes."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Store only a salted hash, never the plaintext.
        self.password_hash = generate_password_hash(password)

    def ping(self):
        """Refresh last_seen; call on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expires=3600):
        """Return a signed account-confirmation token (default 1 h expiry)."""
        s = Serializer(current_app.config['SECRET_KEY'], expires)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            # Expired or tampered token (BadSignature / SignatureExpired).
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expires=3600):
        """Return a signed password-reset token."""
        s = Serializer(current_app.config['SECRET_KEY'], expires)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Set a new password if *token* is valid for this user.

        Bug fix: the new password is now actually assigned; the previous
        code referenced an undefined name here.
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_change_email_token(self, new_email, expires=3600):
        """Return a signed token binding this user to *new_email*."""
        s = Serializer(current_app.config['SECRET_KEY'], expires)
        return s.dumps({'user_id': self.id, 'email': new_email})

    def change_email(self, token):
        """Apply an e-mail change if *token* is valid and the address is free."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return False
        if data.get('user_id') != self.id:
            return False
        new_email = data.get('email')
        if new_email is None:
            return False
        # Refuse addresses already taken by another account.
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        db.session.add(self)
        return True

    def can(self, permissions):
        """Return True if the user's role grants every bit in *permissions*."""
        return self.role is not None and (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def __repr__(self):
        return '<User %r>' % self.username
class Permission:
    """Bit-flag permission constants OR-ed together into Role.permissions."""

    FOLLOW = 1 << 0             # follow other users
    COMMENT = 1 << 1            # comment on articles
    WRITE_ARTICLES = 1 << 2     # publish articles
    MODERATE_COMMENTS = 1 << 3  # suppress offensive comments
    ADMINISTER = 1 << 7         # full administrative access
class PageView(db.Model):
    """Per-view page-visit counter with creation/last-visit timestamps."""

    __tablename__ = 'pageview'
    id = db.Column(db.Integer, primary_key=True)
    # Name/identifier of the tracked view.
    view = db.Column(db.String(64), index=True)
    # Accumulated page-view count.
    pv = db.Column(db.Integer, default=0)
    create_time = db.Column(db.DateTime(), default=datetime.utcnow)
    last_time = db.Column(db.DateTime(), default=datetime.utcnow)

    def update_time(self):
        """Refresh the last-visited timestamp (caller commits the session)."""
        self.last_time = datetime.utcnow()
        db.session.add(self)
class EnvironMapping(db.Model):
    """Mapping of a test environment's hosts and database endpoints."""

    __tablename__ = 'environ_mapping'
    id = db.Column(db.Integer, primary_key=True)
    tp_ip = db.Column(db.String(64))
    tms_ip = db.Column(db.String(64), index=True)
    user_prefix = db.Column(db.String(64))
    tms_db = db.Column(db.String(64), default='192.168.220.119')
    tp_db = db.Column(db.String(64), default='192.168.220.126')
    tms_dsn = db.Column(db.String(64), default='192.168.220.119:1521/hbqa')
    tp_dsn = db.Column(db.String(64), default='192.168.220.126:1521/hbqa11g')
    # NOTE(review): hard-coded credential default ('<PASSWORD>' looks like a
    # redaction placeholder) -- move secrets out of the schema definition.
    db_password = db.Column(db.String(64), default='<PASSWORD>')
    redis_peer = db.Column(db.String(64))
class SaveSql(db.Model):
    """Saved SQL statement with creator/modifier audit metadata."""

    __tablename__ = 'save_sql'
    id = db.Column(db.Integer, primary_key=True)
    SQL_statements = db.Column(db.String(64))
    # Target database instance the statement runs against.
    instance = db.Column(db.String(64))
    Desc = db.Column(db.String(250))
    creator = db.Column(db.String(64))
    modifier = db.Column(db.String(64))
    CREATE_DATE = db.Column(db.Date)
    UPDATE_DATE = db.Column(db.Date)
class actioncolumn(db.Model):
    """Lookup table of action-column names (name mirrors an existing DB table)."""

    __tablename__ = 'actioncolumn'
    actioncolumnid = db.Column(db.Integer, primary_key=True)
    actioncolumnname = db.Column(db.String(255))
ResponseML/crawler/firestore.py | deliciafernandes/Response-Disaster-Management | 1 | 12767707 | <reponame>deliciafernandes/Response-Disaster-Management<filename>ResponseML/crawler/firestore.py
#####################################Data class
class RealNews(object):
    """Verified disaster-news item as stored in Firestore."""

    def __init__(self, date, headline, description, distype, url="", imageurl="", location=""):
        self.date = date
        self.headline = headline
        self.description = description
        self.url = url
        self.distype = distype  # disaster type, e.g. 'flood'
        self.imageurl = imageurl
        self.location = location

    @staticmethod
    def from_dict(source):
        """Build a RealNews from a Firestore document dict.

        Bug fix: the previous implementation assigned to an undefined
        ``self`` and returned nothing; it now constructs and returns an
        instance. Optional fields fall back to the constructor defaults.
        """
        return RealNews(
            date=source['date'],
            headline=source['headline'],
            description=source['description'],
            distype=source['distype'],
            url=source.get('url', ""),
            imageurl=source.get('imageurl', ""),
            location=source.get('location', ""),
        )

    def to_dict(self):
        """Return a plain dict suitable for writing back to Firestore."""
        return {
            'date': self.date,
            'headline': self.headline,
            'description': self.description,
            'url': self.url,
            'distype': self.distype,
            'imageurl': self.imageurl,
            'location': self.location,
        }

    def __repr__(self):
        return (f"News( date: {self.date}, headline : {self.headline}, "
                f"description : {self.description}, url : {self.url}, "
                f"distype : {self.distype}, imageurl : {self.imageurl}, "
                f"location : {self.location})")
#####################################write Data
# date = input('Enter date: ')
# headline = input('Enter headlines: ')
# description = input('Enter description: ')
# distype = input('Enter disaster type: ')
# url = input('Enter url: ')
# imageurl = input('Enter image url: ')
# location = input('Enter location: ')
| 3.296875 | 3 |
azext_iot/central/providers/export_provider.py | lucadruda/azure-iot-cli-extension | 1 | 12767708 | <reponame>lucadruda/azure-iot-cli-extension
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import List, Union
from knack.log import get_logger
from knack.util import CLIError
from azext_iot.central.providers.central_provider import CentralProvider
from azext_iot.constants import CENTRAL_ENDPOINT
from azext_iot.central import services as central_services
from azext_iot.central.models.v1_1_preview import ExportV1_1_preview
logger = get_logger(__name__)
class CentralExportProvider(CentralProvider):
    """Provider for IoT Central data-export CRUD with a local id->export cache."""

    def __init__(self, cmd, app_id: str, api_version: str, token=None):
        super().__init__(cmd, app_id, api_version, token=token)
        # Cache of exports keyed by export id; populated lazily by the
        # methods below.
        self._exports = {}

    def list_exports(
        self, central_dns_suffix=CENTRAL_ENDPOINT
    ) -> List[Union[dict, ExportV1_1_preview]]:
        """List all exports of the app and refresh the local cache."""
        exports = central_services.export.list_exports(
            cmd=self._cmd,
            app_id=self._app_id,
            token=self._token,
            central_dns_suffix=central_dns_suffix,
            api_version=self._api_version,
        )
        # add to cache
        # NOTE(review): assumes each export is dict-like (export["id"]);
        # confirm this holds for the model-typed variant of the Union.
        for export in exports:
            self._exports.update({export["id"]: export})
        return exports

    def add_export(
        self, export_id, payload, central_dnx_suffix=CENTRAL_ENDPOINT
    ) -> Union[dict, ExportV1_1_preview]:
        """Create a new export; fails fast if the id is already cached."""
        if export_id in self._exports:
            raise CLIError("Destination already exists")
        export = central_services.export.add_export(
            self._cmd,
            self._app_id,
            export_id=export_id,
            payload=payload,
            token=self._token,
            api_version=self._api_version,
            central_dns_suffix=central_dnx_suffix,
        )
        if not export:
            raise CLIError("Failed to create export with id: '{}'.".format(export_id))
        # add to cache
        self._exports[export["id"]] = export
        return export

    def update_export(
        self, export_id, payload, central_dnx_suffix=CENTRAL_ENDPOINT
    ) -> Union[dict, ExportV1_1_preview]:
        """Update an existing export and refresh its cache entry."""
        export = central_services.export.update_export(
            self._cmd,
            self._app_id,
            export_id=export_id,
            payload=payload,
            token=self._token,
            api_version=self._api_version,
            central_dns_suffix=central_dnx_suffix,
        )
        if not export:
            raise CLIError("Failed to create export with id: '{}'.".format(export_id))
        # add to cache
        self._exports[export_id] = export
        return export

    def get_export(
        self, export_id, central_dnx_suffix=CENTRAL_ENDPOINT
    ) -> Union[dict, ExportV1_1_preview]:
        """Return an export, serving from the cache when possible."""
        # get or add to cache
        export = self._exports.get(export_id)
        if not export:
            export = central_services.export.get_export(
                cmd=self._cmd,
                app_id=self._app_id,
                token=self._token,
                api_version=self._api_version,
                export_id=export_id,
                central_dns_suffix=central_dnx_suffix,
            )
            if not export:
                raise CLIError("No export found with id: '{}'.".format(export_id))
            else:
                self._exports[export_id] = export
        return export

    def delete_export(self, export_id, central_dnx_suffix=CENTRAL_ENDPOINT):
        """Delete an export and drop it from the cache."""
        central_services.export.delete_export(
            cmd=self._cmd,
            app_id=self._app_id,
            token=self._token,
            api_version=self._api_version,
            export_id=export_id,
            central_dns_suffix=central_dnx_suffix,
        )
        self._exports.pop(export_id, None)
| 1.757813 | 2 |
classfile/member_info.py | kimi641/pyJVM | 0 | 12767709 | <filename>classfile/member_info.py
from typing import List
from classfile.class_read import ClassReader
from classfile.attribute_info import readAttributes,CodeAttribute,ConstantValueAttribute
class MemberInfo:
    """One field_info/method_info entry of a parsed class file."""

    def __init__(self,
                 cp,
                 accessFlags,
                 nameIndex,
                 descriptorIndex,
                 attributes):
        self.cp = cp
        self.accessFlags = accessFlags
        self.nameIndex = nameIndex
        self.descriptorIndex = descriptorIndex
        self.attributes = attributes

    @property
    def AccessFlags(self):
        return self.accessFlags

    @property
    def Name(self) -> str:
        # Resolve the member name through the constant pool.
        return self.cp.getUtf8(self.nameIndex)

    @property
    def Descriptor(self) -> str:
        return self.cp.getUtf8(self.descriptorIndex)

    @property
    def CodeAttribute(self) -> CodeAttribute:
        # First Code attribute, or None when the member has no bytecode.
        return next((attr for attr in self.attributes
                     if isinstance(attr, CodeAttribute)), None)

    def ConstantValueAttribute(self):
        # First ConstantValue attribute, or None when absent.
        return next((attr for attr in self.attributes
                     if isinstance(attr, ConstantValueAttribute)), None)
def readMembers(reader: ClassReader, cp) -> List[MemberInfo]:
    """Read the fields/methods table: a u2 count followed by that many entries."""
    count = reader.readUint16()
    return [readMember(reader, cp) for _ in range(count)]
def readMember(reader:ClassReader, cp) -> MemberInfo:
    # Field order per the class-file format: access flags, name index,
    # descriptor index, then the attributes table.
    return MemberInfo(cp,
                      reader.readUint16(),
                      reader.readUint16(),
                      reader.readUint16(),
                      readAttributes(reader, cp))
| 2.78125 | 3 |
solutions/flask-adopt-solution/part-1/models.py | demohack/nonpub | 0 | 12767710 | <gh_stars>0
"""Models for adopt app."""
from flask_sqlalchemy import SQLAlchemy
# Fallback image shown when a pet has no photo_url.
GENERIC_IMAGE = "https://mylostpetalert.com/wp-content/themes/mlpa-child/images/nophoto.gif"

# Module-level SQLAlchemy instance; bound to the app in connect_db().
db = SQLAlchemy()
class Pet(db.Model):
    """Adoptable pet."""

    __tablename__ = "pets"

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, nullable=False)
    species = db.Column(db.Text, nullable=False)
    photo_url = db.Column(db.Text)
    age = db.Column(db.Integer)
    notes = db.Column(db.Text)
    # New pets default to being available for adoption.
    available = db.Column(db.Boolean, nullable=False, default=True)

    def image_url(self):
        """Return image for pet -- bespoke or generic."""
        return self.photo_url or GENERIC_IMAGE
def connect_db(app):
    """Connect this database to provided Flask app.

    You should call this in your Flask app.
    """
    # Bind the app on the db object so models can be used outside an
    # application context (classic Flask-SQLAlchemy pattern).
    db.app = app
    db.init_app(app)
| 2.8125 | 3 |
bicicleta.py | PaloBraga/Helloworld-Python | 0 | 12767711 | bicicleta=["bike","cannon","cargo", "CALOI"]
#Armazenamento de farias mensagens/lista em uma string
print(bicicleta[0].title())
print(bicicleta[1])
print(bicicleta[2])
print(bicicleta[3])
print(bicicleta[-1])
print(bicicleta[-2])
print(bicicleta[-3])
print(bicicleta[-4].title())
#como imprimir uma mensagem especifica de uma strings que contem varias mensagens/listas
mensagem="Minha Primeira Bicicleta foi uma " + bicicleta[3].title() + "!"
print(mensagem)
| 4.125 | 4 |
setup.py | trenton42/txwac | 0 | 12767712 | import re
# Prefer setuptools; fall back to plain distutils when it is unavailable.
try:
    import setuptools
except ImportError:
    import distutils.core
    setup = distutils.core.setup
else:
    setup = setuptools.setup


setup(
    name='txwac',
    # Extract __version__ from txwac.py without importing the module.
    version=(re
             .compile(r".*__version__ = '(.*?)'", re.S)
             .match(open('txwac.py').read())
             .group(1)),
    url='https://github.com/trenton42/txwac/',
    license=open('LICENSE').read(),
    author='wac',
    # NOTE(review): '<EMAIL>' looks like a redaction placeholder --
    # restore the real maintainer address before publishing.
    author_email='<EMAIL>',
    description='Writing RESTful API clients.',
    long_description=(
        open('README.rst').read() + '\n\n' +
        open('HISTORY.rst').read()
    ),
    py_modules=['txwac'],
    package_data={'': ['LICENSE']},
    include_package_data=True,
    tests_require=[
        'mock>=0.8',
        'simplejson >= 2.1',
        'unittest2 >= 0.5.1',
        'iso8601',
    ],
    install_requires=[
        'treq'
    ],
    # Tests run under Twisted's trial runner.
    test_suite='trial',
    classifiers=[
        'Intended Audience :: Developers',
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ],
)
| 1.695313 | 2 |
import random

# Key: word, value: character count
# Example list with random sentences (from https://randomwordgenerator.com/sentence.php):
sentences = ["The chic gangster liked to start the day with a pink scarf.", "The wooden spoon couldn’t cut but left emotional scars.",
             "Tomatoes make great weapons when water balloons aren’t available.", "Weather is not trivial - it's especially important when you're standing in it.",
             "Getting up at dawn is for the birds.", "I just wanted to tell you I could see the love you have for your child by the way you look at her.",
             "The delicious aroma from the kitchen was ruined by cigarette smoke.", "Bill ran from the giraffe toward the dolphin.", "The crowd yells and screams for more memes.",
             "Mothers spend months of their lives waiting on their children.", "Gwen had her best sleep ever on her new bed of nails.",
             "We're careful about orange ping pong balls because people might think they're fruit.", "I'd rather be a bird than a fish.",
             "He picked up trash in his spare time to dump in his neighbor's yard.", "For the 216th time, he said he would quit drinking soda after this last Coke.",
             "When he asked her favorite number, she answered without hesitation that it was diamonds.", "With a single flip of the coin, his life changed forever.",
             "You'll see the rainbow bridge after it rains cats and dogs.", "He didn't understand why the bird wanted to ride the bicycle."]

# Pick one sentence at random:
sentence = random.choice(sentences)

# Map every whitespace-separated word of the chosen sentence to its length.
# Renamed from ``dict`` to ``word_lengths`` to avoid shadowing the ``dict`` builtin.
word_lengths = {item: len(item) for item in sentence.split()}

print(word_lengths)

# Change code as needed!
| 3.90625 | 4 |
src/DH/State.py | fgwy/uavSim | 30 | 12767714 | <gh_stars>10-100
import numpy as np
from src.Map.Map import Map
from src.StateUtils import pad_centered
from src.base.BaseState import BaseState
class DHScenario:
    """Plain configuration container for a data-harvesting scenario."""

    def __init__(self):
        # Indices identifying the devices taking part in the scenario.
        self.device_idcs = []
        # Data amounts associated with those devices.
        self.device_data = []
        # Index of the starting position.
        self.position_idx = 0
        # Number of movement steps available to the agent.
        self.movement_budget = 100
class DHState(BaseState):
    """Episode state for data harvesting: UAV position, movement budget, and per-cell device data."""

    def __init__(self, map_init: Map):
        super().__init__(map_init)
        self.device_list = None
        self.device_map = None  # Floating point sparse matrix showing devices and their data to be collected
        self.position = [0, 0]
        self.movement_budget = 0
        self.landed = False
        self.terminal = False
        self.device_com = -1  # set via set_device_com; -1 initially
        self.initial_movement_budget = 0
        self.initial_total_data = 0
        self.collected = None

    def set_landed(self, landed):
        """Record whether the UAV has landed."""
        self.landed = landed

    def set_position(self, position):
        """Set the UAV position (stored as [x, y])."""
        self.position = position

    def decrement_movement_budget(self):
        """Consume one unit of the remaining movement budget."""
        self.movement_budget -= 1

    def set_terminal(self, terminal):
        """Mark (or unmark) the episode as terminal."""
        self.terminal = terminal

    def set_device_com(self, device_com):
        """Select the device index currently communicated with."""
        self.device_com = device_com

    def get_remaining_data(self):
        """Total data still left on the map (sum over the device map)."""
        return np.sum(self.device_map)

    def get_total_data(self):
        """Total data present at the start of the episode."""
        return self.initial_total_data

    def get_scalars(self):
        """
        Return the scalars without position, as it is treated individually
        """
        return np.array([self.movement_budget])

    def get_num_scalars(self):
        """Number of scalar entries returned by get_scalars."""
        return 1

    def get_boolean_map(self):
        """Centred, padded boolean layers: no-fly zone and obstacles (padded with 1), landing zone (padded with 0)."""
        padded_red = pad_centered(self, np.concatenate([np.expand_dims(self.no_fly_zone, -1),
                                                        np.expand_dims(self.obstacles, -1)], axis=-1), 1)
        padded_rest = pad_centered(self, np.expand_dims(self.landing_zone, -1), 0)
        return np.concatenate([padded_red, padded_rest], axis=-1)

    def get_boolean_map_shape(self):
        """Shape of the array produced by get_boolean_map."""
        return self.get_boolean_map().shape

    def get_float_map(self):
        """Centred, zero-padded float map containing the device-data layer."""
        return pad_centered(self, np.expand_dims(self.device_map, -1), 0)

    def get_float_map_shape(self):
        """Shape of the array produced by get_float_map."""
        return self.get_float_map().shape

    def is_in_landing_zone(self):
        """True if the current position lies inside the landing zone."""
        return self.landing_zone[self.position[1]][self.position[0]]

    def is_in_no_fly_zone(self):
        """True if the current position is in a no-fly zone; out-of-bounds counts as no-fly."""
        # Out of bounds is implicitly nfz
        if 0 <= self.position[1] < self.no_fly_zone.shape[0] and 0 <= self.position[0] < self.no_fly_zone.shape[1]:
            return self.no_fly_zone[self.position[1], self.position[0]]
        return True

    def get_collection_ratio(self):
        """Fraction of the initial data that has been collected so far."""
        return np.sum(self.collected) / self.initial_total_data

    def get_collected_data(self):
        """Total amount of data collected so far."""
        return np.sum(self.collected)

    def reset_devices(self, device_list):
        """Re-initialise the device-data layers from *device_list* for a new episode."""
        self.device_map = device_list.get_data_map(self.no_fly_zone.shape)
        self.collected = np.zeros(self.no_fly_zone.shape, dtype=float)
        self.initial_total_data = device_list.get_total_data()
        self.device_list = device_list

    def is_terminal(self):
        """Whether the episode has been marked terminal."""
        return self.terminal
| 2.71875 | 3 |
hapycolor/visual.py | rvdz/hapycolor | 1 | 12767715 | <reponame>rvdz/hapycolor
""" Utilitary methods to display ouputs """
# Add color to a string
def str_color(message, rgbcol):
    """Wrap *message* in a 24-bit ANSI foreground colour escape built from the (r, g, b) tuple."""
    red, green, blue = rgbcol
    return "\033[38;2;{};{};{}m{}\033[0m".format(red, green, blue, message)
# print(squares from a palette)
def print_palette(rgbcols, size=2):
    """Print one row of coloured blocks, ``size`` block pairs per palette colour."""
    row = "".join(str_color("██" * size, colour) for colour in rgbcols)
    print(row)
| 3.390625 | 3 |
sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/action_py3.py | mccoyp/azure-keyvault-7.3-preview | 0 | 12767716 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Action(Model):
    """The action that will be executed.

    :param action_type: The type of the action. Possible values include:
     'EmailContacts', 'AutoRenew'
    :type action_type: str or ~certificates.models.ActionType
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'action_type': {'key': 'action_type', 'type': 'ActionType'},
    }

    def __init__(self, *, action_type=None, **kwargs) -> None:
        """Initialise the model; *action_type* is keyword-only and defaults to None."""
        super(Action, self).__init__(**kwargs)
        self.action_type = action_type
| 1.882813 | 2 |
code/rsfc-roi2roi.py | NBCLab/PACE | 0 | 12767717 | <reponame>NBCLab/PACE
import argparse
import os
import os.path as op
from glob import glob
from shutil import copyfile
import pandas as pd
def _get_parser():
parser = argparse.ArgumentParser(description="Run RSFC in AFNI")
parser.add_argument(
"--clean_dir",
dest="clean_dir",
required=True,
help="Path to denoised data directory",
)
parser.add_argument(
"--rsfc_dir",
dest="rsfc_dir",
required=True,
help="Path to RSFC directory",
)
parser.add_argument(
"--atlas_dir",
dest="atlas_dir",
required=True,
help="Path to atlas directory",
)
parser.add_argument(
"--subject",
dest="subject",
required=True,
help="Subject identifier, with the sub- prefix.",
)
parser.add_argument(
"--sessions",
dest="sessions",
default=[None],
required=False,
nargs="+",
help="Sessions identifier, with the ses- prefix.",
)
parser.add_argument(
"--space",
dest="space",
default="MNI152NLin2009cAsym",
required=False,
help="Standard space, MNI152NLin2009cAsym",
)
parser.add_argument(
"--desc_list",
dest="desc_list",
required=True,
nargs="+",
help="Name of the output files in the order [Clean, Clean + Smooth]",
)
parser.add_argument(
"--n_jobs",
dest="n_jobs",
default=4,
required=False,
help="CPUs",
)
return parser
def roi_resample(roi_in, roi_out, template):
    """Resample ``roi_in`` onto the grid of ``template`` using AFNI's 3dresample, writing ``roi_out``."""
    cmd = f"3dresample \
        -prefix {roi_out} \
        -master {template} \
        -inset {roi_in}"
    # Echo the command (indented for readability in the pipeline log) before running it.
    print(f"\t\t\t{cmd}", flush=True)
    os.system(cmd)
def ave_timeseries(mask, rs_file, rs_timeseries):
    """Write the mean time series of ``rs_file`` within ``mask`` to ``rs_timeseries`` via AFNI's 3dmaskave."""
    cmd = f"3dmaskave \
        -q \
        -mask {mask} \
        {rs_file} > {rs_timeseries}"
    # Echo the command before running it.
    print(f"\t\t\t{cmd}", flush=True)
    os.system(cmd)
def make_label_table(lab_file, lab_table, atlas_img):
    """Build a label table from ``lab_file`` and attach it to ``atlas_img`` via AFNI's @MakeLabelTable."""
    cmd = f'@MakeLabelTable \
        -lab_file {lab_file} 1 0 \
        -labeltable {lab_table} \
        -dset {atlas_img}'
    # Echo the command before running it.
    print(f"\t\t\t{cmd}", flush=True)
    os.system(cmd)
def roi2roi_conn(clean_subj_fn, mask_file, atlas_img, rsfc_atlas_subj):
    """Compute ROI-to-ROI connectivity (Fisher-z) with AFNI's 3dNetCorr, writing outputs under ``rsfc_atlas_subj``."""
    cmd = f'3dNetCorr \
        -inset {clean_subj_fn} \
        -mask {mask_file} \
        -in_rois {atlas_img} \
        -fish_z \
        -ts_wb_Z -ts_wb_strlabel -nifti \
        -ts_out \
        -ts_label \
        -prefix {rsfc_atlas_subj}'
    # Echo the command before running it.
    print(f"\t\t\t{cmd}", flush=True)
    os.system(cmd)
def main(clean_dir, rsfc_dir, atlas_dir, subject, sessions, space, desc_list, n_jobs):
    """Run denoising workflows on a given dataset.

    For each session of *subject*, find cleaned rest runs matching *space* and
    the first entry of *desc_list*, copy the brain mask, resample each atlas to
    the run's grid, attach label tables where available, and compute
    ROI-to-ROI connectivity. Skips steps whose outputs already exist.
    """
    # NOTE(review): os.system spawns a subshell, so this export does not
    # persist into the current process or later os.system calls — confirm intent.
    os.system(f"export OMP_NUM_THREADS={n_jobs}")
    assert len(desc_list) == 2
    atlases = sorted(glob(op.join(atlas_dir, "*")))
    # sessions == [None] means "discover ses-* directories on disk".
    if sessions[0] is None:
        temp_ses = glob(op.join(clean_dir, subject, "ses-*"))
        if len(temp_ses) > 0:
            sessions = [op.basename(x) for x in temp_ses]
    for session in sessions:
        if session is not None:
            clean_subj_dir = op.join(clean_dir, subject, session, "func")
            rsfc_subj_dir = op.join(rsfc_dir, subject, session, "func")
        else:
            clean_subj_dir = op.join(clean_dir, subject, "func")
            rsfc_subj_dir = op.join(rsfc_dir, subject, "func")
        # Collect important files
        clean_subj_files = sorted(
            glob(
                op.join(
                    clean_subj_dir, f"*task-rest*_space-{space}*_desc-{desc_list[0]}_bold.nii.gz"
                )
            )
        )
        if len(clean_subj_files) > 0:
            os.makedirs(rsfc_subj_dir, exist_ok=True)
            # ###################
            # RSFC
            # ###################
            for clean_subj_file in clean_subj_files:
                clean_subj_name = op.basename(clean_subj_file)
                prefix = clean_subj_name.split("desc-")[0].rstrip("_")
                mask_files = sorted(glob(op.join(clean_subj_dir, f"{prefix}_desc-brain_mask.nii.gz")))
                assert len(mask_files) == 1
                mask_name = os.path.basename(mask_files[0])
                mask_file = op.join(rsfc_subj_dir, mask_name)
                # Copy the brain mask next to the RSFC outputs.
                copyfile(mask_files[0], mask_file)
                print(f"\tProcessing {subject}, {session} files:", flush=True)
                print(f"\t\tClean: {clean_subj_file}", flush=True)
                print(f"\t\tMask: {mask_file}", flush=True)
                for atlas in atlases:
                    atlas_name = op.basename(atlas)
                    atlas_imgs = sorted(glob(op.join(atlas, "*.nii.gz")))
                    assert len(atlas_imgs) == 1
                    atlas_img = atlas_imgs[0]
                    lab_files = sorted(glob(op.join(atlas, "*.txt")))
                    if len(lab_files) == 0:
                        # Do not create label table file
                        make_table = False
                    else:
                        assert len(lab_files) == 1
                        lab_file = lab_files[0]
                        make_table = True
                    # Resample atlas
                    atlas_img_res = op.join(rsfc_subj_dir, f"{prefix}_desc-{atlas_name}_atlas.nii.gz")
                    if not op.exists(atlas_img_res):
                        roi_resample(atlas_img, atlas_img_res, clean_subj_file)
                    # Create label table
                    lab_table = op.join(rsfc_subj_dir, f"{prefix}_desc-{atlas_name}_labtable.niml.lt")
                    if (not op.exists(lab_table)) and (make_table):
                        make_label_table(lab_file, lab_table, atlas_img_res)
                    # Calculate RSFC
                    rsfc_atlas_subj = op.join(rsfc_subj_dir, f"{prefix}_desc-{atlas_name}")
                    if not op.exists(f"{rsfc_atlas_subj}_000.netcc"):
                        roi2roi_conn(clean_subj_file, mask_file, atlas_img_res, rsfc_atlas_subj)
def _main(argv=None):
    """Command-line entry point: parse *argv* and dispatch to :func:`main`."""
    parsed = _get_parser().parse_args(argv)
    main(**vars(parsed))
if __name__ == "__main__":
_main()
| 2.296875 | 2 |
16str_bin_convert.py | Eric-Matrix/tools | 0 | 12767718 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys
import os
import struct
"""
python str16_bin_convert.py file(can include path)
"""
#input file
#output file
def Str16_to_binFile(inputpath, outpath):
    """Convert a file containing a hex string into the equivalent binary file.

    Every pair of hex characters in *inputpath* becomes one byte of *outpath*.
    """
    with open(inputpath, 'r') as src:
        hex_text = src.read()
    with open(outpath, 'wb') as dst:
        pos = 0
        # Consume two hex digits at a time; a trailing odd character is ignored.
        while pos < len(hex_text) - 1:
            byte_val = int("0x" + hex_text[pos:pos + 2], 16)
            dst.write(struct.pack('B', byte_val))
            pos += 2
def binFile_to_Str16(inputpath, outpath):
    """Convert a binary file into its uppercase, zero-padded hex-string representation.

    Reads *inputpath* as raw bytes and writes the equivalent hexadecimal text
    (two characters per byte, e.g. 0x0D -> "0D") to *outpath*.
    """
    with open(inputpath, 'rb') as f:
        buffer = f.read()
    # In Python 3, iterating ``bytes`` yields ints directly. The previous
    # per-character ``struct.unpack('B', i)`` approach only worked on Python 2
    # (where iteration yielded 1-char strings) and raised TypeError here.
    # Format each byte as two uppercase hex digits; a leftover debug
    # ``print(type(buffer))`` was also removed.
    Str16 = ''.join('{:02X}'.format(b) for b in buffer)
    with open(outpath, 'w') as f:
        f.write(Str16)
if __name__ == '__main__':
    # Print (Chinese-language) usage instructions.
    print('-' * 80)
    print('Usage python 16str_bin_convert.py input_file_path ')
    print('python 16str_bin_convert.py ./a.bin:意思是把a.bin中二进制按字节转换成相同的字符串!')
    print('python 16str_bin_convert.py ./a.txt: 意思是把txt中的字符串转成相同的二进制文件!')
    print('例如:二进制文件内容是 "0x9D 0x2F 0x0D....",转换成字符串是9D2F0D...')
    print('输出文件在同一路径下')
    print('-' * 80)
    if(len(sys.argv) < 2):
        # Require at least the input file argument ("please check arguments").
        print('请检查参数!')
    else:
        input_file_path = sys.argv[1]
        filepath, tempFileName = os.path.split(sys.argv[1])
        filename, extension = os.path.splitext(tempFileName)
        if(filepath == ''):
            # No directory part given: use the current directory for output.
            filepath = filepath + '.'
        print(filepath)
        if(extension == '.bin'):
            # .bin input -> emit the hex-string .txt alongside it.
            output_file = filepath + '/' + filename + '.txt'
            binFile_to_Str16(input_file_path, output_file)
            print('输出文件:' + output_file + 'finshed!')
        elif(extension == '.txt'):
            # .txt input -> emit the binary .bin alongside it.
            output_file = filepath + '/' + filename + '.bin'
            print(output_file)
            Str16_to_binFile(input_file_path, output_file)
            print('输出文件:' + output_file + 'finshed!')
| 3.515625 | 4 |
level1/jupfits/FitJupObservation.py | SharperJBCA/COMAPAnalysis | 1 | 12767719 | import numpy as np
from matplotlib import pyplot
try:
import ConfigParser
except ModuleNotFoundError:
import configparser as ConfigParser
import argparse
import h5py
from scipy.signal import savgol_filter
import Pointing
from os import listdir, getcwd
from os.path import isfile, join
import Mapping
import Pointing
import mpi4py
import FitSource
import EphemNew
import healpy as hp
def cel2gal(ra, dec, inverse=False):
    """Rotate equatorial (ra, dec) in degrees to galactic coordinates via healpy (or back if *inverse*)."""
    lon_rad = ra * np.pi / 180.
    colat_rad = np.pi / 2. - dec * np.pi / 180.
    # Choose rotation direction: galactic->celestial when inverse, else celestial->galactic.
    rot = hp.Rotator(coord=['G', 'C'] if inverse else ['C', 'G'])
    colat_rad, lon_rad = rot(colat_rad, lon_rad)
    return lon_rad * 180. / np.pi, (np.pi / 2. - colat_rad) * 180. / np.pi
def SlewDistance(az):
    """Estimate the median slew distance from an azimuth time stream.

    Large sample-to-sample azimuth derivatives are treated as scan
    turn-arounds ("peaks"); the result is the median azimuth separation
    between consecutive peak pairs.
    """
    # Absolute sample-to-sample azimuth change.
    daz = np.abs(az[:az.size-1] - az[1:az.size])
    # loop over spikes
    start = np.argmax(daz)
    peaks = [start]
    searchRange = 1000
    indices = np.arange(daz.size).astype(int)
    find = np.zeros(daz.size).astype(bool)
    thres = 0.01
    while True:
        # Mask a window around the current peak, then take the largest
        # derivative outside all masked windows as the next peak candidate.
        find = find | (indices > start-searchRange) & (indices < start + searchRange)
        if (np.sum(find) == daz.size):
            break
        start = (indices[~find])[np.argmax(daz[~find])]
        peaks += [start]
        # Stop once the remaining (masked) derivatives fall below threshold.
        if np.max(daz[find]) < thres:
            break
    peaks = np.sort(np.array(peaks))
    peakAz = az[peaks]
    # Pair consecutive peaks and take the median of their azimuth separations.
    slewDist = np.abs(peakAz[:peakAz.size//2 *2:2] - peakAz[1:peakAz.size//2 *2:2])
    return np.median(slewDist)
def main(filename, plotDir='Plots/'):
    """Fit the Jupiter observation in one COMAP level-1 HDF5 file.

    Reads TOD and pointing, computes Jupiter's position, fits each horn's
    band-averaged data with FitSource.FitTOD, saves a feed-position plot,
    and returns (P1out, Peout, map, meanMJD, meanEl, meanAz).
    """
    # Which pixels and sidebands?
    pixelOffsets = Pointing.GetPixelOffsets('COMAP_FEEDS.dat')
    # READ IN THE DATA
    d = h5py.File(filename)
    tod = d['spectrometer/tod']
    mjd = d['spectrometer/MJD'][:]
    # Pointing az/el may be stored per-feed (2-D) or shared (1-D).
    if len(d['pointing/az'].shape) > 1:
        az = d['pointing/az'][0,:]
        el = d['pointing/el'][0,:]
    else:
        az = d['pointing/az'][:]
        el = d['pointing/el'][:]
    mjdpoint = d['pointing/MJD'][:]
    slewDist = SlewDistance(az)
    ra, dec, pa, az, el, mjd = Pointing.GetPointing(az, el, mjd,
                                                    mjdpoint, pixelOffsets,
                                                    lon=Pointing.comap_lon,
                                                    lat=Pointing.comap_lat)
    # Calculate data sizes:
    nHorns = tod.shape[0]
    nSBs = tod.shape[1]
    nFreqs = tod.shape[2]
    nSamps = tod.shape[3]
    # Calculate the position of Jupiter
    clon, clat, diam = EphemNew.rdplan(mjd[0:1], 5,
                                       Pointing.comap_lon*np.pi/180.,
                                       Pointing.comap_lat*np.pi/180.)
    EphemNew.precess(clon, clat, mjd[0:1])
    # Loop over horns/SBs
    P1out = None
    prefix = filename.split('/')[-1].split('.')[0]
    for iHorn in range(nHorns):
        print('Processing Horn {:d}'.format(iHorn+1))
        # Average over sidebands and the central frequency channels.
        _tod = np.nanmean(np.nanmean(tod[iHorn,:,5:-5,:],axis=0),axis=0)
        #Tim: Pass this function whatever chunk of time-ordered data you have in memory
        P1, P1e, cross, mweight, weight, model = FitSource.FitTOD(_tod,
                                                                  ra[0,:], # horn 0 because we want the relative offset from Focal Plane
                                                                  dec[0,:],
                                                                  clon*180./np.pi,
                                                                  clat*180./np.pi,
                                                                  pa[0,:],
                                                                  prefix='{}_Horn{}'.format(prefix, iHorn+1),
                                                                  plotDir=plotDir)
        # Allocate accumulators once the first fit defines their shapes.
        if isinstance(P1out, type(None)):
            P1out = np.zeros((nHorns, len(P1)))
            Peout = np.zeros((nHorns, len(P1e)))
            mout = np.zeros(mweight.shape)
            hout = np.zeros(weight.shape)
        if not isinstance(P1, type(None)):
            P1out[iHorn, :] = P1
            Peout[iHorn, :] = P1e
            mout += mweight*(model+1)**2
            hout += weight*(model+1)**2
    # Plot the accumulated, weight-normalised map of all feeds.
    pyplot.imshow(mout/hout, extent=[-100/2. * 1.5, 100/2.*1.5,-100/2. * 1.5, 100/2.*1.5] )
    pyplot.xlabel('Az offset (arcmin)')
    pyplot.ylabel('EL offset (arcmin)')
    pyplot.title('{}'.format(prefix))
    pyplot.grid(True)
    pyplot.savefig('{}/FeedPositions_{}.png'.format(plotDir, prefix), bbox_inches='tight')
    pyplot.clf()
    meanMJD = np.mean(mjd)
    meanEl = np.median(el)
    meanAz = np.median(az)
    d.close()
    print('SLEW DISTANCE', slewDist)
    return P1out, Peout, mout/hout, meanMJD, meanEl, meanAz
from mpi4py import MPI
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--filename', type=str)
parser.add_argument('--filelist', default=None, type=str)
parser.add_argument('--fitoutputdir', default='.', type=str)
args = parser.parse_args()
P1 = None
if isinstance(args.filelist, type(None)):
main(args.filename)
else:
filelist = np.loadtxt(args.filelist, dtype=str)
for i, f in enumerate(filelist):
print('Opening',f)
_P1, _P1e, m, meanMJD, meanEl, meanAz = main(f)
prefix = f.split('/')[-1].split('.h')[0]
output = h5py.File('{}/{}_JupiterFits.h5'.format(args.fitoutputdir, prefix))
output['P1'] = _P1
output['P1e'] = _P1e
coords = np.zeros(3)
coords[:] = meanAz, meanEl, meanMJD,
output['coords'] = coords
output['map'] = m
output.close()
| 1.984375 | 2 |
python/popart.ir/python_files/ops/group_norm.py | gglin001/popart | 0 | 12767720 | <filename>python/popart.ir/python_files/ops/group_norm.py
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
from typing import Optional
import popart._internal.ir as _ir
from popart.ir.context import get_current_context
from popart.ir.tensor import Tensor
from .utils import check_in_graph
__all__ = ['group_norm']
def group_norm(x: Tensor,
               weight: Tensor,
               bias: Tensor,
               num_groups: int,
               eps: float = 1e-5) -> Tensor:
    """
    Applies Group Normalisation over a Tensor. https://arxiv.org/abs/1803.08494

    Args:
        x: Tensor
            Tensor to be normalized.
        weight: Tensor
            Tensor to scale output of normalisation.
        bias: Tensor
            Tensor to shift output of normalisation.
        num_groups: int
            Number of groups to normalise over.
        eps: float
            Small constant added for numerical stability.
    Returns:
        out: Tensor
            The group normalised Tensor.
    """
    ctx = get_current_context()
    g = ctx.graph
    pb_g = g._pb_graph

    check_in_graph(g, x, weight, bias)

    settings = ctx._get_op_settings('group_norm')
    opid = _ir.OperatorIdentifier("ai.graphcore", "GroupNorm", 1,
                                  _ir.NumInputs(3, 3), 3)
    # The op produces three outputs (normalised tensor, per-group mean,
    # per-group inverse std dev); only the first is returned.
    op = pb_g.createConnectedOp_GroupNormOp(
        {
            0: x.id,
            1: weight.id,
            2: bias.id
        },
        {
            0: g._create_tensor_id("group_norm_out"),
            1: g._create_tensor_id("group_norm_mean"),
            2: g._create_tensor_id("group_norm_inv_std_dev"),
        },
        opid,
        num_groups,
        eps,
        settings,
    )
    return Tensor._from_pb_tensor(op.outTensor(0))
| 2.140625 | 2 |
NoteBooks/Curso de Python/Python/Core/Decoradores/Clases_decoradores.py | Alejandro-sin/Learning_Notebooks | 1 | 12767721 | <reponame>Alejandro-sin/Learning_Notebooks<filename>NoteBooks/Curso de Python/Python/Core/Decoradores/Clases_decoradores.py
'''
Decoradores y clases.
update_wrapper:
Para evitar que el efecto de wrap suceda se usa en este aso al ser una clase
Es necesario que la clase sea llamable. Por lo que se implementa el método dunder __call__, este método se corre cada vez que se implementtratea de crear una instancia de la clase
'''
from functools import update_wrapper
class Count:
    """Decorator class that counts how many times the wrapped callable is invoked."""

    def __init__(self, func):
        # Copy the wrapped function's metadata (name, docstring, ...) onto self.
        update_wrapper(self, func)
        self.func = func
        self.cnt = 0

    def __call__(self, *args, **kwargs):
        # Bump the counter, report it, then delegate to the wrapped callable.
        self.cnt = self.cnt + 1
        print(f'Current count: {self.cnt}')
        return self.func(*args, **kwargs)
@Count
def fib(n):
    """Return the n-th Fibonacci number (naive double recursion)."""
    if n < 2:
        return n
    return fib(n - 1) + fib(n - 2)


fib(10)
| 3.8125 | 4 |
tests/test_apps/helloworld/wsgi.py | fphonor/flask | 52 | 12767722 | <reponame>fphonor/flask
from hello import app
| 1.023438 | 1 |
ml_params/utils.py | SamuelMarks/ml-params | 2 | 12767723 | """
Collection of utility functions
"""
from copy import deepcopy
from functools import partial
from inspect import getmembers
from operator import itemgetter
from os import environ, path
from sys import version_info
def camel_case(st, upper=False):
    """
    Convert a string to camel case (upper or lower)

    :param st: input string
    :type st: ```str```

    :param upper: upper camelcase if True, else lower camelcase
    :type upper: ```bool```

    :return: camel case representation of input string
    :rtype: ```str```
    """
    joined = "".join(ch for ch in st.title() if ch.isalnum())
    head = joined[0].upper() if upper else joined[0].lower()
    return head + joined[1:]
def common_dataset_handler(
    ds_builder,
    scale,
    K,
    as_numpy,
    acquire_and_concat_validation_to_train=True,
    **download_and_prepare_kwargs
):
    """
    Helper function that is to be used by the different dataset builders

    :param ds_builder: dataset builder
    :type ds_builder: ```Union[tfds.core.DatasetBuilder, Tuple[tf.data.Dataset, tf.data.Dataset],
     Tuple[np.ndarray, np.ndarray]```

    :param scale: rescale input (divide) by this amount, None for do nothing
    :type scale: ```Optional[Union[int, float]]```

    :param K: backend engine, e.g., `np` or `tf`
    :type K: ```Literal['np', 'tf']```

    :param as_numpy: Convert to numpy ndarrays
    :type as_numpy: ```bool```

    :param acquire_and_concat_validation_to_train: Whether to acquire the validation split
     and then concatenate it to train
    :type acquire_and_concat_validation_to_train: ```bool```

    :param download_and_prepare_kwargs:
    :type download_and_prepare_kwargs: ```**download_and_prepare_kwargs```

    :return: Train and tests dataset splits
    :rtype: ```Union[Tuple[tf.data.Dataset,tf.data.Dataset,tfds.core.DatasetInfo], Tuple[np.ndarray,np.ndarray,Any]]```
    """
    as_dataset_kwargs, info = {"batch_size": -1}, None
    # Dispatch on the duck type of ds_builder: a TFDS builder, a trax stream
    # pair, or a plain (train, test) tuple.
    if hasattr(ds_builder, "download_and_prepare") and hasattr(
        ds_builder, "as_dataset"
    ):
        info, test_ds, train_ds = _handle_tfds(
            acquire_and_concat_validation_to_train,
            as_dataset_kwargs,
            download_and_prepare_kwargs,
            ds_builder,
            info,
        )
    elif hasattr(ds_builder, "train_stream") and hasattr(ds_builder, "eval_stream"):
        return ds_builder  # Handled elsewhere, this is from trax
    else:
        train_ds, test_ds = ds_builder
    if as_numpy:
        train_ds, test_ds = to_numpy(train_ds, K), to_numpy(test_ds, K)
    if K is not None and scale is not None:
        # A (h, w)-style tuple scale must be uniform; collapse it to a scalar.
        if isinstance(scale, tuple):
            assert scale[0] == scale[1]
            scale = scale[0]
        train_ds["image"] = K.float32(train_ds["image"]) / scale
        test_ds["image"] = K.float32(test_ds["image"]) / scale
    return train_ds, test_ds, info or train_ds._info
def _handle_tfds(
    acquire_and_concat_validation_to_train,
    as_dataset_kwargs,
    download_and_prepare_kwargs,
    ds_builder,
    info,
):
    """
    Helper function that is to be used by the different dataset builders

    :param acquire_and_concat_validation_to_train: Whether to acquire the validation split
     and then concatenate it to train
    :type acquire_and_concat_validation_to_train: ```bool```

    :param as_dataset_kwargs:
    :type as_dataset_kwargs: ```**as_dataset_kwargs```

    :param download_and_prepare_kwargs:
    :type download_and_prepare_kwargs: ```**download_and_prepare_kwargs```

    :param ds_builder: dataset builder
    :type ds_builder: ```tfds.core.DatasetBuilder```

    :param info: Dataset info
    :type info: ```tfds.core.DatasetInfo```

    :return: Train and tests dataset splits
    :rtype: ```Union[Tuple[tf.data.Dataset,tf.data.Dataset,tfds.core.DatasetInfo], Tuple[np.ndarray,np.ndarray,Any]]```
    """
    train_ds, test_ds, dl_and_prep = None, None, True
    # With a manual download dir configured, skip download_and_prepare when the
    # data is already on disk (either in _data_dir or a sibling downloads/ dir).
    if (
        "download_config" in download_and_prepare_kwargs
        and download_and_prepare_kwargs["download_config"].manual_dir
    ):
        dl_and_prep = not path.isdir(ds_builder._data_dir)
        if dl_and_prep:
            name_slash = "{}{}{}".format(path.sep, ds_builder.name, path.sep)
            other_data_dir = ds_builder._data_dir.replace(
                name_slash, "{}downloads{}".format(path.sep, name_slash)
            )
            dl_and_prep = not path.isdir(other_data_dir)
            if not dl_and_prep:
                ds_builder._data_dir = other_data_dir
        if not dl_and_prep:
            # Re-create the builder pointed at the already-prepared directory.
            import tensorflow_datasets.public_api as tfds

            info = ds_builder.info
            ds_builder = tfds.builder(
                ds_builder.name,
                data_dir=environ.get(
                    "TFDS_DATA_DIR",
                    path.dirname(path.dirname(ds_builder._data_dir)),
                ),
            )
            as_dataset_kwargs.update({"as_supervised": True, "batch_size": 1})
    if dl_and_prep:
        ds_builder.download_and_prepare(**download_and_prepare_kwargs)
    if train_ds is None:
        train_ds = ds_builder.as_dataset(split="train", **as_dataset_kwargs)
    # NOTE(review): partial(str.startswith, "valid") tests
    # "valid".startswith(split_name), i.e. it matches split names that are
    # *prefixes* of "valid" ("val", "valid") but NOT "validation" — confirm
    # this is the intended matching direction.
    valid_ds_key = next(
        filter(partial(str.startswith, "valid"), ds_builder.info.splits), None
    )
    if valid_ds_key and acquire_and_concat_validation_to_train:
        print("train was", train_ds.cardinality())
        valid_ds = ds_builder.as_dataset(split=valid_ds_key, **as_dataset_kwargs)
        print("validation is", valid_ds.cardinality())
        train_ds = train_ds.concatenate(valid_ds)
        print("train now", train_ds.cardinality())
    if test_ds is None:
        test_ds = ds_builder.as_dataset(split="test", **as_dataset_kwargs)
    return info, test_ds, train_ds
def to_numpy(obj, K=None, device=None):
    """
    Convert input to numpy

    :param obj: Any input that can be converted to numpy (raises error otherwise)
    :type obj: ```Any```

    :param K: backend engine, e.g., `np` or `tf`; defaults to `np`
    :type K: ```Literal['np', 'tf']```

    :param device: The (optional) Device to which x should be transferred.
      If given, then the result is committed to the device.
      If the device parameter is None, then this operation behaves like the identity function
      if the operand is on any device already, otherwise it transfers the data to the default device, uncommitted.
    :type device: ```Optional[Device]```

    :return: numpy type, probably np.ndarray
    :rtype: ```np.ndarray```
    """
    module_name = "numpy" if K is None else K.__name__
    if obj is None:
        return None if K is None else K.nan
    elif type(obj).__module__ == module_name:
        # Already in the target backend's array type: pass through.
        return obj
    elif hasattr(obj, "as_numpy"):
        return obj.as_numpy()
    elif hasattr(obj, "numpy"):
        return obj.numpy()
    elif isinstance(obj, dict) and "image" in obj and "label" in obj:
        # {"image": ..., "label": ...} records are converted field-by-field.
        if module_name == "jax.numpy":

            def __to_numpy(o, _K=None):
                """
                Convert input to a DeviceArray

                :param o: An object with a `numpy` method
                :type o: ```Any```

                :param _K: backend engine, e.g., `np` or `tf`; defaults to `np`
                :type _K: ```Literal['np', 'tf']```

                :return: The array on the device
                :rtype: ```DeviceArray```
                """
                import jax

                return jax.device_put(o.numpy(), device=device)

        else:
            __to_numpy = _to_numpy
        return {
            "image": __to_numpy(obj["image"], K),
            "label": __to_numpy(obj["label"], K),
        }
    elif type(obj).__name__ == "PrefetchDataset":
        # ^`isinstance` said `arg 2 must be a type or tuple of types`
        import tensorflow_datasets as tfds

        return tfds.as_numpy(obj)
    raise TypeError("Unable to convert {!r} to numpy".format(type(obj)))


# Alias needed because, unlike JavaScript with its hoisting, a nested scope
# above must reference the function through a second top-level name.
_to_numpy = to_numpy
def to_d(obj):
    """
    Convert the input to a dictionary

    :param obj: input value. Will have `dir` run against it if not a dict.
    :type obj: ```Union[dict, Any]```

    :return: Dictionary representation of input
    :rtype: ```dict```
    """
    if isinstance(obj, dict):
        return obj
    return {
        name: member
        for name, member in getmembers(obj)
        if not name.startswith("_")
    }
# The next 2 functions are from https://stackoverflow.com/a/1653248
def parse_to_argv_gen(s):
    """
    Generate a sys.argv style parse of the input string

    :param s: Input string
    :type s: ```str```

    :return: Generator of tokens; like in sys.argv
    :rtype: ```Iterator[str]```
    """
    # Escape sequences recognised after a backslash in the input.
    _QUOTE_CHARS_DICT = {
        "\\": "\\",
        " ": " ",
        '"': '"',
        "r": "\r",
        "n": "\n",
        "t": "\t",
    }
    quoted, s_iter, join_string, c_list, c = False, iter(s), s[0:0], [], " "
    err = "Bytes must be decoded to Unicode first"
    while True:
        # Skip whitespace
        try:
            while True:
                assert isinstance(c, str) and version_info[0] >= 3, err
                if not c.isspace():
                    break
                c = next(s_iter)
        except StopIteration:
            break
        # Read word
        try:
            while True:
                assert isinstance(c, str) and version_info[0] >= 3, err
                # A word ends at unquoted whitespace.
                if not quoted and c.isspace():
                    break
                if c == '"':
                    # Toggle quoting; the quote character itself is dropped.
                    quoted, c = not quoted, None
                elif c == "\\":
                    c = _QUOTE_CHARS_DICT.get(next(s_iter))
                if c is not None:
                    c_list.append(c)
                c = next(s_iter)
            yield join_string.join(c_list)
            c_list.clear()
        except StopIteration:
            # Input ended mid-word: emit whatever was accumulated.
            yield join_string.join(c_list)
            break
def parse_to_argv(s):
    """
    Perform a sys.argv style parse of the input string

    :param s: Input string
    :type s: ```str```

    :return: List of tokens; like in sys.argv
    :rtype: ```List[str]```
    """
    return [*parse_to_argv_gen(s)]
def pop_at_index(
    input_list, key, default=None, process_key=lambda k: k, process_val=lambda v: v
):
    """
    If key in index, remove it from list, and return it

    :param input_list: Input list
    :type input_list: ```list```

    :param key: Lookup key
    :type key: ```str```

    :param default: The default value if key not in l
    :type default: ```Optional[Any]```

    :param process_key: Postprocess the key
    :type process_key: ```Callable[[Any], Any]```

    :param process_val: Postprocess the val
    :type process_val: ```Callable[[Any], Any]```

    :return: default if not in list, else the value from the list (and list is now minus that elem)
    :rtype: ```Optional[Any]```
    """
    # if process_key is not None and not isinstance(key, tuple):
    #     return default
    try:
        if process_key:
            # Index of the first element whose processed form equals ``key``.
            idx = next(
                map(
                    itemgetter(0),
                    filter(
                        None,
                        filter(
                            lambda idx_e: process_key(idx_e[1]) == key,
                            enumerate(input_list),
                        ),
                    ),
                )
            )
        else:
            idx = input_list.index(key)
    except (ValueError, StopIteration):
        # Key absent: a single-element list/tuple default is unwrapped.
        if isinstance(default, (list, tuple)) and len(default) == 1:
            return default[0]
        return default
    else:
        # Remove the element, post-process it, and return a deep copy so the
        # caller cannot mutate anything still referenced elsewhere.
        return deepcopy(process_val(input_list.pop(idx)))
def set_attr(object, attribute, value):
    """
    Set the named attribute on the given object to the specified value, then
    return that same object — a fluent variant of ``setattr(x, 'y', v)``.

    :param object: The object
    :type object: ```Any```

    :param attribute: The attribute
    :type attribute: ```str```

    :param value: The value
    :type value: ```Any```
    """
    setattr(object, attribute, value)
    return object
__all__ = [
"camel_case",
"common_dataset_handler",
"parse_to_argv",
"pop_at_index",
"set_attr",
"to_d",
"to_numpy",
]
| 2.5 | 2 |
sanic_oauthlib/utils.py | Lupino/sanic-oauthlib | 0 | 12767724 | <reponame>Lupino/sanic-oauthlib<filename>sanic_oauthlib/utils.py
# coding: utf-8
import base64
import importlib
from sanic.response import HTTPResponse
from oauthlib.common import to_unicode
def _get_uri_from_request(request):
    """
    The uri returned from request.uri is not properly urlencoded
    (sometimes it's partially urldecoded) This is a weird hack to get
    sanic to return the proper urlencoded string uri
    """
    # Rebuild path?query from the raw parsed URL (these are bytes).
    uri = request._parsed_url.path
    if request._parsed_url.query:
        uri = uri+b'?'+request._parsed_url.query
    try:
        # these work on Sanic 19.6.1 and above
        server_name = request.server_name
        server_port = request.server_port
        scheme = request.scheme
    except (AttributeError, NotImplementedError):
        # Older Sanic: reconstruct host/port/scheme from config, headers
        # (x-forwarded-*), the requested host, and finally the socket itself.
        override_server_name = request.app.config.get("SERVER_NAME", False)
        requested_host = request.host
        server_name = (override_server_name
                       or request.headers.get("x-forwarded-host")
                       or requested_host.split(":")[0])
        forwarded_port = ((override_server_name.split(":")[1] if override_server_name and ":" in override_server_name else None)
                          or request.headers.get("x-forwarded-port")
                          or (requested_host.split(":")[1] if ":" in requested_host else None))
        try:
            server_port = ((int(forwarded_port) if forwarded_port else None)
                           or request._parsed_url.port
                           or request.transport.get_extra_info("sockname")[1])
        except NotImplementedError:
            server_port = 80
        scheme = (request.headers.get("x-forwarded-proto")
                  or request.scheme)
    if ":" in server_name:
        # NOTE(review): this split leaves server_port as a str, so the
        # integer comparisons against 443/80 below will not match in that
        # path — confirm whether an int() conversion is intended here.
        server_name, server_port = server_name.split(":", 1)
    # Omit the port for the scheme's default (https:443, http:80).
    include_port = True
    if scheme == "https" and server_port == 443:
        include_port = False
    elif scheme == "http" and server_port == 80:
        include_port = False
    if include_port:
        return scheme + "://" + server_name + ':' + str(server_port) + uri.decode('utf-8')
    return scheme + "://" + server_name + uri.decode('utf-8')
def extract_params(request=None):
    """Extract request params.

    Returns a (uri, http_method, body, headers) tuple suitable for oauthlib.
    """
    if request is None:
        # Fall back to a ``request`` object injected into this function's
        # globals by the surrounding integration, if present.
        if 'request' in extract_params.__globals__:
            request = extract_params.__globals__['request']
        else:
            raise ValueError('request')
    uri = _get_uri_from_request(request)
    http_method = request.method
    headers = dict(request.headers)
    # Strip WSGI stream objects that must not leak into the OAuth layer.
    if 'wsgi.input' in headers:
        del headers['wsgi.input']
    if 'wsgi.errors' in headers:
        del headers['wsgi.errors']
    body = {k:request.form.get(k) for k in request.form.keys()}
    return uri, http_method, body, headers
def to_bytes(text, encoding='utf-8'):
    """Coerce *text* to ``bytes``; falsy values and existing bytes pass through unchanged."""
    if not text:
        return text
    return text if isinstance(text, bytes) else text.encode(encoding)
def decode_base64(text, encoding='utf-8'):
    """Decode a base64-encoded value and return the result as unicode text."""
    raw = base64.b64decode(to_bytes(text, encoding))
    return to_unicode(raw, encoding)
def create_response(headers, body, status):
    """Build a Sanic HTTPResponse carrying *body* and *status*, with *headers* applied."""
    response = HTTPResponse(body, status)
    for name, value in headers.items():
        response.headers[str(name)] = value
    return response
def import_string(name, silent=False):
    """
    Import an object based on a string.

    This is useful if you want to use import paths as endpoints or
    something similar. An import path can be specified either in dotted
    notation (``xml.sax.saxutils.escape``) or with a colon as object
    delimiter (``xml.sax.saxutils:escape``).

    If silent is True the return value will be None if the import fails.

    :param name: dotted or colon-delimited import path
    :type name: str
    :param silent: suppress ImportError and return None on failure
    :type silent: bool
    :return: the imported module or object
    """
    attr_stack = []
    if ":" in name:
        name, obj = name.rsplit(':', 1)
        attr_stack.append(obj)
    try:
        mod = importlib.import_module(name)
        if attr_stack:
            try:
                return getattr(mod, attr_stack[0])
            except AttributeError:
                raise ImportError()
        # Bug fix: a plain module path previously fell through here and the
        # function implicitly returned None; return the module itself.
        return mod
    except ImportError as e:
        # Fallback: peel trailing dotted components off right-to-left and
        # treat them as attributes until an importable prefix is found.
        while "." in name:
            name, ext = name.rsplit('.', 1)
            attr_stack.append(ext)
            try:
                mod = importlib.import_module(name)
            except ImportError as e2:
                e = e2
                continue
            a = mod
            for i in reversed(attr_stack):
                try:
                    a = getattr(a, i)
                except AttributeError:
                    raise ImportError()
            return a
        if silent:
            return None
        raise e
| 2.265625 | 2 |
linkace_cli/api/search.py | vwheezy22/linkace-cli | 3 | 12767725 | from linkace_cli.api.base import APIBase
from linkace_cli import models
from linkace_cli.api.tags import Tags
from linkace_cli.api.lists import Lists
class Search(APIBase):
    """Client for the LinkAce search endpoints.

    Combines the search API with the Tags/Lists clients so that tag and
    list matches can be expanded into their member links.
    """

    def __init__(self, base_url, api_token):
        super(Search, self).__init__(base_url, api_token)
        self.tags = Tags(base_url, api_token)
        self.lists = Lists(base_url, api_token)

    def get_links_by_tag_exact(self, tag_id: int):
        """Return the links attached to the tag with this exact id."""
        return self.tags.links(tag_id)

    def get_links_by_tag_query(self, query: str):
        """Return links for every tag whose name matches *query*.

        Results are de-duplicated by link id.
        """
        tag_ids = self.api.get('search/tags', {'query': query})
        # Fix: removed stray debug print(tag_ids) left over from development.
        links = []
        # NOTE(review): this endpoint is iterated via .keys() (a mapping keyed
        # by tag id), while search/lists below is iterated directly — confirm
        # both response shapes against the LinkAce API.
        for tag_id in tag_ids.keys():
            links.extend(self.tags.links(tag_id))
        # Deduplicate results based on ID
        return list({v['id']: v for v in links}.values())

    def get_links_by_list_exact(self, list_id: int):
        """Return the links contained in the list with this exact id."""
        return self.lists.links(list_id)

    def get_links_by_list_query(self, query: str):
        """Return links for every list whose name matches *query*.

        Results are de-duplicated by link id.
        """
        list_ids = self.api.get('search/lists', {'query': query})
        links = []
        for list_id in list_ids:
            links.extend(self.lists.links(list_id))
        # Deduplicate results based on ID
        return list({v['id']: v for v in links}.values())

    def get_links_by_query(self, query: str):
        """Full-text search for links, following pagination to the last page."""
        params = {
            'query': query,
            'search_title': query,
        }
        resp = self.api.get('search/links', params=params)
        resp = models.LinksPagination().load(resp)
        links = resp['data']
        while resp['next_page_url']:
            resp = models.LinksPagination().load(self.api.get(resp['next_page_url']))
            links.extend(resp['data'])
        return links
| 2.28125 | 2 |
test/mock_helpers.py | yeastgenome/SGDBackend-Nex2 | 5 | 12767726 | from src.models import AlleleGeninteraction, Alleledbentity, Complexdbentity, CurationReference, Dnasequenceannotation, Functionalcomplementannotation, Literatureannotation, Locusdbentity, Pathwaydbentity, Proteinabundanceannotation, Referencedbentity
from . import fixtures as factory
from mock import Mock
class MockQueryFilter(object):
    """Mimics the object returned by SQLAlchemy's Query.filter()/filter_by().

    Wraps a canned result and echoes it back from the usual terminal
    methods (one_or_none, first, all, ...); chainable methods return
    ``self`` so fluent call chains keep working in the tests.
    """

    def __init__(self, query_params, query_result):
        # query_result: the canned value to hand back; query_params: the
        # filter arguments, recorded for inspection via query_params().
        self._return = query_result
        self._params = query_params

    def one_or_none(self):
        # A list result yields its first element, mirroring a single row.
        if self._return.__class__ == list:
            return self._return[0]
        else:
            return self._return

    def first(self):
        return self._return

    def order_by(self, *args, **kwargs):
        return self

    def group_by(self, *args, **kwargs):
        return self

    def asc(self, *args, **kwargs):
        return self

    def all(self):
        # Always return a list: [] for None, the list itself, or a
        # single-item wrap for any other value.
        if self._return is None:
            return []
        elif self._return.__class__ == list:
            return self._return
        else:
            return [self._return]

    def count(self):
        # Arbitrary fixed count expected by the tests.
        return 7

    def query_params(self):
        return self._params

    def distinct(self, *args, **kwargs):
        return self

    def outerjoin(self, *args, **kwargs):
        return self

    def scalar(self, *args, **kwargs):
        # Arbitrary fixed scalar expected by the tests.
        return 7

    def join(self, *args, **kwargs):
        # Fix: the original defined join() three times back to back; only
        # the last definition ever took effect, so a single one suffices.
        return self

    def filter_by(self, *args, **kwargs):
        return self

    def filter(self, *args, **kwargs):
        return self
class MockQuery(object):
    """Mimics SQLAlchemy's Query object for DBSession.query(...) mocks.

    Terminal methods hand back the canned result; filter/filter_by wrap it
    in a MockQueryFilter; the remaining chainable methods return ``self``.
    """

    def __init__(self, query_result):
        self._query_result = query_result

    def filter_by(self, **query_params):
        # Record the keyword filter and delegate to a MockQueryFilter.
        self._query_filter = MockQueryFilter(query_params, self._query_result)
        self._full_params = query_params
        return self._query_filter

    def filter(self, *query_params):
        # Only the first positional criterion is recorded, matching how the
        # tests inspect it.
        self._query_filter = MockQueryFilter(query_params[0], self._query_result)
        self._full_params = query_params
        return self._query_filter

    def all(self):
        return self._query_result

    def distinct(self, *query_params):
        # With no arguments and a truthy result this acts as a terminal
        # call; otherwise it stays chainable.
        if len(query_params) == 0 and self._query_result:
            return self._query_result
        else:
            return self

    def outerjoin(self, query_params):
        return self

    def join(self, *args, **kwargs):
        # Fix: the original defined join() three times (with count()
        # sandwiched in between); only one definition is needed.
        return self

    def count(self):
        return 1

    def order_by(self, query_params):
        return self

    def limit(self, query_params):
        return self
class MockFileStorage(object):
    """Empty stand-in for a file-storage/upload object; tests attach the
    attributes they need directly to instances."""
    pass
def go_side_effect(*args, **kwargs):
    """DBSession.query side-effect for GO view tests.

    Dispatches on the stringified query argument(s) — a model class repr or
    column/aggregate labels — and returns a MockQuery wrapping freshly
    built fixture objects wired together for the matching query.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Go'>":
        go = factory.GoFactory()
        return MockQuery(go)
    # Plain `if` rather than `elif`, but equivalent: the branch above returns.
    if len(args) == 2 and str(args[0]) == 'Goannotation.dbentity_id' and str(args[1]) == 'count(nex.goannotation.dbentity_id)':
        go = factory.GoFactory()
        goannot = factory.GoannotationFactory()
        goannot.go = go
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.GoRelation'>":
        gochild = factory.GoFactory()
        goparent = factory.GoFactory()
        gorel = factory.GoRelationFactory()
        ro = factory.RoFactory()
        gorel.child = gochild
        gorel.parent = goparent
        gorel.ro = ro
        return MockQuery(gorel)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.GoUrl'>":
        gourl = factory.GoUrlFactory()
        return MockQuery(gourl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.GoAlias'>":
        goalias = factory.GoAliasFactory()
        return MockQuery(goalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        # NOTE(review): `book` is built but never attached to the reference —
        # presumably only for the factory's side effects; confirm.
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        dbent = factory.DbentityFactory()
        go = factory.GoFactory()
        goannot = factory.GoannotationFactory()
        goannot.go = go
        goannot.dbentity = dbent
        goannot.reference = refdbentity
        goannot.source = source
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoAlias'>":
        ecoalias = factory.EcoAliasFactory()
        return MockQuery(ecoalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoUrl'>":
        ecourl = factory.EcoUrlFactory()
        return MockQuery(ecourl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goextension'>":
        ro = factory.RoFactory()
        goext = factory.GoextensionFactory()
        goext.ro = ro
        return MockQuery(goext)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dbentity'>":
        dbent = factory.DbentityFactory()
        return MockQuery(dbent)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Chebi'>":
        chebi = factory.ChebiFactory()
        return MockQuery(chebi)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Gosupportingevidence'>":
        goevd = factory.GosupportingevidenceFactory()
        return MockQuery(goevd)
def locus_expression_side_effect(*args, **kwargs):
    """DBSession.query side-effect for locus expression view tests.

    Dispatches on the stringified query argument and returns a MockQuery
    wrapping fixture objects for the matching model or column.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Expressionannotation'>":
        expannot = factory.ExpressionannotationFactory()
        return MockQuery(expannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dataset'>":
        dataset = factory.DatasetFactory()
        return MockQuery(dataset)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        # NOTE(review): `source` and `book` are built but never attached —
        # presumably only for the factories' side effects; confirm.
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        return MockQuery(refdbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DatasetKeyword'>":
        dskw = factory.DatasetKeywordFactory()
        return MockQuery(dskw)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DatasetReference'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        dsref = factory.DatasetReferenceFactory()
        dsref.reference = refdbentity
        ds = factory.DatasetFactory()
        dsref.dataset = ds
        # Returns a 1-tuple rather than the bare object — presumably the
        # caller iterates row tuples here; verify against the view code.
        return MockQuery((dsref,))
    elif len(args) == 1 and str(args[0]) == 'Referencedocument.html':
        refdoc = factory.ReferencedocumentFactory()
        return MockQuery(refdoc.html)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Datasetsample'>":
        dss = factory.DatasetsampleFactory()
        return MockQuery(dss)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DatasetUrl'>":
        dsurl = factory.DatasetUrlFactory()
        return MockQuery(dsurl)
def complex_side_effect(*args, **kwargs):
    """DBSession.query side-effect for macromolecular complex view tests.

    Dispatches on the stringified query argument(s) and returns a MockQuery
    wrapping fixture objects for the matching model or aggregate.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Complexdbentity'>":
        # NOTE: local name shadows the builtin `complex`; harmless here.
        complex = factory.ComplexdbentityFactory()
        return MockQuery(complex)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Complexbindingannotation'>":
        bind = factory.ComplexbindingannotationFactory()
        interactor = factory.InteractorFactory()
        locus =factory.LocusdbentityFactory()
        interactor.locus = locus
        bind.interactor = interactor
        bindingInteractor = factory.InteractorFactory()
        locus2 =factory.LocusdbentityFactory()
        bindingInteractor.locus = locus2
        bind.binding_interactor = bindingInteractor
        return MockQuery(bind)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ComplexAlias'>":
        alias = factory.ComplexAliasFactory()
        return MockQuery(alias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ComplexGo'>":
        complexGo = factory.ComplexGoFactory()
        go = factory.GoFactory()
        complexGo.go = go
        return MockQuery(complexGo)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ComplexReference'>":
        complexRef = factory.ComplexReferenceFactory()
        ref = factory.ReferencedbentityFactory()
        complexRef.reference = ref
        return MockQuery(complexRef)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceUrl'>":
        refUrl = factory.ReferenceUrlFactory()
        return MockQuery(refUrl)
    elif len(args) == 2 and str(args[0]) == 'Goannotation.dbentity_id' and str(args[1]) == 'count(nex.goannotation.dbentity_id)':
        goAnnot = factory.GoannotationFactory()
        return MockQuery(goAnnot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.GoRelation'>":
        goRel = factory.GoRelationFactory()
        return MockQuery(goRel)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.GoUrl'>":
        goUrl = factory.GoUrlFactory()
        return MockQuery(goUrl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.GoAlias'>":
        goAlias = factory.GoAliasFactory()
        return MockQuery(goAlias)
def locus_side_effect(*args, **kwargs):
    """DBSession.query side-effect for locus view tests.

    Dispatches on the stringified query argument(s) — a model class repr,
    column labels, or (for two branches) the model class itself — and
    returns a MockQuery wrapping fixture objects wired together for the
    matching query. First matching branch wins; several later branches
    duplicate earlier conditions and are unreachable (flagged below but
    kept to avoid behavior-affecting surprises).

    Fix applied: removed an unreachable `return MockQuery((db.format_name))`
    after the LocusAlias.display_name branch's return — it referenced an
    undefined local `db` and could never execute.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Proteinabundanceannotation'>":
        protein_abundance_annotation = factory.ProteinabundanceAnnotationFactory()
        eco = factory.EcoFactory()
        protein_abundance_annotation.eco = eco
        efo = factory.EfoFactory()
        protein_abundance_annotation.efo = efo
        db_entity = factory.DbentityFactory()
        protein_abundance_annotation.dbentity = db_entity
        ref = factory.ReferencedbentityFactory()
        protein_abundance_annotation.reference = ref
        orig_ref = factory.ReferencedbentityFactory()
        protein_abundance_annotation.original_reference = orig_ref
        chebi = factory.ChebiFactory()
        protein_abundance_annotation.chebi = chebi
        go = factory.GoFactory()
        protein_abundance_annotation.go = go
        src = factory.SourceFactory()
        protein_abundance_annotation.src = src
        tax = factory.TaxonomyFactory()
        protein_abundance_annotation.tax = tax
        return MockQuery(protein_abundance_annotation)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Bindingmotifannotation'>":
        bind = factory.BindingmotifannotationFactory()
        return MockQuery(bind)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Complexbindingannotation'>":
        bind = factory.ComplexbindingannotationFactory()
        return MockQuery(bind)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Go'>":
        go = factory.GoFactory()
        return MockQuery(go)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        mut = factory.ApoFactory()
        exp = factory.ApoFactory()
        pheno = factory.PhenotypeFactory()
        db = factory.DbentityFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.mutant = mut
        phenoannot.experiment = exp
        phenoannot.phenotype = pheno
        phenoannot.dbentity = db
        phenoannot.reference = refdbentity
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Interactor'>":
        interactor = factory.InteractorFactory()
        return MockQuery(interactor)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery(phenocond)
    elif len(args) == 2 and str(args[0]) == 'Chebi.display_name' and str(args[1]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery((chebi.display_name, chebi.obj_url))
    elif len(args) == 2 and str(args[0]) == 'Dbentity.display_name' and str(args[1]) == 'Dbentity.format_name':
        db = factory.DbentityFactory()
        # NOTE(review): returns a single value although two columns were
        # queried — presumably what the view expects; confirm.
        return MockQuery(db.format_name)
    elif len(args) == 1 and str(args[0]) == 'Proteinsequenceannotation.annotation_id':
        prtseq = factory.ProteinsequenceannotationFactory()
        return MockQuery((prtseq.annotation_id,))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Proteinsequenceannotation'>":
        prtseq = factory.ProteinsequenceannotationFactory()
        return MockQuery(prtseq)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ProteinsequenceDetail'>":
        prtseq = factory.ProteinsequenceannotationFactory()
        prtseqdetail = factory.ProteinsequenceDetailFactory()
        prtseqdetail.annotation = prtseq
        return MockQuery(prtseqdetail)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goslimannotation'>":
        goslimannot = factory.GoslimannotationFactory()
        goslim = factory.GoslimFactory()
        goslimannot.goslim = goslim
        return MockQuery(goslimannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goannotation'>":
        go = factory.GoFactory()
        goannot = factory.GoannotationFactory()
        goannot.go = go
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Disease'>":
        do = factory.DiseaseFactory()
        return MockQuery(do)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Diseaseannotation'>":
        do = factory.DiseaseFactory()
        doannot = factory.DiseaseannotationFactory()
        doannot.do = do
        dbentity = factory.DbentityFactory()
        doannot.dbentity = dbentity
        eco = factory.EcoFactory()
        doannot.eco = eco
        ref = factory.ReferencedbentityFactory()
        doannot.reference = ref
        src = factory.SourceFactory()
        doannot.source = src
        taxonomy = factory.TaxonomyFactory()
        doannot.taxonomy = taxonomy
        return MockQuery(doannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoAlias'>":
        ecoalias = factory.EcoAliasFactory()
        return MockQuery(ecoalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoUrl'>":
        ecourl = factory.EcoUrlFactory()
        return MockQuery(ecourl)
    elif len(args) == 1 and str(args[0]) == 'Locussummary.html':
        ls = factory.LocussummaryFactory()
        return MockQuery(ls.html)
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.taxonomy_id' and str(
            args[1]) == 'count(nex.phenotypeannotation.taxonomy_id)':
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery((phenoannot.taxonomy_id, 20))
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.taxonomy_id' and str(
            args[1]) == 'Phenotypeannotation.annotation_id':
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery(phenoannot)
    elif len(args) == 2 and str(args[0]) == 'PhenotypeannotationCond.annotation_id' and str(args[1]) == 'count(DISTINCT nex.phenotypeannotation_cond.group_id)':
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery((phenocond.annotation_id, 20))
    # NOTE: the following five branches duplicate earlier conditions and are
    # therefore unreachable (first match wins); kept for byte-level fidelity.
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        mut = factory.ApoFactory()
        exp = factory.ApoFactory()
        pheno = factory.PhenotypeFactory()
        db = factory.DbentityFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.mutant = mut
        phenoannot.experiment = exp
        phenoannot.phenotype = pheno
        phenoannot.dbentity = db
        phenoannot.reference = refdbentity
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery(phenocond)
    elif len(args) == 2 and str(args[0]) == 'Chebi.display_name' and str(args[1]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery((chebi.display_name, chebi.obj_url))
    elif len(args) == 2 and str(args[0]) == 'Goannotation.dbentity_id' and str(args[1]) == 'count(nex.goannotation.dbentity_id)':
        goannot = factory.GoannotationFactory()
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
    elif len(args) == 2 and str(args[0]) == 'Physinteractionannotation.biogrid_experimental_system' and str(args[1]) == 'count(nex.physinteractionannotation.annotation_id)':
        physannot = factory.PhysinteractionannotationFactory()
        return MockQuery((physannot.biogrid_experimental_system, 20))
    elif len(args) == 2 and str(args[0]) == 'Geninteractionannotation.biogrid_experimental_system' and str(args[1]) == 'count(nex.geninteractionannotation.annotation_id)':
        genannot = factory.GeninteractionannotationFactory()
        return MockQuery((genannot.biogrid_experimental_system, 20))
    elif len(args) == 1 and str(args[0]) == 'Physinteractionannotation.dbentity2_id':
        physannot = factory.PhysinteractionannotationFactory()
        return MockQuery(physannot.dbentity2_id)
    elif len(args) == 1 and str(args[0]) == 'Physinteractionannotation.dbentity1_id':
        physannot = factory.PhysinteractionannotationFactory()
        return MockQuery(physannot.dbentity1_id)
    elif len(args) == 1 and str(args[0]) == 'Geninteractionannotation.dbentity2_id':
        genannot = factory.GeninteractionannotationFactory()
        return MockQuery(genannot.dbentity2_id)
    elif len(args) == 1 and str(args[0]) == 'Geninteractionannotation.dbentity1_id':
        genannot = factory.GeninteractionannotationFactory()
        return MockQuery(genannot.dbentity1_id)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Regulationannotation'>":
        regannot = factory.RegulationannotationFactory()
        eco = factory.EcoFactory()
        go = factory.GoFactory()
        reference = factory.ReferencedbentityFactory()
        regulator = factory.DbentityFactory()
        source = factory.SourceFactory()
        target = factory.DbentityFactory()
        taxonomy = factory.TaxonomyFactory()
        regannot.eco = eco
        regannot.go = go
        regannot.reference = reference
        regannot.regulator = regulator
        regannot.source = source
        regannot.target = target
        regannot.taxonomy = taxonomy
        return MockQuery(regannot)
    elif len(args) == 2 and str(args[0]) == 'Regulationannotation.target_id' and str(args[1]) == 'Regulationannotation.regulator_id':
        regannot = factory.RegulationannotationFactory()
        return MockQuery((regannot.target_id, regannot.regulator_id))
    elif len(args) == 2 and str(args[0]) == 'Literatureannotation.topic' and str(args[1]) == 'count(nex.literatureannotation.annotation_id)':
        litannot = factory.LiteratureannotationFactory()
        return MockQuery((litannot.topic, 20))
    elif len(args) == 1 and str(args[0]) == 'Literatureannotation.reference_id':
        litannot = factory.LiteratureannotationFactory()
        return MockQuery(litannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'Geninteractionannotation.reference_id':
        genannot = factory.GeninteractionannotationFactory()
        return MockQuery(genannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'Physinteractionannotation.reference_id':
        physannot = factory.PhysinteractionannotationFactory()
        return MockQuery(physannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'Regulationannotation.reference_id':
        regannot = factory.RegulationannotationFactory()
        return MockQuery(regannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'Regulationannotation.target_id':
        regannot = factory.RegulationannotationFactory()
        return MockQuery(regannot.target_id)
    # NOTE: duplicate of the Literatureannotation.reference_id branch above;
    # unreachable.
    elif len(args) == 1 and str(args[0]) == 'Literatureannotation.reference_id':
        litannot = factory.LiteratureannotationFactory()
        return MockQuery(litannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'Phenotypeannotation.reference_id':
        phenannot = factory.PhenotypeannotationFactory()
        return MockQuery(phenannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'Goannotation.reference_id':
        goannot = factory.GoannotationFactory()
        return MockQuery(goannot.reference_id)
    elif len(args) == 1 and str(args[0]) == 'ReferenceAlias.reference_id':
        refalias = factory.ReferenceAliasFactory()
        return MockQuery(refalias.reference_id)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusAlias'>":
        localias = factory.LocusAliasFactory()
        source = factory.SourceFactory()
        localias.source = source
        return MockQuery(localias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusAliasReferences'>":
        localiasref = factory.LocusAliasReferencesFactory()
        source = factory.SourceFactory()
        ref = factory.ReferencedbentityFactory()
        localiasref.reference = ref
        localiasref.source = source
        return MockQuery(localiasref)
    elif len(args) == 1 and str(args[0]) == 'Apo.apo_id':
        apo = factory.ApoFactory()
        return MockQuery(apo.apo_id)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceUrl'>":
        refurl = factory.ReferenceUrlFactory()
        return MockQuery(refurl)
    elif len(args) == 1 and str(args[0]) == 'Dnasequenceannotation.so_id':
        dnaseq = factory.DnasequenceannotationFactory()
        return MockQuery((dnaseq.so_id,))
    elif len(args) == 1 and str(args[0]) == 'So.display_name':
        so = factory.SoFactory()
        return MockQuery(so.display_name)
    elif len(args) == 3 and str(args[0]) == 'Locussummary.summary_id' and str(args[1]) == 'Locussummary.html' and str(args[2]) == 'Locussummary.date_created':
        ls = factory.LocussummaryFactory()
        return MockQuery((ls.summary_id, ls.html, ls.date_created))
    elif len(args) == 5 and str(args[0]) == 'Locussummary.summary_id' \
            and str(args[1]) == 'Locussummary.html' and str(args[2]) == 'Locussummary.date_created' \
            and str(args[3]) == 'Locussummary.summary_order' and str(args[4]) == 'Locussummary.summary_type':
        ls = factory.LocussummaryFactory()
        return MockQuery((ls.summary_id, ls.html, ls.date_created, ls.summary_order, ls.summary_type))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusReferences'>":
        lref = factory.LocusReferencesFactory()
        ref = factory.ReferencedbentityFactory()
        lref.reference = ref
        return MockQuery(lref)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusRelation'>":
        lrel = factory.LocusRelationFactory()
        parent = factory.LocusdbentityFactory()
        child = factory.LocusdbentityFactory()
        source = factory.SourceFactory()
        ro = factory.RoFactory()
        lrel.parent = parent
        lrel.child = child
        lrel.source = source
        lrel.ro = ro
        return MockQuery(lrel)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusRelationReference'>":
        lrel_ref = factory.LocusRelationReferenceFactory()
        ref = factory.ReferencedbentityFactory()
        lrel_ref.reference = ref
        return MockQuery(lrel_ref)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocussummaryReference'>":
        lsref = factory.LocussummaryReferenceFactory()
        ref = factory.ReferencedbentityFactory()
        source = factory.SourceFactory()
        summary = factory.LocussummaryFactory()
        lsref.source = source
        lsref.reference = ref
        lsref.summary = summary
        return MockQuery(lsref)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Locusnote'>":
        lnote = factory.LocusnoteFactory()
        source = factory.SourceFactory()
        lnote.source = source
        return MockQuery(lnote)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusnoteReference'>":
        # NOTE(review): built via LocusnoteFactory, not a
        # LocusnoteReferenceFactory — confirm this is intentional.
        lnote_ref = factory.LocusnoteFactory()
        note = factory.LocusnoteFactory()
        ref = factory.ReferencedbentityFactory()
        source = factory.SourceFactory()
        lnote_ref.note = note
        lnote_ref.reference = ref
        lnote_ref.source = source
        return MockQuery(lnote_ref)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.LocusUrl'>":
        lurl = factory.LocusUrlFactory()
        return MockQuery(lurl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Locusnoteannotation'>":
        laf = factory.LocusnoteannotationFactory()
        return MockQuery(laf)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Pathwayannotation'>":
        paf = factory.PathwayannotationFactory()
        dbentity = factory.DbentityFactory()
        ec = factory.EcFactory()
        pathway = factory.PathwaydbentityFactory()
        ref = factory.ReferencedbentityFactory()
        src = factory.SourceFactory()
        tax = factory.TaxonomyFactory()
        paf.dbentity = dbentity
        paf.ec = ec
        paf.pathway = pathway
        paf.reference = ref
        paf.source = src
        paf.taxonomy = tax
        return MockQuery(paf)
    elif len(args) == 1 and str(args[0]) == 'PathwayUrl.obj_url':
        path_url = factory.PathwayUrlFactory()
        return MockQuery(path_url.obj_url)
    elif len(args) == 1 and str(args[0]) == 'Dbentity.display_name':
        dbentity = factory.DbentityFactory()
        return MockQuery(dbentity.display_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Reservedname'>":
        rname = factory.ReservednameFactory()
        return MockQuery(rname)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Posttranslationannotation'>":
        pta = factory.PosttranslationannotationFactory()
        source = factory.SourceFactory()
        psi = factory.PsimodFactory()
        pta.source = source
        pta.psimod = psi
        return MockQuery(pta)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        refdb = factory.ReferencedbentityFactory()
        return MockQuery(refdb)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Proteinexptannotation'>":
        prt = factory.ProteinexptannotationFactory()
        return MockQuery(prt)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Proteindomainannotation'>":
        pda = factory.ProteindomainannotationFactory()
        pd = factory.ProteindomainFactory()
        source = factory.SourceFactory()
        db = factory.DbentityFactory()
        pd.source = source
        pda.proteindomain = pd
        pda.dbentity = db
        return MockQuery(pda)
    elif len(args) == 3 and str(args[0]) == 'Dbentity.display_name' and str(args[1]) == 'Dbentity.format_name' and str(args[2]) == 'Dbentity.obj_url':
        db = factory.DbentityFactory()
        return MockQuery((db.display_name, db.format_name, db.obj_url))
    elif len(args) == 4 and str(args[0]) == 'Dbentity.dbentity_id' and str(args[1]) == 'Dbentity.display_name' and str(args[2]) == 'Dbentity.format_name' and str(args[3]) == 'Dbentity.obj_url':
        db = factory.DbentityFactory()
        return MockQuery((db.dbentity_id, db.display_name, db.format_name, db.obj_url))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Proteindomain'>":
        pd = factory.ProteindomainFactory()
        source = factory.SourceFactory()
        pd.source = source
        return MockQuery(pd)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ProteindomainUrl'>":
        pdurl = factory.ProteindomainUrlFactory()
        pd = factory.ProteindomainFactory()
        source = factory.SourceFactory()
        pd.source = source
        return MockQuery(pdurl)
    elif len(args) == 1 and str(args[0]) == 'Proteindomainannotation.dbentity_id':
        pda = factory.ProteindomainannotationFactory()
        # Parenthesized single value, not a tuple (no trailing comma).
        return MockQuery((pda.dbentity_id))
    elif len(args) == 1 and str(args[0]) == 'Dbentity.format_name':
        db = factory.DbentityFactory()
        return MockQuery((db.format_name,))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Locussummary'>":
        locus_summary = factory.LocussummaryFactory()
        return MockQuery(locus_summary)
    elif len(args) == 1 and str(args[0]) == "LocussummaryReference.reference_id":
        locus_summary_reference = factory.LocussummaryReferenceFactory()
        return MockQuery(locus_summary_reference.reference_id)
    elif len(args) == 1 and str(args[0]) == "Referencedbentity.pmid":
        reference = factory.ReferencedbentityFactory()
        reference.pmid = []
        return MockQuery(reference.pmid)
    elif len(args) == 2 and str(args[0]) == "<class 'src.models.LocusAliasReferences'>" and str(args[1]) == "Referencedbentity.pmid":
        locus_alias_reference = factory.LocusAliasReferencesFactory()
        reference = factory.ReferencedbentityFactory()
        return MockQuery((locus_alias_reference, reference.pmid))
    elif len(args) == 2 and str(args[0]) == "<class 'src.models.LocusReferences'>" and str(args[1]) == "Referencedbentity.pmid":
        locus_reference = factory.LocusReferencesFactory()
        reference = factory.ReferencedbentityFactory()
        return MockQuery((locus_reference, reference.pmid))
    elif len(args) == 1 and str(args[0]) == "LocusAlias.display_name":
        locus_alias = factory.LocusAliasFactory()
        return MockQuery(locus_alias)
        # Fix: an unreachable `return MockQuery((db.format_name))` that
        # referenced an undefined local `db` was removed from here.
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Expressionannotation'>":
        exp = factory.ExpressionannotationFactory()
        return MockQuery(exp)
    elif len(args) == 3 and str(args[0]) == 'Expressionannotation.dbentity_id' and str(args[1]) == 'Expressionannotation.datasetsample_id' and str(args[2]) == 'Expressionannotation.normalized_expression_value':
        exp = factory.ExpressionannotationFactory()
        return MockQuery(exp)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Literatureannotation'>":
        lit_annot = factory.LiteratureannotationFactory()
        return MockQuery(lit_annot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Diseasesupportingevidence'>":
        dis_evidence = factory.DiseasesupportingevidenceFactory()
        do_annot = factory.DiseaseannotationFactory()
        dis_evidence.annotation = do_annot
        return MockQuery(dis_evidence)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dbentity'>":
        dbentity = factory.DbentityFactory()
        src = factory.SourceFactory()
        dbentity.source = src
        return MockQuery(dbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Physinteractionannotation'>":
        phys_annot = factory.PhysinteractionannotationFactory()
        dbentity1 = factory.DbentityFactory()
        phys_annot.dbentity1 = dbentity1
        dbentity2 = factory.DbentityFactory()
        phys_annot.dbentity2 = dbentity2
        psimod = factory.PsimodFactory()
        phys_annot.psimod = psimod
        ref = factory.ReferencedbentityFactory()
        phys_annot.reference = ref
        src = factory.SourceFactory()
        phys_annot.source = src
        taxonomy = factory.TaxonomyFactory()
        phys_annot.taxonomy = taxonomy
        return MockQuery(phys_annot)
    # The two branches below compare against the model class itself rather
    # than its string repr — a different dispatch style than the rest.
    elif len(args) == 1 and args[0] == Functionalcomplementannotation:
        complement = factory.FunctionalcomplementannotationFactory()
        complement.dbentity = factory.DbentityFactory()
        complement.reference = factory.ReferencedbentityFactory()
        complement.source = factory.SourceFactory()
        complement.eco = factory.EcoFactory()
        complement.ro = factory.RoFactory()
        complement.taxonomy = factory.TaxonomyFactory()
        return MockQuery(complement)
    elif len(args) == 1 and args[0] == Dnasequenceannotation:
        sequence = factory.DnasequenceannotationFactory()
        # NOTE(review): attribute is named `config` although it holds a
        # Contig fixture — confirm against the model's relationship name.
        sequence.config = factory.ContigFactory()
        sequence.dbentity = factory.DbentityFactory()
        sequence.file = factory.FiledbentityFactory()
        sequence.genomerelease = factory.GenomereleaseFactory()
        sequence.reference = factory.ReferencedbentityFactory()
        sequence.so = factory.SoFactory()
        sequence.source = factory.SourceFactory()
        sequence.taxonomy = factory.TaxonomyFactory()
        return MockQuery(sequence)
    else:
        # Unhandled query: make the gap loudly visible in test output.
        print("Locus side effect condition not handled!!!!")
        print(args[0])
def phenotype_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the phenotype view tests.

    Dispatches on the string repr of the query arguments (model classes or
    column expressions) and returns a ``MockQuery`` wrapping matching
    factory objects.  Unhandled argument combinations fall through and
    implicitly return None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotype'>":
        obs = factory.ApoFactory()
        qual = factory.ApoFactory()
        pheno = factory.PhenotypeFactory()
        pheno.observable = obs
        pheno.qualifier = qual
        return MockQuery(pheno)
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.taxonomy_id' and str(args[1]) == 'count(nex.phenotypeannotation.taxonomy_id)':
        # Simulates a GROUP BY count query: one (taxonomy_id, count) row.
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery((phenoannot.taxonomy_id, 20))
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.taxonomy_id' and str(args[1]) == 'Phenotypeannotation.annotation_id':
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery(phenoannot)
    elif len(args) == 2 and str(args[0]) == 'PhenotypeannotationCond.annotation_id' and str(args[1]) == 'count(DISTINCT nex.phenotypeannotation_cond.group_id)':
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery((phenocond.annotation_id, 20))
    elif len(args) == 2 and str(args[0]) == 'PhenotypeannotationCond.annotation_id' and str(args[1]) == ' func.count(distinct(PhenotypeannotationCond.group_id))':
        # Alternate repr of the same DISTINCT-count query.
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery((phenocond.annotation_id, 20))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        # Fully-wired annotation: mutant/experiment Apo terms, phenotype,
        # dbentity and a reference with a journal attached.
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        mut = factory.ApoFactory()
        exp = factory.ApoFactory()
        pheno = factory.PhenotypeFactory()
        db = factory.DbentityFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.mutant = mut
        phenoannot.experiment = exp
        phenoannot.phenotype = pheno
        phenoannot.dbentity = db
        phenoannot.reference = refdbentity
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery(phenocond)
    elif len(args) == 2 and str(args[0]) == 'Chebi.display_name' and str(args[1]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery((chebi.display_name, chebi.obj_url))
    elif len(args) == 2 and str(args[0]) == 'Goannotation.dbentity_id' and str(args[1]) == 'count(nex.goannotation.dbentity_id)':
        goannot = factory.GoannotationFactory()
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
def observable_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the observable view tests.

    Dispatches on the string repr of the query arguments and returns a
    ``MockQuery`` wrapping matching factory objects.  Unmatched
    combinations fall into the diagnostic ``else`` at the bottom.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
    elif len(args) == 3 and str(args[0]) == 'Phenotype.obj_url' and str(args[1]) == 'Phenotype.qualifier_id' and str(args[2]) == 'Phenotype.phenotype_id':
        pheno = factory.PhenotypeFactory()
        return MockQuery((pheno.obj_url, pheno.qualifier_id, pheno.phenotype_id,))
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.dbentity_id' and str(args[1]) == 'count(nex.phenotypeannotation.dbentity_id)':
        # (dbentity_id, count) row for a grouped count query.
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery((phenoannot.dbentity_id, 20))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ApoRelation'>":
        parent = factory.ApoFactory()
        child = factory.ApoFactory()
        ro = factory.RoFactory()
        aporel = factory.ApoRelationFactory()
        aporel.parent = parent
        aporel.child = child
        aporel.ro = ro
        return MockQuery(aporel)
    elif len(args) == 1 and str(args[0]) == 'Phenotype.phenotype_id':
        pheno = factory.PhenotypeFactory()
        return MockQuery((pheno.phenotype_id,))
    elif len(args) == 1 and str(args[0]) == 'Apo.display_name':
        apo = factory.ApoFactory()
        return MockQuery(apo.display_name)
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.taxonomy_id' and str(args[1]) == 'count(nex.phenotypeannotation.taxonomy_id)':
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery((phenoannot.taxonomy_id, 20))
    elif len(args) == 2 and str(args[0]) == 'Phenotypeannotation.taxonomy_id' and str(args[1]) == 'Phenotypeannotation.annotation_id':
        pheno = factory.PhenotypeFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        return MockQuery((phenoannot),)
    elif len(args) == 2 and str(args[0]) == 'PhenotypeannotationCond.annotation_id' and str(args[1]) == 'count(DISTINCT nex.phenotypeannotation_cond.group_id)':
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery((phenocond.annotation_id, 20))
    elif len(args) == 1 and str(args[0]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery(chebi.obj_url)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        mut = factory.ApoFactory()
        exp = factory.ApoFactory()
        pheno = factory.PhenotypeFactory()
        db = factory.DbentityFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.mutant = mut
        phenoannot.experiment = exp
        phenoannot.phenotype = pheno
        phenoannot.dbentity = db
        phenoannot.reference = refdbentity
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotype'>":
        pheno = factory.PhenotypeFactory()
        return MockQuery(pheno)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery(phenocond)
    elif len(args) == 2 and str(args[0]) == 'Chebi.display_name' and str(args[1]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery((chebi.display_name, chebi.obj_url))
    elif len(args) == 2 and str(args[0]) == 'Goannotation.dbentity_id' and str(args[1]) == 'count(nex.goannotation.dbentity_id)':
        goannot = factory.GoannotationFactory()
        return MockQuery(goannot)
    else:
        # Debug aid for unmatched queries.
        # NOTE(review): args[1] raises IndexError when an unmatched
        # single-argument query arrives here — confirm this is acceptable
        # as a loud failure in tests.
        print("the problem is the condition!!!!")
        print(args[0])
        print(args[1])
def disease_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the disease view tests.

    Dispatches on the string repr of the query arguments and returns a
    ``MockQuery`` wrapping matching factory objects.  Unhandled argument
    combinations implicitly return None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Disease'>":
        dis = factory.DiseaseFactory()
        return MockQuery(dis)
    # A plain `if` rather than `elif`; harmless because the branch above
    # always returns.
    if len(args) == 2 and str(args[0]) == 'Diseaseannotation.dbentity_id' and str(args[1]) == 'count(nex.diseaseannotation.dbentity_id)':
        dis = factory.DiseaseFactory()
        disannot = factory.DiseaseannotationFactory()
        # NOTE(review): attribute is `dis` here but `disease` in the full
        # Diseaseannotation branch below — confirm which name the code
        # under test reads.
        disannot.dis = dis
        return MockQuery(disannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DiseaseRelation'>":
        dischild = factory.DiseaseFactory()
        disparent = factory.DiseaseFactory()
        disrel = factory.DiseaseRelationFactory()
        ro = factory.RoFactory()
        disrel.child = dischild
        disrel.parent = disparent
        disrel.ro = ro
        return MockQuery(disrel)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DiseaseUrl'>":
        disurl = factory.DiseaseUrlFactory()
        return MockQuery(disurl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DiseaseAlias'>":
        disalias = factory.DiseaseAliasFactory()
        return MockQuery(disalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Diseaseannotation'>":
        # Fully-wired annotation with disease, dbentity, reference+journal
        # and source attached.
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        dbent = factory.DbentityFactory()
        dis = factory.DiseaseFactory()
        disannot = factory.DiseaseannotationFactory()
        disannot.disease = dis
        disannot.dbentity = dbent
        disannot.reference = refdbentity
        disannot.source = source
        return MockQuery(disannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoAlias'>":
        ecoalias = factory.EcoAliasFactory()
        return MockQuery(ecoalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoUrl'>":
        ecourl = factory.EcoUrlFactory()
        return MockQuery(ecourl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dbentity'>":
        dbent = factory.DbentityFactory()
        return MockQuery(dbent)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Diseasesupportingevidence'>":
        disevd = factory.DiseasesupportingevidenceFactory()
        return MockQuery(disevd)
    elif len(args) == 3 and str(args[0]) == "<class 'src.models.Diseaseannotation'>" and str(args[1]) == 'Diseasesupportingevidence.dbxref_id' and str(args[2]) == 'Diseasesupportingevidence.obj_url':
        dis = factory.DiseaseFactory()
        disannot = factory.DiseaseannotationFactory()
        # NOTE(review): same `dis` vs `disease` naming question as above.
        disannot.dis = dis
        return MockQuery(disannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        refdb = factory.ReferencedbentityFactory()
        return MockQuery(refdb)
def chemical_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the chemical (Chebi) view tests.

    Dispatches on the query arguments (model classes, or string reprs of
    model classes / column expressions) and returns a ``MockQuery``
    wrapping matching factory objects.  Any other single-argument query
    falls into the ChebiAlia catch-all; everything else prints a
    diagnostic and returns None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Chebi'>":
        chem = factory.ChebiFactory()
        return MockQuery(chem)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ChebiAlia'>":
        chebi_alias = factory.ChebiAliaFactory()
        return MockQuery(chebi_alias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ChebiUrl'>":
        url = factory.ChebiUrlFactory()
        return MockQuery(url)
    elif len(args) == 1 and str(args[0]) == 'PhenotypeannotationCond.annotation_id':
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery([(phenocond.annotation_id,)])
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        db_entity = factory.DbentityFactory()
        pheno = factory.PhenotypeFactory()
        # Fixed: was factory.Phenotypeannotation() — the model class name,
        # not the factory — which raised AttributeError when this branch
        # was hit.  All sibling branches use PhenotypeannotationFactory().
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.phenotype = pheno
        phenoannot.dbentity = db_entity
        phenoannot.reference = refdbentity
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        phenocond = factory.PhenotypeannotationCondFactory()
        return MockQuery(phenocond)
    elif len(args) == 1 and str(args[0]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery(chebi.obj_url)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Interactor'>":
        interactor = factory.InteractorFactory()
        return MockQuery(interactor)
    elif len(args) == 1 and str(args[0]) == "Interactor.interactor_id":
        interactor = factory.InteractorFactory()
        return MockQuery(interactor)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Complexbindingannotation'>":
        bind = factory.ComplexbindingannotationFactory()
        return MockQuery(bind)
    elif len(args) == 1 and str(args[0]) == "Goextension.annotation_id":
        ro = factory.RoFactory()
        goext = factory.GoextensionFactory()
        goext.ro = ro
        return MockQuery(goext)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        dbent = factory.DbentityFactory()
        go = factory.GoFactory()
        goannot = factory.GoannotationFactory()
        goannot.go = go
        goannot.dbentity = dbent
        goannot.reference = refdbentity
        goannot.source = source
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoAlias'>":
        # Fixed: was factory.EcAliasFactory() (typo) — the EcoAlias branch
        # must build EcoAliasFactory, as the sibling dispatchers do.
        ecoalias = factory.EcoAliasFactory()
        return MockQuery(ecoalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoUrl'>":
        ecourl = factory.EcoUrlFactory()
        return MockQuery(ecourl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goextension'>":
        ro = factory.RoFactory()
        goext = factory.GoextensionFactory()
        goext.ro = ro
        return MockQuery(goext)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dbentity'>":
        db = factory.DbentityFactory()
        return MockQuery(db)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Gosupportingevidence'>":
        goev = factory.GosupportingevidenceFactory()
        return MockQuery(goev)
    elif len(args) == 1 and args[0] == Proteinabundanceannotation:
        prot = factory.ProteinabundanceAnnotationFactory()
        prot.eco = factory.EcoFactory()
        prot.efo = factory.EfoFactory()
        prot.dbentity = factory.DbentityFactory()
        prot.reference = factory.ReferencedbentityFactory()
        prot.original_reference = factory.ReferencedbentityFactory()
        prot.chebi = factory.ChebiFactory()
        prot.go = factory.GoFactory()
        prot.source = factory.SourceFactory()
        prot.taxonomy = factory.TaxonomyFactory()
        return MockQuery(prot)
    elif len(args) == 1 and args[0] == Referencedbentity:
        ref = factory.ReferencedbentityFactory()
        ref.book = factory.BookFactory()
        ref.journal = factory.JournalFactory()
        return MockQuery(ref)
    elif len(args) == 1 and args[0] == Pathwaydbentity:
        pathway = factory.PathwaydbentityFactory()
        return MockQuery(pathway)
    elif len(args) == 1:
        # Catch-all for any other single-argument query.
        cheb = factory.ChebiAliaFactory()
        return MockQuery(cheb)
    else:
        print("COULDN'T FIND ANYTHING CHEMICAL SIDE EFFECT")
        print("args = {}, type is {}".format(args[0], type(args[0])))
        return None
def author_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the author view tests."""
    if len(args) != 1:
        return None
    query_target = str(args[0])
    if query_target == "<class 'src.models.Referenceauthor'>":
        # Source/journal/book factories are instantiated to mirror the
        # original fixture setup, even though only the reference is
        # attached to the author.
        factory.SourceFactory()
        factory.JournalFactory()
        factory.BookFactory()
        reference = factory.ReferencedbentityFactory()
        author = factory.ReferenceauthorFactory()
        author.reference = reference
        return MockQuery(author)
    if query_target == 'Referencedocument.html':
        factory.SourceFactory()
        journal = factory.JournalFactory()
        factory.BookFactory()
        reference = factory.ReferencedbentityFactory()
        reference.journal = journal
        document = factory.ReferencedocumentFactory()
        return MockQuery(document.html)
    if query_target == "<class 'src.models.ReferenceUrl'>":
        return MockQuery(factory.ReferenceUrlFactory())
    if query_target == 'Referencetype.display_name':
        return MockQuery(factory.ReferencetypeFactory().display_name)
    return None
def keywords_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the keyword view tests."""
    if len(args) != 1:
        return None
    query_target = str(args[0])
    if query_target == 'DISTINCT nex.dataset_keyword.keyword_id':
        dataset_keyword = factory.DatasetKeywordFactory()
        dataset_keyword.keyword = factory.KeywordFactory()
        return MockQuery(dataset_keyword.keyword_id)
    if query_target == "<class 'src.models.DatasetKeyword'>":
        return MockQuery([factory.DatasetKeywordFactory()])
    if query_target == "<class 'src.models.Dataset'>":
        return MockQuery([factory.DatasetFactory()])
    if query_target == "<class 'src.models.Keyword'>":
        return MockQuery([factory.KeywordFactory()])
    return None
def dataset_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the dataset view tests."""
    if len(args) != 1:
        return None
    query_target = str(args[0])
    if query_target == "<class 'src.models.Dataset'>":
        return MockQuery(factory.DatasetFactory())
    if query_target == "<class 'src.models.DatasetKeyword'>":
        dataset_keyword = factory.DatasetKeywordFactory()
        dataset_keyword.keyword = factory.KeywordFactory()
        return MockQuery(dataset_keyword)
    if query_target == "<class 'src.models.Keyword'>":
        return MockQuery(factory.KeywordFactory())
    if query_target == "<class 'src.models.DatasetReference'>":
        return MockQuery(factory.DatasetReferenceFactory())
    if query_target == 'Referencedocument.html':
        return MockQuery(factory.ReferencedocumentFactory().html)
    if query_target == "<class 'src.models.Datasetsample'>":
        return MockQuery(factory.DatasetsampleFactory())
    if query_target == "<class 'src.models.DatasetUrl'>":
        return MockQuery(factory.DatasetUrlFactory())
    if query_target == "<class 'src.models.DatasetFile'>":
        dataset_file = factory.DatasetFileFactory()
        dataset_file.file = factory.FiledbentityFactory()
        return MockQuery(dataset_file)
    return None
def side_effect(*args, **kwargs):
    """Generic mock ``DBSession.query`` side effect (strain, colleague,
    posttranslation and curator-activity tests).

    Dispatches on the string repr of the query arguments and returns a
    ``MockQuery`` wrapping matching factory objects.  Unhandled argument
    combinations implicitly return None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    # A plain `if` rather than `elif`; harmless because the branch above
    # always returns.
    if len(args) == 3 and str(args[0]) == 'StrainUrl.display_name' and str(args[1]) == 'StrainUrl.url_type' and str(
            args[2]) == 'StrainUrl.obj_url':
        strain_url = factory.StrainUrlFactory()
        return MockQuery((strain_url.display_name, strain_url.url_type, strain_url.obj_url))
    elif len(args) == 2 and str(args[0]) == 'Strainsummary.summary_id' and str(args[1]) == 'Strainsummary.html':
        strain_summary = factory.StrainsummaryFactory()
        return MockQuery((strain_summary.summary_id, strain_summary.html))
    elif len(args) == 1 and str(args[0]) == 'StrainsummaryReference.reference_id':
        strain_ref = factory.StrainsummaryReferenceFactory()
        return MockQuery([(strain_ref.reference_id,)])
    elif len(args) == 1 and str(args[0]) == 'ReferenceUrl.reference_id':
        refurl = factory.ReferenceUrlFactory()
        # NOTE(review): returns obj_url for a reference_id query —
        # confirm this mismatch is intentional for the consuming test.
        return MockQuery(refurl.obj_url)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        return MockQuery(refdbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceUrl'>":
        refurl = factory.ReferenceUrlFactory()
        return MockQuery(refurl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Contig'>":
        c_name = factory.ContigFactory()
        return MockQuery(c_name)
    elif len(args) == 2 and str(args[0]) == 'Contig.format_name' and str(args[1]) == 'Contig.obj_url':
        c_name = factory.ContigFactory()
        return MockQuery((c_name.format_name, c_name.obj_url))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Ec'>":
        ec = factory.EcFactory()
        return MockQuery(ec)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcUrl'>":
        ecurl = factory.EcUrlFactory()
        return MockQuery(ecurl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Psimod'>":
        psimod = factory.PsimodFactory()
        return MockQuery([psimod])
    elif len(args) == 1 and str(args[0]) == "Posttranslationannotation.psimod_id":
        # NOTE(review): builds a Psimod factory as a stand-in for the
        # psimod_id column select — confirm the consumer only needs the
        # psimod object, not the raw id.
        ptm = factory.PsimodFactory()
        return MockQuery([ptm])
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dbentity'>":
        dbentity = factory.DbentityFactory()
        return MockQuery(dbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Posttranslationannotation'>":
        # Fully-wired PTM annotation with dbentity, reference, source and
        # psimod attached.
        ptm = factory.PosttranslationannotationFactory()
        dbentity = factory.DbentityFactory()
        reference = factory.ReferencedbentityFactory()
        source = factory.SourceFactory()
        psimod = factory.PsimodFactory()
        ptm.dbentity = dbentity
        ptm.reference = reference
        ptm.source = source
        ptm.psimod = psimod
        return MockQuery(ptm)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Colleague'>":
        colleague = factory.ColleagueFactory()
        return MockQuery([colleague,colleague])
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Colleaguetriage'>":
        colleague_triage = factory.ColleaguetriageFactory()
        return MockQuery([colleague_triage])
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.CuratorActivity'>":
        curator_activity = factory.CuratorActivityFactory()
        return MockQuery([curator_activity])
# def mock_extract_id_request(request, classname):
# return 'S000203483'
def locus_reference_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the locus-reference tests.

    Most branches simulate single-column ``reference_id`` selects by
    returning one-element tuples; unmatched combinations fall into the
    diagnostic ``else`` at the bottom.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == "Literatureannotation.reference_id":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        litannot = factory.LiteratureannotationFactory()
        db = factory.DbentityFactory()
        litannot.reference = refdbentity
        litannot.dbentity = db
        return MockQuery((litannot.reference_id,))
    elif len(args) == 1 and str(args[0]) == "Geninteractionannotation.reference_id":
        gen = factory.GeninteractionannotationFactory()
        return MockQuery((gen.reference_id,))
    elif len(args) == 1 and str(args[0]) == "Physinteractionannotation.reference_id":
        gen = factory.PhysinteractionannotationFactory()
        return MockQuery((gen.reference_id,))
    elif len(args) == 1 and str(args[0]) == "Regulationannotation.reference_id":
        reg = factory.RegulationannotationFactory()
        return MockQuery((reg.reference_id,))
    elif len(args) == 1 and str(args[0]) == "Phenotypeannotation.reference_id":
        pheno = factory.PhenotypeannotationFactory()
        return MockQuery((pheno.reference_id,))
    elif len(args) == 1 and str(args[0]) == "Goannotation.reference_id":
        go = factory.GoannotationFactory()
        return MockQuery((go.reference_id,))
    elif len(args) == 1 and str(args[0]) == "Diseaseannotation.reference_id":
        do = factory.DiseaseannotationFactory()
        return MockQuery((do.reference_id,))
    elif len(args) == 1 and str(args[0]) == "ReferenceAlias.reference_id":
        refalias = factory.ReferenceAliasFactory()
        # Unlike the branches above, this one returns a bare value, not a
        # one-element tuple.
        return MockQuery(refalias.reference_id)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        return MockQuery(refdbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceUrl'>":
        refurl = factory.ReferenceUrlFactory()
        return MockQuery(refurl)
    elif len(args) == 1 and str(args[0]) == "Apo.apo_id":
        apo = factory.ApoFactory()
        return MockQuery(apo.apo_id)
    elif len(args) == 2 and str(args[0]) == "Phenotypeannotation.reference_id" and str(args[1]) == "Phenotypeannotation.experiment_id":
        phen = factory.PhenotypeannotationFactory()
        return MockQuery((phen.reference_id, phen.experiment_id))
    elif len(args) == 2 and str(args[0]) == "Literatureannotation.reference_id" and str(args[1]) == "Literatureannotation.topic":
        lit = factory.LiteratureannotationFactory()
        return MockQuery((lit.reference_id, lit.topic))
    else:
        # Debug aid for unmatched queries.
        # NOTE(review): args[1] raises IndexError when an unmatched
        # single-argument query arrives here — confirm this is acceptable.
        print("the problem is the condition!!!!")
        print(args[0])
        print(args[1])
def protein_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the protein view tests."""
    if len(args) != 1:
        return None
    query_target = str(args[0])
    if query_target == "<class 'src.models.Posttranslationannotation'>":
        return MockQuery(factory.PosttranslationannotationFactory())
    if query_target == "<class 'src.models.Referencedbentity'>":
        return MockQuery(factory.ReferencedbentityFactory())
    return None
def sequence_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the sequence view tests.

    Dispatches on the string repr of the query arguments and returns a
    ``MockQuery`` wrapping matching factory objects.  Unhandled argument
    combinations implicitly return None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == 'Locusdbentity.dbentity_id':
        locus = factory.LocusdbentityFactory()
        return MockQuery((locus.dbentity_id,))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dnasequenceannotation'>":
        dnaseq = factory.DnasequenceannotationFactory()
        contig = factory.ContigFactory()
        locus = factory.LocusdbentityFactory()
        dnaseq.contig = contig
        dnaseq.dbentity = locus
        return MockQuery(dnaseq)
    elif len(args) == 1 and str(args[0]) == 'Dnasequenceannotation.so_id':
        dnaseq = factory.DnasequenceannotationFactory()
        return MockQuery([(dnaseq.so_id,)])
    elif len(args) == 1 and str(args[0]) == 'So.display_name':
        so = factory.SoFactory()
        return MockQuery(so.display_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Proteinsequenceannotation'>":
        prtseq = factory.ProteinsequenceannotationFactory()
        contig = factory.ContigFactory()
        prtseq.contig = contig
        return MockQuery(prtseq)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dnasubsequence'>":
        dnasubseq = factory.DnasubsequenceFactory()
        return MockQuery(dnasubseq)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Contig'>":
        c_name = factory.ContigFactory()
        return MockQuery(c_name)
    elif len(args) == 2 and str(args[0]) == 'Dnasequenceannotation.so_id' and str(args[1]) == 'count(nex.dnasequenceannotation.annotation_id)':
        # (so_id, count) row for a grouped count query.
        dnaseq = factory.DnasequenceannotationFactory()
        return MockQuery((dnaseq.so_id, 20))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.So'>":
        so = factory.SoFactory()
        return MockQuery(so)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ContigUrl'>":
        ctgurl = factory.ContigUrlFactory()
        return MockQuery(ctgurl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ProteinsequenceDetail'>":
        prtseq = factory.ProteinsequenceDetailFactory()
        return MockQuery(prtseq)
def reference_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for the reference view tests.

    Dispatches on the query arguments (model classes, or string reprs of
    model classes / column expressions) and returns a ``MockQuery``
    wrapping matching factory objects.  Unhandled combinations print a
    diagnostic and return None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        return MockQuery(refdbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Locusdbentity'>":
        locus = factory.LocusdbentityFactory()
        return MockQuery(locus)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DatasetReference'>":
        datasetref = factory.DatasetReferenceFactory()
        datasetf = factory.DatasetFactory()
        datasetref.dataset = datasetf
        return MockQuery(datasetref)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dataset'>":
        dataset = factory.DatasetFactory()
        return MockQuery(dataset)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.DatasetKeyword'>":
        datasetkw = factory.DatasetKeywordFactory()
        datasetkw.keyword = factory.KeywordFactory()
        return MockQuery(datasetkw)
    elif len(args) == 1 and str(args[0]) == 'Referencedocument.html':
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdb = factory.ReferencedbentityFactory()
        refdb.journal = journal
        refdoc = factory.ReferencedocumentFactory()
        return MockQuery(refdoc.html)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceUrl'>":
        refurl = factory.ReferenceUrlFactory()
        return MockQuery(refurl)
    elif len(args) == 1 and str(args[0]) == 'Referencetype.display_name':
        reftype = factory.ReferencetypeFactory()
        return MockQuery((reftype.display_name))
    elif len(args) == 2 and str(args[0]) == 'Referenceauthor.display_name' and str(args[1]) == 'Referenceauthor.obj_url':
        refauthor = factory.ReferenceauthorFactory()
        return MockQuery((refauthor.display_name, refauthor.obj_url))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceRelation'>":
        refrel = factory.ReferenceRelationFactory()
        refrel.child = factory.ReferencedbentityFactory()
        refrel.parent = factory.ReferencedbentityFactory()
        return MockQuery((refrel))
    # (A second, unreachable duplicate of the ReferenceUrl branch was
    # removed here — its condition was identical to the one above.)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Physinteractionannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        intannot = factory.PhysinteractionannotationFactory()
        intannot.reference = refdbentity
        intannot.source = source
        db1 = factory.DbentityFactory(dbentity_id=1)
        db2 = factory.DbentityFactory(dbentity_id=2)
        intannot.dbentity1 = db1
        intannot.dbentity2 = db2
        return MockQuery((intannot))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Geninteractionannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        db1 = factory.DbentityFactory(dbentity_id=1)
        db2 = factory.DbentityFactory(dbentity_id=2)
        genannot = factory.GeninteractionannotationFactory()
        genannot.dbentity1 = db1
        genannot.dbentity2 = db2
        genannot.reference = refdbentity
        genannot.source = source
        return MockQuery((genannot))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        ecof = factory.EcoFactory()
        go = factory.GoFactory()
        db = factory.DbentityFactory()
        goannot = factory.GoannotationFactory()
        goannot.reference = refdbentity
        goannot.dbentity = db
        goannot.eco = ecof
        goannot.go = go
        goannot.source = source
        return MockQuery(goannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoAlias'>":
        ecoalias = factory.EcoAliasFactory()
        return MockQuery(ecoalias)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.EcoUrl'>":
        ecourl = factory.EcoUrlFactory()
        return MockQuery(ecourl)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Goextension'>":
        ro = factory.RoFactory()
        goext = factory.GoextensionFactory()
        goext.ro = ro
        return MockQuery(goext)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Dbentity'>":
        db = factory.DbentityFactory()
        return MockQuery(db)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Gosupportingevidence'>":
        goev = factory.GosupportingevidenceFactory()
        return MockQuery(goev)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        pheno = factory.PhenotypeFactory()
        db = factory.DbentityFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.reference = refdbentity
        phenoannot.phenotype = pheno
        phenoannot.dbentity = db
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Diseaseannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        disease = factory.DiseaseFactory()
        db = factory.DbentityFactory()
        # Fixed: was PhenotypeannotationFactory(), which built the wrong
        # model for a Diseaseannotation query.
        diseaseannot = factory.DiseaseannotationFactory()
        diseaseannot.reference = refdbentity
        diseaseannot.disease = disease
        diseaseannot.dbentity = db
        return MockQuery(diseaseannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        cond = factory.PhenotypeannotationCondFactory()
        return MockQuery(cond)
    elif len(args) == 1 and str(args[0]) == 'Chebi.obj_url':
        chebi = factory.ChebiFactory()
        return MockQuery(chebi.obj_url)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Regulationannotation'>":
        target = factory.DbentityFactory()
        regulator = factory.DbentityFactory()
        regannot = factory.RegulationannotationFactory()
        regannot.target = target
        regannot.regulator = regulator
        return MockQuery((regannot))
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Literatureannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        dbentity = factory.DbentityFactory()
        litannot = factory.LiteratureannotationFactory()
        litannot.dbentity = dbentity
        litannot.reference = refdbentity
        return MockQuery(litannot)
    # (A second, unreachable duplicate of the Straindbentity branch was
    # removed here — its condition was identical to the one above.)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.ReferenceFile'>":
        file = factory.FiledbentityFactory()
        referencefile = factory.ReferenceFileFactory()
        referencefile.file = file
        return MockQuery(referencefile)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Referencetriage'>":
        reference_triage = factory.ReferencetriageFactory()
        return MockQuery([reference_triage])
    elif len(args) == 2 and str(args[0]) == "<class 'src.models.CurationReference'>" and str(args[1]) == "<class 'src.models.Locusdbentity'>":
        curator_reference = factory.CurationReferenceFactory()
        locus_dbentity = factory.LocusdbentityFactory()
        mock = Mock()
        mock.Locusdbentity = locus_dbentity
        mock.CurationReference = curator_reference
        return MockQuery([mock])
    elif len(args) == 2 and str(args[0]) == "<class 'src.models.Literatureannotation'>" and str(args[1]) == "<class 'src.models.Locusdbentity'>":
        literature_annotation = factory.LiteratureannotationFactory()
        locus_dbentity = factory.LocusdbentityFactory()
        mock = Mock()
        mock.Locusdbentity = locus_dbentity
        mock.Literatureannotation = literature_annotation
        return MockQuery([mock])
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Posttranslationannotation'>":
        ptm = factory.PosttranslationannotationFactory()
        return MockQuery(ptm)
    elif len(args) == 1 and args[0] == AlleleGeninteraction:
        allelegen = factory.AlleleGeninteractionFactory()
        allelegen.allele1 = factory.AlleledbentityFactory()
        allelegen.allele2 = factory.AlleledbentityFactory()
        # Fixed typo: attribute was misspelled "soruce".
        allelegen.source = factory.SourceFactory()
        allelegen.interaction = factory.GeninteractionannotationFactory()
        return MockQuery(allelegen)
    elif len(args) == 1 and args[0] == Functionalcomplementannotation:
        func = factory.FunctionalcomplementannotationFactory()
        return MockQuery(func)
    elif len(args) == 2 and args[0] == CurationReference and args[1] == Complexdbentity:
        mock = Mock()
        mock.CurationReference = factory.CurationReferenceFactory()
        # Fixed: attribute was misnamed "ComplexdbentityFactory", so the
        # consumer read an auto-created child Mock instead of the fixture.
        mock.Complexdbentity = factory.ComplexdbentityFactory()
        return MockQuery([mock])
    elif len(args) == 2 and args[0] == CurationReference and args[1] == Pathwaydbentity:
        mock = Mock()
        mock.CurationReference = factory.CurationReferenceFactory()
        mock.Pathwaydbentity = factory.PathwaydbentityFactory()
        return MockQuery([mock])
    elif len(args) == 2 and args[0] == CurationReference and args[1] == Alleledbentity:
        mock = Mock()
        mock.CurationReference = factory.CurationReferenceFactory()
        mock.Alleledbentity = factory.AlleledbentityFactory()
        return MockQuery([mock])
    elif len(args) == 2 and args[0] == Literatureannotation and args[1] == Complexdbentity:
        mock = Mock()
        mock.Literatureannotation = factory.LiteratureannotationFactory()
        mock.Complexdbentity = factory.ComplexdbentityFactory()
        return MockQuery([mock])
    elif len(args) == 2 and args[0] == Literatureannotation and args[1] == Pathwaydbentity:
        mock = Mock()
        mock.Literatureannotation = factory.LiteratureannotationFactory()
        # Fixed: built a Complexdbentity and stored it under
        # mock.Complexdbentity; use the Pathwaydbentity factory/attribute,
        # mirroring the CurationReference/Pathwaydbentity branch above.
        mock.Pathwaydbentity = factory.PathwaydbentityFactory()
        return MockQuery([mock])
    elif len(args) == 2 and args[0] == Literatureannotation and args[1] == Alleledbentity:
        mock = Mock()
        mock.Literatureannotation = factory.LiteratureannotationFactory()
        # Fixed: was stored under mock.Complexdbentity.
        mock.Alleledbentity = factory.AlleledbentityFactory()
        return MockQuery([mock])
    else:
        print("the problem is the condition!!!!")
        print(args)
def reference_phenotype_side_effect(*args, **kwargs):
    """Mock ``DBSession.query`` side effect for reference/phenotype tests.

    Dispatches on the queried model class (compared via its repr string)
    and returns a MockQuery wrapping a suitable factory-built instance.
    NOTE(review): any model not listed here falls through and returns None.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Referencedbentity'>":
        # NOTE(review): `source` and `book` are never used - presumably kept
        # only for factory construction side effects; confirm before removing.
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        return MockQuery(refdbentity)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Phenotypeannotation'>":
        source = factory.SourceFactory()
        journal = factory.JournalFactory()
        book = factory.BookFactory()
        refdbentity = factory.ReferencedbentityFactory()
        refdbentity.journal = journal
        #pheno = factory.PhenotypeFactory()
        db = factory.DbentityFactory()
        phenoannot = factory.PhenotypeannotationFactory()
        phenoannot.reference = refdbentity
        #phenoannot.phenotype = pheno
        phenoannot.dbentity = db
        return MockQuery(phenoannot)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.PhenotypeannotationCond'>":
        cond = factory.PhenotypeannotationCondFactory()
        return MockQuery(cond)
    elif len(args) == 1 and str(args[0]) == 'Chebi.obj_url':
        # Column query: return the bare obj_url value rather than an entity.
        chebi = factory.ChebiFactory()
        return MockQuery(chebi.obj_url)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery(s_name)
    elif len(args) == 1 and str(args[0]) == "<class 'src.models.Apo'>":
        apo = factory.ApoFactory()
        return MockQuery(apo)
def strain_side_effect(*args, **kwargs):
    """Mock query side effect: wrap one strain in a list for Straindbentity queries.

    NOTE(review): returns None for any other model class.
    """
    if len(args) == 1 and str(args[0]) == "<class 'src.models.Straindbentity'>":
        s_name = factory.StraindbentityFactory()
        return MockQuery([s_name])
| 2.25 | 2 |
test/test_preprocess_emotion.py | SvenElyes/Textanalytics | 1 | 12767727 | <reponame>SvenElyes/Textanalytics
import unittest
import pandas as pd
import os
import pytest
import numpy as np
from pandas.testing import assert_frame_equal
import src.preprocess_emotion as preprocess_emotion
class Test_preprocess_emotion(unittest.TestCase):
    """Unit tests for src.preprocess_emotion."""

    def test_clearText(self):
        """clearText lowercases text and strips punctuation/newlines."""
        test = "this IS just love \n going to be a testcase - AMEN."
        out = preprocess_emotion.clearText(test)
        check = "this is just love going to be a testcase amen"
        self.assertEqual(out, check)

    def test_main(self):
        """End-to-end run over a fixture CSV, compared against a golden file."""
        prepro_in = pd.read_csv("test/csv_test/prepro_in.csv")
        df_bible = preprocess_emotion.main("", prepro_in, "test/csv_test/out.csv")
        # dropped from evaluation because it uses randomness
        df_bible.drop(["similarity_emotion"], axis=1, inplace=True)
        prepro_out = pd.read_csv("test/csv_test/prepro_out.csv")
        prepro_out = prepro_out[
            ["Unnamed: 0", "characters", "text", "tb_emotion", "bow_emotion", "emotion"]
        ]
        assert_frame_equal(prepro_out, df_bible)

    def test_preText(self):
        """preText scores a sentence against positive/negative bags of words."""
        test = "Jesus likes all the people"
        bow_pos = [
            "love",
            "love",
            "love",
            "love",
            "love",
            "like",
            "love",
            "love",
            "love",
            "love",
            "love",
        ]
        bow_neg = ["bad", "bad", "bad", "bad", "bad", "bad", "bad", "bad", "bad", "bad"]
        # score_word_sim not evaluated because of random choises
        _, score_bow = preprocess_emotion.preText(test, bow_pos, bow_neg)
        emotion_out = 1.0
        self.assertEqual(emotion_out, score_bow)

    def test_wordSimilarity(self):
        """wordSimilarity classifies 'romance' as positive (1.0) given the bags."""
        bow_pos = [
            "love",
            "love",
            "love",
            "love",
            "love",
            "love",
            "love",
            "love",
            "love",
            "love",
            "love",
        ]
        bow_neg = ["bad", "bad", "bad", "bad", "bad", "bad", "bad", "bad", "bad", "bad"]
        verb = "romance"
        out = preprocess_emotion.wordSimilarity(bow_pos, bow_neg, verb)
        out_prejected = 1.0
        self.assertEqual(out, out_prejected)
| 3.09375 | 3 |
dvmvs/layers.py | hashi0203/deep-video-mvs | 1 | 12767728 | <reponame>hashi0203/deep-video-mvs
import torch
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two conv blocks where the second halves spatial resolution.

    Each block is Conv2d (bias-free) -> BatchNorm2d -> ReLU; the first uses
    stride 1, the second stride 2 to downsample by a factor of two.
    """
    pad = (kernel_size - 1) // 2
    modules = []
    for stride in (1, 2):
        in_ch = input_channels if stride == 1 else output_channels
        modules.append(torch.nn.Conv2d(
            in_ch,
            output_channels,
            kernel_size,
            padding=pad,
            stride=stride,
            bias=False))
        modules.append(torch.nn.BatchNorm2d(output_channels))
        modules.append(torch.nn.ReLU())
    return torch.nn.Sequential(*modules)
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsample followed by Conv2d (bias-free) -> BatchNorm2d -> ReLU."""
    pad = (kernel_size - 1) // 2
    modules = [
        torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
        torch.nn.Conv2d(input_channels, output_channels, kernel_size, padding=pad, bias=False),
        torch.nn.BatchNorm2d(output_channels),
        torch.nn.ReLU(),
    ]
    return torch.nn.Sequential(*modules)
def conv_layer(input_channels, output_channels, kernel_size, stride, apply_bn_relu):
    """Single Conv2d (bias-free); optionally followed by BatchNorm2d + ReLU.

    When apply_bn_relu is falsy, the Sequential contains just the conv.
    """
    modules = [
        torch.nn.Conv2d(
            input_channels,
            output_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            stride=stride,
            bias=False),
    ]
    if apply_bn_relu:
        modules.append(torch.nn.BatchNorm2d(output_channels))
        modules.append(torch.nn.ReLU(inplace=True))
    return torch.nn.Sequential(*modules)
def depth_layer_3x3(input_channels):
    """3x3 conv down to a single channel, squashed to (0, 1) by a sigmoid."""
    head = torch.nn.Conv2d(input_channels, 1, 3, padding=1)
    return torch.nn.Sequential(head, torch.nn.Sigmoid())
| 2.328125 | 2 |
Arithmatic/sub/Subtraction.py | MuditMaurya/Calculator | 1 | 12767729 | <filename>Arithmatic/sub/Subtraction.py<gh_stars>1-10
class Subtract:
    """Holds two numbers and computes their difference on demand."""

    def __init__(self, fnum, snum):
        self.fnum = fnum
        self.snum = snum

    def allSub(self):
        """Return fnum - snum, caching the result on ``self.sub``."""
        difference = self.fnum - self.snum
        self.sub = difference
        return difference
university_example.py | ericoc/indego-py-lib | 5 | 12767730 | <filename>university_example.py<gh_stars>1-10
from indego import Indego

# Instantiate the Indego bike-share API client.
indego = Indego()

# Retrieve and print stations whose name matches "university".
uni_stations = indego.get_stations('university')
print(uni_stations)

# Report how many stations the query returned.
uni_count = len(uni_stations)
print(uni_count, 'stations returned')
| 3.5 | 4 |
athanor_entity/entities/handlers.py | volundmush/athanor_entity | 0 | 12767731 | <gh_stars>0
from django.conf import settings
from evennia import GLOBAL_SCRIPTS
from evennia.utils.utils import class_from_module
from evennia.objects.objects import ObjectSessionHandler
class KeywordHandler(object):
    """Stub handler for an entity's keywords."""

    def __init__(self, owner):
        self.owner = owner

    def all(self, looker=None):
        # TODO: not implemented yet - currently returns None.
        pass
class BodyHandler(object):
    """Tracks an entity's physical forms and which one is currently active."""

    @property
    def persistent(self):
        # Mirrors the owning entity's persistence flag.
        return self.owner.persistent

    def __init__(self, owner):
        self.owner = owner
        self.forms = dict()   # form name -> form object (populated elsewhere)
        self.active = None    # the currently assumed form, if any

class AspectHandler(object):
    """Holds the aspects attached to an owner, keyed by name."""

    def __init__(self, owner):
        self.owner = owner
        self.aspects = {}

    def all(self):
        """Return every registered aspect (a live view of the dict's values)."""
        return self.aspects.values()
class ItemHandler(object):
    """Tracks the entities an owner carries, grouped into named inventories."""

    def __init__(self, owner):
        self.owner = owner
        self.inventories = dict()

    @property
    def contents(self):
        """All carried entities across every inventory, as a list.

        Recomputed from the individual inventories on each access.
        """
        carried = set()
        for inv in self.inventories.values():
            # Fixed: `set += list` raises TypeError; update() is the bulk-add.
            carried.update(inv.contents)
        return list(carried)

    def all(self, inv_name=None):
        """Return carried entities, optionally restricted to one inventory."""
        if not inv_name:
            return self.contents
        if inv_name in self.inventories:
            return self.inventories[inv_name].all()
        return list()

    def get_inventory(self, inv_name):
        """Return the named inventory, creating it (per settings) on first use."""
        if (found := self.inventories.get(inv_name, None)):
            return found
        inv_class = class_from_module(settings.SPECIAL_INVENTORY_CLASSES.get(inv_name, settings.BASE_INVENTORY_CLASS))
        new_inv = inv_class(self, inv_name)
        self.inventories[inv_name] = new_inv
        return new_inv

    def can_add(self, entity, inv_name):
        """Raise ValueError if `entity` cannot be added to the named inventory."""
        if entity in self.contents:
            raise ValueError(f"{self.owner} is already carrying {entity}!")
        inv = self.get_inventory(inv_name)
        for aspect in self.owner.aspects.all():
            if not aspect.at_before_get(entity, inv):
                raise ValueError(f"{aspect} does not allow getting {entity}!")
        inv.can_add(entity)

    def can_transfer(self, entity, inv_name):
        """Raise ValueError if `entity` cannot move into the named inventory."""
        if entity not in self.contents:
            raise ValueError(f"{self.owner} is not carrying {entity}!")
        old_inv = entity.inventory_location
        old_inv.can_remove(entity)
        inv = self.get_inventory(inv_name)
        inv.can_add(entity)

    def can_remove(self, entity):
        """Raise ValueError if `entity` cannot be removed from its inventory."""
        if entity not in self.contents:
            raise ValueError(f"{self.owner} is not carrying {entity}!")
        old_inv = entity.inventory_location
        old_inv.can_remove(entity)

    def add(self, entity, inv_name=None, run_checks=True):
        """Place `entity` into an inventory (its default one if unspecified)."""
        if not inv_name:
            inv_name = entity.default_inventory
        if run_checks:
            self.can_add(entity, inv_name)
        inv = self.get_inventory(inv_name)
        inv.add(entity)
        # Fixed: the old `self.contents.add(entity)` mutated the throwaway
        # list returned by the `contents` property (lists have no .add, so it
        # raised AttributeError). `contents` is derived; no bookkeeping needed.

    def transfer(self, entity, inv_name, run_checks=True):
        """Move `entity` from its current inventory into `inv_name`."""
        if run_checks:
            self.can_transfer(entity, inv_name)
        dest = self.get_inventory(inv_name)
        inv = entity.inventory_location
        inv.remove(entity)
        dest.add(entity)

    def remove(self, entity, run_checks=True):
        """Take `entity` out of whatever inventory holds it."""
        if run_checks:
            self.can_remove(entity)
        inv = entity.inventory_location
        inv.remove(entity)
        # Fixed: the old `self.contents.remove(entity)` operated on a freshly
        # computed list that no longer contained the entity - see add().
class EquipRequest(object):
    """Resolves where a piece of gear should be equipped.

    Fills in the gearset, gearslot and layer (falling back to the entity's
    defaults when not supplied) and raises ValueError if no valid slot exists.
    """

    def __init__(self, handler, entity, gearset=None, gearset_name=None, gearslot=None, gearslot_name=None, layer=None):
        self.handler = handler
        self.equipper = handler.owner
        self.entity = entity
        self.gearset = gearset
        self.gearset_name = gearset_name
        self.gearslot = gearslot
        self.gearslot_name = gearslot_name
        self.layer = layer
        self.process()

    def process(self):
        """Resolve gearset -> gearslot -> layer, raising ValueError on any gap."""
        if not self.gearset:
            if not self.gearset_name:
                self.gearset_name = self.entity.default_gearset
            if not self.gearset_name:
                raise ValueError(f"{self.entity} cannot be equipped: No GearSet to equip it to.")
            self.gearset = self.handler.get_gearset(self.gearset_name)
        if not self.gearslot:
            if not self.gearslot_name:
                self.gearslot_name = self.entity.default_gearslot
            if not self.gearslot_name:
                raise ValueError(f"{self.entity} cannot be equipped: No GearSlot is available for {self.gearset}!")
            self.gearslot = self.gearset.get_gearslot(self.gearslot_name)
        # Remember, layer 0 is a totally viable layer. We can't just check for False here.
        self.layer = self.gearslot.available_layer(self.layer)
        if self.layer is None:
            raise ValueError(f"{self.gearslot} has no available layers!")
class GearHandler(object):
    """Tracks the entities an owner has equipped, grouped into named gearsets."""

    def __init__(self, owner):
        self.owner = owner
        self.gearsets = dict()
        self.contents = set()  # every equipped entity, across all gearsets

    @property
    def equipped(self):
        """All equipped entities across every gearset, as a list."""
        worn = set()
        for gearset in self.gearsets.values():
            # Fixed: `set += list` raises TypeError; update() is the bulk-add.
            worn.update(gearset.equipped)
        return list(worn)

    def all(self, gearset_name=None):
        """Return equipped entities, optionally restricted to one gearset."""
        if not gearset_name:
            return list(self.contents)
        if gearset_name in self.gearsets:
            return self.gearsets[gearset_name].all()
        return list()

    def get_gearset(self, set_name):
        """Return the named gearset, creating it (per settings) on first use."""
        if (found := self.gearsets.get(set_name, None)):
            return found
        inv_class = class_from_module(settings.SPECIAL_GEARSET_CLASSES.get(set_name, settings.BASE_GEARSET_CLASS))
        new_inv = inv_class(self, set_name)
        self.gearsets[set_name] = new_inv
        return new_inv

    def can_equip(self, entity):
        """Raise ValueError if `entity` cannot be equipped right now."""
        if entity in self.contents:
            raise ValueError(f"{entity} is already equipped by {self.owner}!")
        if entity not in self.owner.items.contents:
            raise ValueError(f"{self.owner} is not carrying {entity}!")
        entity.inventory_location.can_remove(entity)

    def equip(self, entity, set_name=None, set_slot=None, set_layer=None, run_checks=True):
        """Move `entity` from the owner's inventory into a resolved gear slot."""
        if run_checks:
            self.can_equip(entity)
        request = EquipRequest(self, entity, gearset_name=set_name, gearslot_name=set_slot, layer=set_layer)
        if run_checks:
            for aspect in self.owner.aspects.all():
                if not aspect.at_before_equip(entity, request):
                    raise ValueError(f"{aspect} does not allow equipping {entity}!")
        self.owner.items.remove(entity)
        request.gearset.equip(request)
        self.contents.add(entity)

    def can_unequip(self, entity):
        """Raise ValueError if `entity` cannot be unequipped."""
        if entity not in self.contents:
            raise ValueError(f"{self.owner} is not using {entity}!")
        old_gear = entity.equip_location
        old_gear.can_unequip(entity)

    def unequip(self, entity, inv_name=None, run_checks=True):
        """Move `entity` from its gear slot back into the owner's inventory."""
        if run_checks:
            self.can_unequip(entity)
            # NOTE(review): when inv_name is None this checks the None
            # inventory while add() below uses the entity's default - confirm.
            self.owner.items.can_add(entity, inv_name)
        gear = entity.equip_location
        gear.remove(entity)
        self.contents.remove(entity)
        self.owner.items.add(entity, inv_name, run_checks=False)
class MapHandler(object):
    """Lazily loads and caches the rooms/areas/gateways of an entity's map."""

    def __init__(self, owner):
        self.owner = owner
        self.rooms = dict()
        self.gateways = dict()
        self.areas = dict()
        self.loaded = False

    def get_room(self, room_key):
        """Return the room for `room_key`, loading the map on first access."""
        if not self.loaded:
            self.load()
        return self.rooms.get(room_key, None)

    def load(self):
        """Populate rooms/areas/gateways from the owner's map plugin data."""
        if self.loaded:
            return
        if not hasattr(self.owner, 'map_bridge'):
            raise ValueError(f"{self.owner} does not support an internal map!")
        bri = self.owner.map_bridge
        if not (plugin := GLOBAL_SCRIPTS.entity.ndb.plugins.get(bri.plugin, None)):
            raise ValueError(f"Cannot load {self.owner} map data: {bri.plugin} extension not found.")
        if not (inst := plugin.maps.get(bri.map_key, None)):
            raise ValueError(
                f"Cannot load {self.owner} map data: {bri.plugin}/{bri.map_key} map not found.")
        # NOTE(review): inst_data is computed but never used below - confirm
        # whether the 'map' sub-dict was meant to feed the loaders.
        inst_data = inst.get('map', dict())
        for area_key, area_data in inst.get('areas', dict()).items():
            area_class = area_data.get('class')
            self.areas[area_key] = area_class(area_key, self, area_data)
        for room_key, room_data in inst.get('rooms', dict()).items():
            room_class = room_data.get('class')
            self.rooms[room_key] = room_class(room_key, self, room_data)
        for gateway_key, gateway_data in inst.get('gateways', dict()).items():
            gateway_class = gateway_data.get('class')
            self.gateways[gateway_key] = gateway_class(gateway_key, self, gateway_data)
        # Exits can only be wired up once every room object exists.
        for room in self.rooms.values():
            room.load_exits()
        self.loaded = True

    def save(self):
        # TODO: persistence not implemented yet.
        pass
class LocationHandler(object):
    """Tracks which room (and optional x/y/z offset) an entity occupies."""

    def __init__(self, owner):
        self.owner = owner
        self.room = None
        self.x = None
        self.y = None
        self.z = None

    @property
    def map(self):
        """The map object owning the current room, or None when roomless."""
        if not self.room:
            return None
        return self.room.handler.owner

    def set(self, room, save=True):
        """Move the owner to `room`, keeping per-room/per-map registries in sync.

        `room` may be a room object or a resolvable path string. Passing a
        falsy room detaches the owner from its current location.
        """
        if isinstance(room, str):
            room = GLOBAL_SCRIPTS.entity.resolve_room_path(room)
        if room and not hasattr(room, 'map'):
            # Silently refuse invalid destinations (the raise below was
            # deliberately disabled - confirm whether that is still desired).
            return
            # raise ValueError(f"{room} is not a valid location for a game entity.")
        if room and room == self.room:
            return
        old_room = self.room
        if old_room:
            old_room.entities.remove(self.owner)
            old_room.at_unregister_entity(self.owner)
            # Only leave the map-level registry when actually changing maps.
            if not room or room.handler.owner != old_room.handler.owner:
                old_room.handler.owner.entities.remove(self.owner)
                old_room.handler.owner.at_unregister_entity(self.owner)
        self.room = room
        if room:
            if not old_room or old_room.map != room.map:
                room.handler.owner.entities.add(self.owner)
                room.handler.owner.at_register_entity(self.owner)
            room.entities.add(self.owner)
            room.at_register_entity(self.owner)
        if room and save and room.fixed:
            self.save()

    def save(self, name="logout"):
        """Persist the current (fixed) room under a named slot for recall()."""
        if not self.owner.persistent:
            return
        if not self.room:
            return
        if not self.room.fixed:
            raise ValueError("Cannot save to a non-fixed room.")
        if (loc := self.owner.saved_locations.filter(name=name).first()):
            loc.map = self.map
            loc.room_key = self.room.unique_key
            loc.x_coordinate = self.x
            loc.y_coordinate = self.y
            loc.z_coordinate = self.z
            loc.save()
        else:
            self.owner.saved_locations.create(name=name, map=self.map, room_key=self.room.unique_key,
                                              x_coordinate=self.x, y_coordinate=self.y, z_coordinate=self.z)

    def recall(self, name="logout"):
        """Teleport the owner back to the saved location named `name`."""
        if not self.owner.persistent:
            return
        if not (loc := self.owner.saved_locations.filter(name=name).first()):
            raise ValueError(f"No saved location for {name}")
        self.owner.move_to(loc.map.map.get_room(loc.room_key))
class FactionHandler(object):
    """Answers faction-membership questions for an owner."""

    def __init__(self, owner):
        self.owner = owner

    def is_member(self, faction, check_admin=True):
        """Return True if the owner belongs to `faction`, directly or via a
        member faction whose db_parent chain includes it.

        Admins always count as members unless check_admin is False.
        """
        def recursive_check(fact):
            # Walk up the db_parent chain looking for `faction`.
            checking = fact
            while checking:
                if checking == faction:
                    return True
                checking = checking.db_parent
            return False
        if hasattr(faction, 'faction_bridge'):
            faction = faction.faction_bridge
        if check_admin and self.owner.is_admin():
            return True
        # NOTE(review): `self.factions` is never assigned on this handler -
        # presumably this should read `self.owner.factions`. TODO confirm.
        if self.factions.filter(db_faction=faction).count():
            return True
        all_factions = self.factions.all()
        for fac in all_factions:
            if recursive_check(fac.db_faction):
                return True
        return False
class AllianceHandler(object):
    """Placeholder handler for alliance membership (not yet implemented)."""
    def __init__(self, owner):
        self.owner = owner
class DivisionHandler(object):
    """Placeholder handler for division membership (not yet implemented)."""
    def __init__(self, owner):
        self.owner = owner
| 2.078125 | 2 |
test/TestRelationalPickle.py | mjpan/pypatterns | 0 | 12767732 | from __future__ import with_statement
import os
import pickle
import sys
import unittest
import logging
APP_ROOT = os.getenv('APP_ROOT')
import currypy
#import pypatterns.filter as FilterModule
#import pypatterns.relational as RelationalModule
sys.path.insert(0,"../data")
class TestCase(unittest.TestCase):
    """
    COLUMNS = ['column1', 'column2', 'column3']
    PICKLE_PATH = os.path.sep + os.path.join('tmp', 'TestRelationalPickle.pickle')
    def setUp(self):
        return
    def tearDown(self):
        if os.path.exists(TestCase.PICKLE_PATH):
            os.unlink(TestCase.PICKLE_PATH)
        return
    def testTable(self):
        columns = TestCase.COLUMNS
        table = RelationalModule.createTable('test',columns)
        rowValuesList = [
            [1,2,3],
            [1,'2','3'],
            [None,None,[]]
        ]
        for rowValues in rowValuesList:
            row = table.addRow()
            map(row.setColumn, columns, rowValues)
        self.assertPickleable(table)
        unpickledTable = self.assertJsonPickleable(table)
        self.assertEquals(table.rowCount(), unpickledTable.rowCount())
        for actualValues, expectedValues in zip(unpickledTable.retrieve(columns=['column1', 'column2', 'column3']), rowValuesList):
            self.assertEquals(actualValues, expectedValues)
        return
    def testRow(self):
        columns = TestCase.COLUMNS
        table = RelationalModule.createTable('test',columns)
        expectedValues = [1,2,3]
        row1 = table.addRow()
        map(row1.setColumn, columns, expectedValues)
        for column, expectedValue in zip(columns, expectedValues):
            assert row1.getColumn(column) == expectedValue
            pass
        self.assertPickleable(row1)
        unpickledRow = self.assertJsonPickleable(row1)
        self.assertEquals(row1.values(), unpickledRow.values())
        return
    def assertPickleable(self, objectToPickle):
        with open(TestCase.PICKLE_PATH, 'w') as f:
            pickle.dump(objectToPickle, f)
        with open(TestCase.PICKLE_PATH, 'r') as f:
            newObject = pickle.load(f)
        return
    def assertJsonPickleable(self, objectToPickle):
        import jsonpickle
        pickle = jsonpickle.encode(objectToPickle)
        unpickledObject = jsonpickle.decode(pickle)
        return unpickledObject
    """
    # END class TestCase
    # NOTE(review): the entire test body above is disabled - it lives inside
    # the class docstring (its pypatterns imports are commented out at the
    # top of the file). Un-stringify it once those imports are restored.
    pass
def main():
    """Build the (currently empty) TestCase suite and run it with a text runner."""
    suite = unittest.makeSuite(TestCase, 'test')
    runner = unittest.TextTestRunner()
    runner.run(suite)
    return
if __name__=="__main__":
main()
| 2.453125 | 2 |
setup.py | UCSC-nanopore-cgl/NaRLE | 0 | 12767733 | <filename>setup.py
from version import version, required_versions
from setuptools import find_packages, setup
# setuptools arguments collected in a dict so they can be inspected/extended.
# NOTE(review): dict.iteritems() below is Python-2 only (AttributeError on
# Python 3) - confirm the pipeline still targets Python 2.
kwargs = dict(
    name='toil-narle',
    version=version,
    description="UCSC CGL Nanopore Toil pipeiline",
    author='UCSC Computational Genomics Lab',
    author_email='<EMAIL>',
    url="https://github.com/",
    install_requires=[x + y for x, y in required_versions.iteritems()],
    tests_require=['pytest==2.8.3'],
    package_dir={'': 'src'},
    packages=find_packages('src'),
    entry_points={
        'console_scripts': ['toil-narle = narle.narle_pipeline:main']})

setup(**kwargs)
| 1.351563 | 1 |
code/day08_joe.py | denis-spe/my_advent2020 | 1 | 12767734 | from __future__ import annotations
from typing import NamedTuple, List
# Sample boot program from the Advent of Code Day 8 puzzle statement,
# used by the sanity-check asserts further down.
RAW = """nop +0
acc +1
jmp +4
acc +3
jmp -3
acc -99
acc +1
jmp -4
acc +6"""
class Instruction(NamedTuple):
    """A single boot-code instruction: an operation name and its integer argument."""
    op: str
    arg: int

    @staticmethod
    def parse(line: str) -> Instruction:
        """Parse a line like 'acc +3' into Instruction('acc', 3)."""
        opcode, raw_arg = line.strip().split(" ")
        return Instruction(opcode, int(raw_arg))
# Parsed form of the sample program above.
INSTRUCTIONS = [Instruction.parse(line) for line in RAW.split("\n")]
class Booter:
    """Interpreter for the hand-held boot code (ops: acc / jmp / nop)."""

    def __init__(self, instructions: List[Instruction]) -> None:
        self.instructions = instructions
        self.accumulator = 0
        self.idx = 0
        self.executed = set()

    def execute_one(self) -> None:
        """Run the instruction at the current index and advance."""
        op, arg = self.instructions[self.idx]
        if op == "nop":
            self.idx += 1
        elif op == "jmp":
            self.idx += arg
        elif op == "acc":
            self.accumulator += arg
            self.idx += 1
        else:
            raise ValueError(f"Unknow op {op}")

    def run_until_repeat(self) -> None:
        """Execute until an instruction index is about to run a second time."""
        seen = set()
        while self.idx not in seen:
            seen.add(self.idx)
            self.execute_one()

    def does_terminate(self) -> bool:
        """Return True if execution reaches one past the last instruction."""
        seen = set()
        while self.idx not in seen:
            if self.idx == len(self.instructions):
                return True
            seen.add(self.idx)
            self.execute_one()
        return False
# Part 1 sanity check on the sample program: accumulator is 5 at first repeat.
BOOTER = Booter(INSTRUCTIONS)
BOOTER.run_until_repeat()
assert BOOTER.accumulator == 5

# NOTE(review): hard-coded absolute Google Drive path - this only resolves on
# the author's machine.
with open('/Volumes/GoogleDrive/My Drive/my_advent2020/inputs/day08_part2.txt') as f:
    raw = f.read()

instructions = [Instruction.parse(line) for line in raw.split('\n')]
booter = Booter(instructions)
booter.run_until_repeat()
print(booter.accumulator)
def find_terminator(instructions: List[Instruction]) -> int:
    """Return the accumulator after repairing the single corrupted jmp/nop.

    Tries swapping each jmp<->nop, one at a time, until the program terminates.
    """
    for index, (op, arg) in enumerate(instructions):
        if op == 'nop':
            replacement = Instruction('jmp', arg)
        elif op == 'jmp':
            replacement = Instruction('nop', arg)
        else:
            continue
        candidate = instructions[:]
        candidate[index] = replacement
        booter = Booter(candidate)
        if booter.does_terminate():
            return booter.accumulator
    raise RuntimeError("Never terminated")
# Part 2: verify on the sample, then solve the real input read above.
assert find_terminator(INSTRUCTIONS) == 8
print(find_terminator(instructions))
| 3.234375 | 3 |
pokeyadventure/game_objects/tiles.py | wnormandin/PokeyAdventure | 0 | 12767735 | <filename>pokeyadventure/game_objects/tiles.py
import pygame
import os
import random
import sys
import pdb
from .core import GameObject
class Tile(GameObject):
    """Base map tile: passability, texture loading and fog-of-war state."""

    def __init__(self, pos):
        super().__init__(pos)
        self.passable = True # Determines whether char can pass through tile
        self.locked = False
        self.image_base_path = 'resources/textures'
        self.image_filename = 'black_wood_wall_25px.png'
        self.traps = []
        self.metachar = '#'  # single-character map representation
        self.explored = False # Changes to true when the player has visited this tile
        self.max_alpha = 255  # 255 = fully fogged until explored
        self.fog = None       # fog surface, assigned externally via _get_fog()

    def visible(self, p):
        """Return True (clearing this tile's fog) if player `p` is within 5 cells."""
        if min(self._pt_distance(p.position)) < 5:
            self.fog.set_alpha(0)
            return True
        else:
            self.fog.set_alpha(self.max_alpha)
            return False

    def explore(self):
        """Mark the tile explored: cap its fog at a semi-transparent alpha."""
        self.max_alpha = 120
        self.fog.set_alpha(120)

    def _get_fog(self):
        # Creates the fog layer for this particular cell
        # NOTE(review): the surface is sized position*CELL_SIZE rather than
        # CELL_SIZE x CELL_SIZE - confirm this is intentional.
        y, x = self.position
        fog = pygame.Surface((x * self.CELL_SIZE, y * self.CELL_SIZE))
        fog.set_alpha(self.max_alpha)
        fog.fill((0, 0, 0))
        return fog

    def _load_image(self):
        """Load this tile's texture from image_base_path/image_filename."""
        file_path = os.path.join(os.getcwd(), self.image_base_path)
        file_path = os.path.join(file_path, self.image_filename)
        self.image = pygame.image.load(file_path)
class Floor(GameObject):
    """Mixin for walkable floor tiles."""
    def __init__(self, pos):
        super().__init__(pos)
        self.damage = 0 # Assign for DPS
        self.false = False # Indicate whether this is a false door
        self.false_image = ''
class Door(GameObject):
    """Mixin for lockable door tiles."""
    def _toggle_lock(self):
        # Using MI, ensures 2nd parent __init__ is called
        # Flip the lock; passability always mirrors the unlocked state.
        self.locked = not self.locked
        self.passable = not self.locked
class Wall(GameObject):
    """Mixin for impassable wall tiles."""
    def __init__(self, pos):
        super().__init__(pos)
        self.passable = False
class MossyFloor(Tile, Floor):
    """Mossy floor tile (map char 'm')."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'mossy_floor_25px.png'
        self._load_image()
        self.metachar = 'm'
class BarnDoor(Tile, Door):
    """Barn door tile (map char 'D'); starts unlocked."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'barn_door_25px.png'
        self._load_image()
        self.metachar = 'D'
class LavaFloor(Tile, Floor):
    """Lava floor tile (map char 'L'); deals 1 damage."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'fire_ground_25px.png'
        self._load_image()
        self.metachar = 'L'
        self.damage = 1
class WoodWall(Tile, Wall):
    """Dark wooden wall tile (map char 'W'); impassable via Wall."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'black_wood_wall_25px.png'
        self._load_image()
        self.metachar = 'W'
class ScreenFloor(Tile, Floor):
    """Dark screen floor tile (map char 's')."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'dark_screen_25px.png'
        self._load_image()
        self.metachar = 's'
class SnowFloor(Tile, Floor):
    """Snowy path floor tile (map char 'S')."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'snowy_path_25px.png'
        self._load_image()
        self.metachar = 'S'
class BrickWall(Tile, Wall):
    """Red brick wall tile (map char 'w'); impassable via Wall."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'red_brick_25px.png'
        self._load_image()
        self.metachar = 'w'
class WoodDoor(Tile, Door):
    """Wooden door tile (map char 'd'); starts locked."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'wooden_door_25px.png'
        self._load_image()
        self._toggle_lock() # Start locked
        self.metachar = 'd'
class Curtain(Tile):
    """Decorative curtain tile (map char 'C'), available in red or blue."""

    RED = 0
    BLUE = 1

    def __init__(self, pos, color=0):
        # Fixed: Tile.__init__ requires a position; the old
        # `super().__init__()` omitted both `pos` arguments and raised
        # TypeError (the signature now matches every other tile class).
        super().__init__(pos)
        template = 'curtain_{}_25px.png'
        if color == Curtain.RED:
            # Fixed: the formatted filename was previously discarded, so the
            # tile always loaded the Tile default texture.
            self.image_filename = template.format('red')
        elif color == Curtain.BLUE:
            self.image_filename = template.format('blue')
        else:
            raise ValueError("Color {} not found".format(color))
        self._load_image()
        self.metachar = 'C'
class TudorWall(Tile, Wall):
    """Interior wooden wall tile (map char 'T'); impassable via Wall."""
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'interior_wood_wall_25px.png'
        self._load_image()
        self.metachar = 'T'
class BrickFloor(Tile, Floor):
    """Gray brick floor tile (map char 'B').

    Now mixes in Floor like every other *Floor tile (MossyFloor, LavaFloor,
    ScreenFloor, SnowFloor) so it gains the shared damage/false attributes.
    """
    def __init__(self, pos):
        super().__init__(pos)
        self.image_filename = 'gray_brick_25px.png'
        self._load_image()
        self.metachar = 'B'
| 2.734375 | 3 |
d7a/system_files/dll_config.py | christophevg/pyd7a | 3 | 12767736 | <gh_stars>1-10
import struct
from d7a.support.schema import Validatable, Types
from d7a.system_files.system_file_ids import SystemFileIds
class DllConfigFile(Validatable):
    """D7A DLL configuration system file: active access class + VID."""

    SCHEMA = [{
        "active_access_class": Types.INTEGER(min=0, max=20),
        "vid": Types.INTEGER(min=0, max=0xFFFF)
        # TODO others
    }]

    def __init__(self, active_access_class=0, vid=0xFFFF):
        self.active_access_class = active_access_class
        self.vid = vid
        super(DllConfigFile, self).__init__()

    def file_id(self): # TODO base class
        return SystemFileIds.DLL_CONFIG

    @staticmethod
    def parse(s):
        """Parse from a bitstream: 1 byte access class, 2 bytes big-endian VID."""
        ac = s.read("uint:8")
        vid = s.read("uint:16")
        return DllConfigFile(active_access_class=ac, vid=vid)

    def __iter__(self):
        """Yield the serialized bytes: access class, then VID big-endian."""
        yield self.active_access_class
        for byte in bytearray(struct.pack(">H", self.vid)):
            yield byte
| 2.28125 | 2 |
snips_nlu/tests/test_version.py | ddorian/snips-nlu | 1 | 12767737 | <filename>snips_nlu/tests/test_version.py
from semantic_version import Version
from snips_nlu import __model_version__, __version__
from snips_nlu.tests.utils import SnipsTest
class TestVersion(SnipsTest):
    """Checks that both package version strings parse as valid SemVer."""

    def test_version_should_be_semantic(self):
        # Given
        version = __version__
        # When: Version() raises ValueError on non-semantic strings.
        valid = False
        try:
            Version(version)
            valid = True
        except ValueError:
            pass
        # Then
        self.assertTrue(valid, "Version number '%s' is not semantically valid"
                        % version)

    def test_model_version_should_be_semantic(self):
        # Given
        model_version = __model_version__
        # When
        valid = False
        try:
            Version(model_version)
            valid = True
        except ValueError:
            pass
        # Then
        self.assertTrue(valid, "Version number '%s' is not semantically valid"
                        % model_version)
| 2.5625 | 3 |
flask_test.py | Zihang97/News_Analyzer_app | 1 | 12767738 | from flask import Flask, escape, request, redirect, url_for, render_template
from database import *
from news import *
app = Flask(__name__)
@app.route('/register/', methods=['GET','POST'])
def regist():
if request.method =='POST':
username = request.form['username']
password = request.form['password']
repassword = request.form['repassword']
users = GETUSER()
if password == repassword:
if username in users:
return 'user already exist'
else:
insertuser(username, password)
createtable(username)
#after register userinformation is saved in user list as a dictionary
return redirect('/')
#user will be redirect to login page after register.
else:
return('password should be identical to repassword')
return render_template('regist.html')
@app.route('/', methods=['GET','POST'])
def index():
    """Landing page."""
    return render_template('index.html')
#this is the login page, we post our information and it can check wheter our information is in user list
@app.route('/login', methods=['GET','POST'])
def login():
    """Login page: check submitted credentials against the user store."""
    if request.method =='POST':
        username = request.form['username']
        password = request.form['password']
        users = GETUSER()
        if username in users:
            # NOTE(review): plaintext password comparison - confirm whether
            # GETUSER() returns hashed values.
            if password == users[username]:
                return redirect(url_for('main', name = username))
            else:
                return render_template('login_return.html', text = 'Wrong Password!')
        else:
            return render_template('login_return.html', text = 'Username Not Found, please register first!')
    return render_template('login.html')
#check if we have user information in our list, if we do the user is successfully login.
#else, he or she either does not regist or enters wrong information
@app.route('/mainpage/<name>', methods = ['GET', 'POST'])
def main(name):
    """User landing page after a successful login."""
    return render_template('mainpage.html', name = name)
@app.route('/profile/<name>', methods = ['GET', 'POST'])
def profile(name):
    """List the files belonging to this user."""
    # NOTE(review): `password` is not defined anywhere in this module, so this
    # raises NameError at request time - confirm where the credential should
    # come from (a session, perhaps).
    filenames = GETALL(password, name)
    return render_template('profile.html', name = name, filenames = filenames)
@app.route('/file/display/<name>/<filename>', methods = ['GET', 'POST'])
def display(name, filename):
    """Show one file's contents."""
    # NOTE(review): relies on the undefined module-level `password` (see profile).
    file, content = GET(password, name, filename)
    return render_template('display_file.html', name=name, file=file, content=content)
@app.route('/uploader/<name>', methods = ['GET', 'POST'])
def uploader(name):
    """Accept a file upload, stage it on disk, then persist it via POST()."""
    if request.method == 'POST':
        f = request.files['file']
        # NOTE(review): f.filename is used unsanitized in a filesystem path -
        # consider werkzeug.utils.secure_filename to prevent path traversal.
        f.save('./File_buffer/' + f.filename)
        POST(password, name, './File_buffer/' + f.filename, f.filename)
        return render_template("return.html", name=name)
@app.route('/file/file_update/<name>', methods = ['GET', 'POST'])
def update(name):
    """Render the file-update form."""
    return render_template('update.html', name=name)
@app.route('/file/file_update_result/<name>', methods = ['GET', 'POST'])
def updating(name):
    """Apply an edit to a file, then return to the main page."""
    if request.method == 'POST':
        creator = request.form['author']
        new_content = request.form['new_content']
        # NOTE(review): uses the undefined module-level `password` (see profile).
        PUT(password, name, creator, new_content)
        return redirect(url_for('main', name = name))
@app.route('/file/file_delete/<name>', methods = ['GET', 'POST'])
def delete(name):
    """Render the file-delete form."""
    return render_template('delete.html', name=name)
@app.route('/file/file_deleting/<name>', methods = ['GET', 'POST'])
def deleting(name):
    """Delete the named author's file, then return to the main page."""
    if request.method == 'POST':
        creator = request.form['author']
        # NOTE(review): uses the undefined module-level `password` (see profile).
        DELETE(password, name, creator)
        return redirect(url_for('main', name = name))
@app.route('/file/file_query/<name>', methods = ['GET', 'POST'])
def query(name):
    """Full-text search over this user's files."""
    if request.method == 'POST':
        keyword = request.form['keyword']
        pass_key = keyword + ' '
        # NOTE(review): uses the undefined module-level `password` (see profile).
        results = search(password, name, pass_key)
        return render_template('display.html', name=name, results = results, keyword = pass_key)
    # NOTE(review): GET requests fall through and return None, which Flask
    # treats as an error - consider rendering a search form instead.
@app.route('/news/<name>', methods = ['GET', 'POST'])
def news(name):
    """Render the news-search form."""
    return render_template('news_search.html', name=name)
@app.route('/news/query/<name>', methods = ['GET', 'POST'])
def news_query(name):
    """Search external news by keyword/page and render the results page."""
    if request.method == 'POST':
        keyword = request.form['news_keyword']
        pagenum = request.form['page']
        title, date, link = search_news(keyword, int(pagenum))
        if title != '':
            return render_template('news_display.html', name=name,title=title, date=date, link=link)
        else:
            return render_template('news_display.html', name=name, title='No file matched', date=date, link=link)
    # NOTE(review): GET requests return None (Flask error).
if __name__ == '__main__':
    # Run the Flask development server (not suitable for production).
    app.run()
| 3.171875 | 3 |
tcc-teste.py | felipetomm/POX-Django | 1 | 12767739 | <reponame>felipetomm/POX-Django
#!/usr/bin/python2.7
import time
from threading import Thread
import threading, Queue
class cabeca(object):
    """Tiny demo class holding a single name attribute."""
    # NOTE(review): the script below rebinds the name `cabeca` to an instance
    # of this class, making further instantiation impossible afterwards.
    def __init__(self):
        self.nome = None
def check_infos(user_id, queue):
    """Compute the dummy score for user_id and push it onto the given queue."""
    score = user_id + 5 * 2 + 4 * 20
    queue.put(score)
def soma(i):
    """Return i plus the fixed offset 5*2 + 4*20 (= 90)."""
    offset = 5 * 2 + 4 * 20
    return i + offset
queued_request = Queue.Queue()

# Build two 6000-element work lists with slightly different seed values.
lista_teste = []
lista_teste_2 = []
for i in range(6000):
    lista_teste.append(i+3)
    lista_teste_2.append(i+10)

# Timed run 1: spawn one thread per element and pull each result back off the
# shared queue (synchronously, so there is no real parallelism).
# NOTE(review): time.clock() was removed in Python 3.8 - this script targets
# Python 2.7 (see shebang).
tempo = time.clock()
for i in range(6000):
    check_infos_thread = threading.Thread(target=check_infos, args=(lista_teste[i], queued_request))
    check_infos_thread.start()
    final_result = queued_request.get()
    lista_teste[i] = final_result
print "Tempo Thread %s"%(time.clock()-tempo)

# Timed run 2: the same computation done inline, for comparison.
tempo = time.clock()
for i in range(6000):
    teste = soma(lista_teste_2[i])
    lista_teste_2[i] = teste
print "Tempo Normal %s"%(time.clock()-tempo)

#print lista_teste

# Demo of the `cabeca` class: share one instance across two list slots.
cabeca = cabeca()
cabeca.nome = "Felipe"
for i in range(2):
    lista_teste[i] = cabeca
print lista_teste[0].nome
| 2.609375 | 3 |
TwitterAnalysis/config.py | victorreyes93/Udacity-DevOps-Capstone-Project | 0 | 12767740 | KEY = '<KEY>
| 1.132813 | 1 |
src/nhl_analysis/example.py | pjordan34/nhl_analysis | 0 | 12767741 | import pandas as pd
from aggregate import team_goals
from transform import transform
pbp = pd.read_csv('data/nhl_pbp20172018.csv')
# note that you can use the "uncleaned pbp files in this code,
# you just will not be able to index on the the standard three-letter abbreviations for all teams
print(team_goals(pbp))
pbp = transform(pbp)
print(pbp.head()) | 2.6875 | 3 |
lstm/model.py | KalllN/stock-price-lstm | 0 | 12767742 | <reponame>KalllN/stock-price-lstm
# NOTE(review): Sequential/LSTM/Dense and x_train are not defined in this
# snippet -- they must be imported/prepared elsewhere (Keras-style API).
model = Sequential()
# Two stacked 50-unit LSTM layers; the first returns full sequences so the
# second can consume them.  Declared input shape is (x_train.shape[1], 1).
model.add(LSTM(50, return_sequences = True, input_shape = (x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences = False))
# Dense head narrowing to a single output value.
model.add(Dense(25))
model.add(Dense(1))
#Compiling the model
model.compile(optimizer = 'adam', loss = 'mean_squared_error')
# NOTE(review): despite the "#using rmse" note below, the configured loss
# is plain mean squared error, not RMSE.
#using rmse
| 3.109375 | 3 |
replica/contrib/whisper/urls.py | underlost/Replica | 0 | 12767743 | <reponame>underlost/Replica<filename>replica/contrib/whisper/urls.py
from __future__ import absolute_import
from django.conf.urls import *
from django.views.generic import TemplateView
from django.views.decorators.cache import cache_page
# Empty URLconf for the "whisper" app -- no routes are registered yet.
# NOTE(review): patterns() is the legacy Django URL API (deprecated in
# Django 1.8, removed in 1.10); TemplateView and cache_page are imported
# but currently unused here.
urlpatterns = patterns('',
)
| 1.304688 | 1 |
importers/unsdg_indicator.py | codeforIATI/codelist-updater | 1 | 12767744 | <filename>importers/unsdg_indicator.py
from .helpers import Importer
def run():
    """Import the UNSDG indicator codelist from its published Google Sheet."""
    sheet_url = 'https://docs.google.com/spreadsheets/d/1o1SQDqfFTBgUJO2k83mfFlLtgpwUUkEbc3gjRgIWJSo/export?format=csv&id=1o1SQDqfFTBgUJO2k83mfFlLtgpwUU<KEY>WJSo&gid=1990382168'
    # Mapping of source-sheet column names to codelist field names.
    field_map = [
        ('code', 'code'),
        ('name_en', 'name_en'),
        ('name_fr', 'name_fr'),
        ('category', 'category'),
        ('@status', 'status'),
    ]
    Importer('UNSDG-Indicators', sheet_url, field_map)
if __name__ == '__main__':
    # Allow running this importer directly as a script.
    run()
| 2.234375 | 2 |
pieces/knight.py | Jone1/chessPyQt | 0 | 12767745 | <reponame>Jone1/chessPyQt
from pieces.piece import AbstractPiece
__author__ = 'Jone'
class Knight(AbstractPiece):
    """Knight chess piece: moves two squares on one axis and one on the other."""

    # Sprite assets for the two piece colours (absolute workstation paths,
    # kept exactly as in the original).
    src_white = "D:/workspace/chessQt/chessQt/gfx/nw.png"
    src_black = "D:/workspace/chessQt/chessQt/gfx/nb.png"

    def __init__(self, x, y, color):
        super(Knight, self).__init__(x, y, color)

    def moveValidator(self, x, y):
        # A knight move is valid exactly when the absolute displacements on
        # the two axes are 1 and 2, in either order.
        dx = abs(self.x - x)
        dy = abs(self.y - y)
        return (dx, dy) in ((1, 2), (2, 1))
notebook/utils/.ipynb_checkpoints/viz-checkpoint.py | aws-samples/automl-blueprint | 3 | 12767746 | <gh_stars>1-10
import json
import os
import matplotlib.pyplot as plt
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
import seaborn as sns
import pandas as pd
import numpy as np
import shap
from sklearn.metrics import roc_curve, auc, RocCurveDisplay, confusion_matrix
import boto3
from sagemaker.s3 import S3Downloader
class ModelInspector:
    """Inspect binary-classification results produced by a SageMaker job.

    Loads the scored results from S3 and offers ROC / confusion-matrix
    visualisations plus SageMaker Clarify (SHAP) explanations.
    """

    def __init__(self, config):
        """Read the bucket/prefix layout from *config* and load the results.

        config keys used:
          workspace                 -- S3 bucket name
          prefixes.results_path     -- prefix of the scored-results CSVs
          prefixes.bias_path        -- prefix of the Clarify bias report
          prefixes.xai_path         -- prefix of the Clarify SHAP output
          results-config.gt_index   -- column index of the ground truth
          results-config.pred_index -- column index of the prediction
        """
        self.bucket = config["workspace"]
        self.results_prefix = config["prefixes"]["results_path"]
        self.bias_prefix = config["prefixes"]["bias_path"]
        self.xai_prefix = config["prefixes"]["xai_path"]
        self.gt_idx = config["results-config"]["gt_index"]
        self.pred_idx = config["results-config"]["pred_index"]
        self.s3 = boto3.client("s3")
        self.results_df = self._get_merged_df(self.bucket, self.results_prefix)

    def get_results(self):
        """Return the merged results DataFrame."""
        return self.results_df

    def _get_merged_df(self, bucket, prefix, has_header=True, maxkeys=10):
        """Concatenate up to *maxkeys* CSV objects under s3://bucket/prefix.

        When *has_header* is true the first row of every object is skipped;
        columns are always read positionally (header=None).
        """
        frames = []
        # Hoisted out of the loop: the skip count is the same per object.
        skip = 1 if has_header else 0
        resp = self.s3.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=maxkeys)
        for obj in resp['Contents']:
            frames.append(pd.read_csv("s3://{}/{}".format(bucket, obj["Key"]),
                                      skiprows=skip, header=None))
        return pd.concat(frames)

    def get_roc_curve(self, gt_index=0, pred_index=1, display=True, model_name="autopilot-model"):
        """Compute the ROC curve and AUC for the loaded results.

        Returns (viz, roc_auc, fpr, tpr, thresholds).  When *display* is
        true the curve is plotted immediately.
        NOTE: gt_index/pred_index are currently unused (kept for backward
        compatibility); the configured self.gt_idx/self.pred_idx columns
        are what is actually read.
        """
        y = self._y()
        yh = self._yh()
        fpr, tpr, thresholds = roc_curve(y, yh)
        roc_auc = auc(fpr, tpr)
        viz = RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc, estimator_name=model_name)
        if display:
            viz.plot()
        return viz, roc_auc, fpr, tpr, thresholds

    def visualize_auc(self, fpr, tpr, thresholds):
        """Plot FPR and TPR as (unstacked) area curves against the threshold."""
        df = pd.DataFrame({
            "False Positive Rate": fpr,
            "True Positive Rate": tpr,
            "Threshold": thresholds,
        })
        axes = df.plot.area(stacked=False, x="Threshold", figsize=(20, 3), colormap='RdGy', alpha=0.3)
        axes.set_xlabel("Threshold")
        axes.set_ylabel("Rate")
        axes.set_xlim(0, 1.0)
        axes.set_ylim(0, 1.0)

    def _y(self):
        """Ground-truth column of the results."""
        return self.results_df[self.gt_idx]

    def _yh(self):
        """Predicted-score column of the results."""
        return self.results_df[self.pred_idx]

    def display_interactive_cm(self, start=0.5, min=0.0, max=1.0, step=0.05):
        """Show a confusion-matrix heatmap driven by a threshold slider.

        NOTE: the *min*/*max* parameter names shadow builtins; kept as-is
        for backward compatibility with existing callers.
        """
        y = self._y()
        yh = self._yh()

        def cm_heatmap_fn(Threshold):
            # Binarise the scores at the chosen threshold and draw a 2x2
            # heatmap annotated with quadrant name, count and percentage.
            cm = confusion_matrix(y, yh >= Threshold).astype(int)
            names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
            counts = ["{0:0.0f}".format(value) for value in cm.flatten()]
            pcts = ["{0:.2%}".format(value) for value in cm.flatten() / np.sum(cm)]
            labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(names, counts, pcts)]
            labels = np.asarray(labels).reshape(2, 2)
            sns.heatmap(cm, annot=labels, fmt='', cmap='Blues')

        thresh_slider = widgets.FloatSlider(value=start, min=min, max=max, step=step)
        interact(cm_heatmap_fn, Threshold=thresh_slider)

    def _download_clarify_xai_summary(self):
        """Download and parse the Clarify analysis.json summary.

        Returns the parsed dict, or None (after logging) on failure.
        """
        # Built before the try so it is always in scope for the error message.
        summary_uri = f"s3://{self.bucket}/{self.xai_prefix}/analysis.json"
        try:
            S3Downloader.download(summary_uri, os.getcwd())
            with open('analysis.json', 'r') as f:
                return json.loads(f.read())
        except Exception as e:
            # BUG FIX: the original referenced the undefined name
            # "xai_summary" here, raising a NameError instead of reporting
            # the failed download.
            print(f"{e}: Failed to download {summary_uri}")

    def explain_prediction(self, data_row_id):
        """Print a summary and draw a SHAP force plot for one result row."""
        xai_summary = self._download_clarify_xai_summary()
        columns = list(xai_summary['explanations']['kernel_shap']['label0']["global_shap_values"].keys())
        xai_results = f"s3://{self.bucket}/{self.xai_prefix}/explanations_shap/out.csv"
        shap_df = pd.read_csv(xai_results)
        y = self._y()
        yh = self._yh()
        descr = "Yes, this client opened a term deposit. " if (y.iloc[data_row_id]) else "No, this client did not open a term deposit. "
        # NOTE(review): "{:3f}" is width-3 full precision; "{:.3f}" may have
        # been intended -- kept as-is to preserve the original output.
        descr += "The model predicts that the probability that this prospect will open a deposit is {:3f}. \n".format(yh.iloc[data_row_id])
        print(descr)
        expected_value = xai_summary['explanations']['kernel_shap']['label0']['expected_value']
        shap.force_plot(expected_value, np.array(shap_df.iloc[data_row_id, :]), np.array(columns), matplotlib=True)
aws-hub-account/functions/aws/fetch-aws-findings.py | infviz-io/cloud-security-console | 0 | 12767747 | <filename>aws-hub-account/functions/aws/fetch-aws-findings.py
import json
import os
import boto3
import stdFn
def request_handler(event, context):
    """Lambda entry point: pull Security Hub findings and store them in DynamoDB.

    Returns {"Processed": n} on success, or False if the fetch/setup fails.
    """
    try:
        # Fetch up to 100 findings from AWS Security Hub.
        hub = boto3.client('securityhub')
        findings = hub.get_findings(MaxResults=100)['Findings']
        # Connect to the findings DynamoDB table named by the environment.
        table_name = os.environ['DBAWSFindings']
        dynamo = boto3.resource('dynamodb')
        findings_table = dynamo.Table(table_name)
    except Exception as e:
        print("Issue getting findings (%s)" %(e))
        return False
    # Store the findings in batches of 25, counting how many were written.
    stored = 0
    for batch in stdFn.divide_chunks(findings, 25):
        stored += stdFn.store_dynamo(findings_table, batch)
    account_id = boto3.client('sts').get_caller_identity().get('Account')
    stdFn.envStatus(os.environ['DBEnv'], 'MASTER', account_id, 'Processed %s events' %(stored))
    return {"Processed": stored}
| 2.046875 | 2 |
tests/components/camera/test_dispatcher.py | Norien/Home-Assistant | 2 | 12767748 | """The tests for dispatcher camera component."""
import asyncio
from homeassistant.setup import async_setup_component
from homeassistant.helpers.dispatcher import async_dispatcher_send
@asyncio.coroutine
def test_run_camera_setup(hass, test_client):
    """Test that it fetches the given dispatcher data."""
    # Register a dispatcher camera listening on the 'test_camera' signal.
    yield from async_setup_component(hass, 'camera', {
        'camera': {
            'platform': 'dispatcher',
            'name': 'dispatcher',
            'signal': 'test_camera',
        }})
    client = yield from test_client(hass.http.app)
    # A frame sent on the signal should be served by the camera proxy.
    async_dispatcher_send(hass, 'test_camera', b'test')
    yield from hass.async_block_till_done()
    resp = yield from client.get('/api/camera_proxy/camera.dispatcher')
    assert resp.status == 200
    body = yield from resp.text()
    assert body == 'test'
    # A second frame replaces the first; the proxy must serve the new data.
    async_dispatcher_send(hass, 'test_camera', b'test2')
    yield from hass.async_block_till_done()
    resp = yield from client.get('/api/camera_proxy/camera.dispatcher')
    assert resp.status == 200
    body = yield from resp.text()
    assert body == 'test2'
| 2.546875 | 3 |
src/test_results_parsing/test_cutest_parser.py | AAU-PSix/canary | 0 | 12767749 | import unittest
from test_results_parsing.parser_cutest import CuTestParser, FailedCuTest
from typing import List
from . import *
class TestCutestParser(unittest.TestCase):
    """Tests for CuTestParser's parsing of CuTest failure-report lines.

    The heavily duplicated arrange/act/assert bodies of the original are
    factored into two private helpers; every discovered test name is
    preserved so the suite's test IDs do not change.
    """

    def setUp(self) -> None:
        return super().setUp()

    def _assert_parsed(self, line: str, expected: str, actual: str) -> None:
        # Helper: parse one failure line via parse() and check the
        # extracted expected/actual values.
        failed_cutests = CuTestParser().parse([line])
        self.assertEqual(expected, failed_cutests[0].expected)
        self.assertEqual(actual, failed_cutests[0].actual)

    def _assert_parsed_single(self, line: str, expected: str, actual: str) -> None:
        # Helper: same check, but through parse_single_line().
        failed_cutest = CuTestParser().parse_single_line(line)
        self.assertEqual(expected, failed_cutest.expected)
        self.assertEqual(actual, failed_cutest.actual)

    def test_parse_string_with_colon(self) -> None:
        # Colons inside the payload must not confuse the parser.
        self._assert_parsed(
            "5) Test_CuAssertPtrEquals: /input/tests/AllTests.c:55: expected <Test Hest: Blæst> but was <Pøls: 1 2 3>",
            "Test Hest: Blæst",
            "Pøls: 1 2 3")

    def test_parse_int_result(self) -> None:
        self._assert_parsed(
            "5) Test_CuAssertPtrEquals: /input/tests/AllTests.c:55: expected <100> but was <69>",
            "100",
            "69")

    def test_parse_double_result(self) -> None:
        self._assert_parsed(
            "5) Test_CuAssertPtrEquals: /input/tests/AllTests.c:55: expected <200.00> but was <69.00>",
            "200.00",
            "69.00")

    def test_parse_pointer_result(self) -> None:
        self._assert_parsed(
            "5) Test_CuAssertPtrEquals: /input/tests/AllTests.c:55: expected pointer <0x0x16c17e0> but was <0x0x16c1800>",
            "0x0x16c17e0",
            "0x0x16c1800")

    def test_parse_assert_result(self) -> None:
        # Plain "assert failed" lines map to expected=true / actual=false.
        self._assert_parsed(
            "5) Test_CuAssertGuguGaga: /input/tests/AllTests.c:55: assert failed",
            "true",
            "false")

    def test_parse_no_testname(self) -> None:
        # A line without a "N) TestName:" prefix yields no failure entries.
        failed_cutests = CuTestParser().parse(
            ["/input/tests/AllTests.c:55: assert failed"])
        self.assertEqual(failed_cutests, [])

    def test_parse_no_testmessage(self) -> None:
        # A line whose message is not a recognised assertion parses to a
        # None placeholder entry.
        failed_cutests = CuTestParser().parse(
            ["500) Test_CuAssertHest: /input/tests/AllTests.c:55: Noget helt andet"])
        self.assertEqual(failed_cutests, [None])

    def test_single_parse_pointer(self) -> None:
        self._assert_parsed_single(
            "5) Test_CuAssertPtrEquals: /input/tests/AllTests.c:55: expected pointer <0x0x16c17e0> but was <0x0x16c1800>",
            "0x0x16c17e0",
            "0x0x16c1800")

    def test_single_single_parse_int(self) -> None:
        # (Method name kept verbatim, typo included, so test IDs are stable.)
        self._assert_parsed_single(
            "1) addTest_1_1: /input/tests/AllTests.c:25: expected <12> but was <1>",
            "12",
            "1")
ASK/fixtures/params.py | dmchu/ASK_Selenium | 0 | 12767750 | import os
import random
cwd = os.path.abspath(os.getcwd())
location_chrome = "../browsers/chromedriver"
location_firefox = "../browsers/geckodriver"
DOMAIN = "http://local.school.portnov.com:4520/#"
browsers = [
"chrome",
"firefox"
]
BROWSER_TYPE = random.choice(browsers)
CHROME_EXECUTABLE_PATH = os.path.join(cwd, location_chrome)
FIREFOX_EXECUTABLE_PATH = os.path.join(cwd, location_firefox)
EXPLICIT_TIMEOUT = 10
# Just example of some othe timeouts
# SLOW_TIMEOUT = 30
| 2.328125 | 2 |