max_stars_repo_path
stringlengths 3
269
| max_stars_repo_name
stringlengths 4
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.05M
| score
float64 0.23
5.13
| int_score
int64 0
5
|
|---|---|---|---|---|---|---|
python/mergePDFs.py
|
jbelenag/scripts
| 0
|
12774151
|
<reponame>jbelenag/scripts<filename>python/mergePDFs.py
# -*- coding: utf-8 -*-
#####################################################
# name: mergePDFs.py #
# author: <NAME> #
# description: given a root directory, merges #
# all the found .pdf files                    #
# following alphabetical order #
# pyPdf2 is needed: pip install pyPdf2 #
#####################################################
"""
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# mergePDFs.py :: given a root directory, merges all the founded .pdf files
# following alphabetical order
#
# Version: 1.00
#
# Usage:
# mergePDFs.py
# -----------------------------------------------------------------------------
"""
import time
import sys
import os
from PyPDF2 import PdfFileMerger, PdfFileReader
def main():
    """Walk ``rootdir`` and merge every .pdf file found, in alphabetical
    order, into a single output PDF.

    Side effects: deletes any pre-existing ``outputfile``, then writes
    the merged document to ``outputfile`` in the current directory.
    """
    print("# Starting script at: " + time.strftime("%Y-%m-%d %H:%M:%S"))
    # raw string so backslashes in the Windows path stay literal
    rootdir = r'C:\tmp'
    outputfile = 'output.pdf'
    # remove a stale output file so it is never merged into itself
    if os.path.exists(outputfile):
        os.remove(outputfile)
    merger = PdfFileMerger()
    for subdir, dirs, files in os.walk(rootdir):
        # BUG FIX: os.walk gives no ordering guarantee; sort the dirs
        # in place (controls descent order) and the file names so the
        # merge really follows the alphabetical order promised in the
        # header comment.
        dirs.sort()
        for f in sorted(files):
            if f.endswith(".pdf"):
                filename = os.path.join(subdir, f)
                print(filename)
                # BUG FIX: PdfFileReader's second positional argument is
                # `strict`, not a file mode, so PdfFileReader(filename,
                # 'rb') silently enabled strict parsing.  Let the merger
                # open and manage the file itself instead.
                merger.append(filename)
    merger.write(outputfile)
    merger.close()
    print("# Finishing script at: " + time.strftime("%Y-%m-%d %H:%M:%S"))
# Program start
if __name__ == '__main__':
    # the script takes no arguments; print the usage docstring otherwise
    if not len(sys.argv) == 1:
        print(__doc__)
        sys.exit(1)
    main()
| 2.75
| 3
|
scripts/varsome_api_annotate_vcf.py
|
definitelysean/varsome-api-client-python
| 23
|
12774152
|
#!/usr/bin/env python3
# Copyright 2018 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from varsome_api.vcf import VCFAnnotator
__author__ = 'ckopanos'
def annotate_vcf(argv):
    """Command-line entry point: annotate a VCF file via the VarSome API.

    :param argv: argument list excluding the program name, e.g. sys.argv[1:]
    """
    parser = argparse.ArgumentParser(description='VCF Annotator command line')
    parser.add_argument('-k', help='Your key to the API', type=str, metavar='API Key', required=True)
    parser.add_argument('-g', help='Reference genome either hg19 or hg38', type=str, metavar='Reference Genome',
                        required=False, default='hg19')
    parser.add_argument('-i',
                        help='Path to vcf file',
                        type=str, metavar='Input VCF File', required=True)
    parser.add_argument('-o',
                        help='Path to output vcf file',
                        type=str, metavar='Output VCF File', required=False)
    parser.add_argument('-p',
                        help='Request parameters e.g. add-all-data=1 expand-pubmed-articles=0',
                        type=str, metavar='Request Params', required=False, nargs='+')
    parser.add_argument('-t', help='Run vcf annotator using x threads', type=int, default=3, required=False,
                        metavar='Number of threads')
    # BUG FIX: the original called parse_args() with no arguments, which
    # reads sys.argv directly and silently ignored the `argv` parameter.
    args = parser.parse_args(argv)
    api_key = args.k
    vcf_file = args.i
    output_vcf_file = args.o
    ref_genome = args.g
    num_threads = args.t
    request_parameters = None
    if args.p:
        # split on the first '=' only, so parameter values may themselves
        # contain '=' characters
        request_parameters = dict(param.split("=", 1) for param in args.p)
    vcf_annotator = VCFAnnotator(api_key=api_key, ref_genome=ref_genome, get_parameters=request_parameters,
                                 max_threads=num_threads)
    vcf_annotator.annotate(vcf_file, output_vcf_file)
if __name__ == "__main__":
    annotate_vcf(sys.argv[1:])
| 2.390625
| 2
|
setup.py
|
kant/4bars
| 0
|
12774153
|
#!/usr/bin/env python
from setuptools import setup
import subprocess
import sys
import pkg_resources
from os import path
# read the PyPI long description from the README next to this setup.py
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
def get_semantic_version():
    """Derive the package version from ``git describe --tags``.

    Returns a version string such as "1.2" (exact tag checkout) or
    "1.2.5" (5 commits past tag v1.2).  Exits with status 32 when the
    command fails, e.g. when installing outside a git clone.
    """
    proc1 = subprocess.Popen("git describe --tags", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out = proc1.communicate()
    if proc1.returncode != 0:
        sys.stdout.write("fourbars must install from cloned folder. make sure .git folder exists\n")
        # BUG FIX: communicate() returns bytes on Python 3; writing bytes
        # to sys.stdout raises TypeError -- decode before writing.
        sys.stdout.write(out[1].decode('utf-8', errors='replace'))
        raise SystemExit(32)
    v = out[0].decode('ascii').replace('\n', '')
    # strip a leading "v." or "v" tag prefix
    if v.startswith('v.'):
        v = v[2:]
    elif v.startswith('v'):
        v = v[1:]
    li = v.split('.')
    lii = li[1].split('-')
    # "M.N-k-ghash" (k commits past tag M.N) becomes "M.N.k";
    # a plain tag "M.N" stays "M.N"
    if len(lii) == 3:
        v = '{0}.{1}.{2}'.format(li[0], lii[0], lii[1])
    else:
        v = '{0}.{1}'.format(li[0], li[1])
    return v
# compute the package version from git metadata at build time
VERSION = get_semantic_version()
setup(
    name = 'fourbars',
    version = VERSION,
    description = 'Ableton Live CLI - High Precision Loop Production and Asset Management',
    long_description = long_description,
    long_description_content_type = "text/markdown",
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/styk-tv/4bars',
    packages = ['fourbars'],
    install_requires = [
        'Cython==0.29.13',
        'pyliblo >= 0.9.1',
        'termcolor==1.1.0',
        # NOTE(review): '<PASSWORD>' below looks like a redacted part of a
        # commit hash -- this pinned VCS URL must be repaired before the
        # package can actually install.
        'randomnames@git+https://github.com/styk-tv/python-randomnames.git@beaa1<PASSWORD>3bf03ac5bc6f3ace2eaed119585f80#egg=randomnames',
        'yamlordereddictloader==0.4.0',
        # NOTE(review): 'pyliblo >= 0.9.1' is listed twice (see above)
        'pyliblo >= 0.9.1'
    ],
    keywords = ['sound', 'music', 'ableton', 'osc', 'pylive'],
    classifiers = [
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Artistic Software',
        'Topic :: Communications',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers'
    ]
)
| 2.046875
| 2
|
vectorflow/__init__.py
|
dongrenguang/VectorFlow
| 0
|
12774154
|
# -*- coding: utf-8 -*-
"""Top-level VectorFlow package: re-exports the submodules and the
shared default computation graph."""
from . import core
from . import layer
from . import loss
from . import metric
from . import operator
from . import optimizer
from . import saver
from . import trainer
# library-wide default computation graph (owned by `core`)
default_graph = core.default_graph
| 1.148438
| 1
|
deepconcolic/training.py
|
nberth/DeepConcolic
| 102
|
12774155
|
from __future__ import absolute_import, division, print_function, unicode_literals
# NB: see head of `datasets.py'
from training_utils import *
from utils_io import os, tempdir
from datasets import image_kinds
# `tf` is brought into scope by the star-import from training_utils
print ("Using TensorFlow version:", tf.__version__)
def train_n_save_classifier (model, class_names, input_kind,
                             train_data, test_data = None,
                             optimizer = 'adam',
                             kind = 'sparse_categorical',
                             outdir = tempdir,
                             early_stopping = True,
                             validate_on_test_data = False,
                             cm_plot_args = None,
                             **kwds):
  """Compile, train, and save `model` as a classifier.

  model: Keras model; `model.name` determines log/checkpoint file names
  class_names: class labels (used for logging only)
  input_kind: dataset kind; image kinds get a sample grid logged
  train_data: pair (x_train, y_train)
  test_data: optional pair (x_test, y_test); enables the confusion
    matrix callback and a final evaluation
  optimizer: optimizer spec for `model.compile`
  kind: loss/metric family (only 'sparse_categorical' is set up)
  outdir: directory receiving the saved `.h5` model and log files
  early_stopping: stop once `val_loss` stops improving
  validate_on_test_data: validate on `test_data` instead of a split
  cm_plot_args: optional dict of extra confusion-matrix plot arguments
  kwds: forwarded to `model.fit` (overrides the defaults below)
  """
  # FIX: avoid the shared-mutable-default-argument pitfall ({} default)
  cm_plot_args = {} if cm_plot_args is None else cm_plot_args
  x_train, y_train = train_data
  path = os.path.join (outdir, model.name)
  log_dir = path + '_logs'
  # (local renamed from `fw_confision_matrix`: typo in the original)
  fw_train, fw_confusion_matrix = \
    tf.summary.create_file_writer (os.path.join (log_dir, 'train')), \
    tf.summary.create_file_writer (os.path.join (log_dir, 'confusion_matrix'))
  # Very basic & dumb test for detecting images...
  if input_kind in image_kinds:
    log_25_img_dataset_grid (fw_train, class_names, 'Training data (some)', train_data)
  model.summary ()
  loss, metric = (tf.losses.SparseCategoricalCrossentropy (from_logits=True),
                  tf.metrics.SparseCategoricalAccuracy ()) # if kind = 'sparse_categorical' else ?
  model.compile (optimizer = optimizer,
                 loss = loss,
                 metrics = [metric])
  callbacks = [
    tf.keras.callbacks.ModelCheckpoint (
      # Path where to save the model
      # The two parameters below mean that we will overwrite
      # the current checkpoint if and only if
      # the `val_loss` score has improved.
      # The saved model name will include the current epoch.
      filepath = path + "_{epoch}",
      save_best_only = True,    # Only save a model if `val_loss` has improved.
      monitor = "val_loss",
      verbose = 1,
    ),
    tf.keras.callbacks.TensorBoard (
      log_dir = log_dir,
      histogram_freq = 1,       # How often to log histogram visualizations
      embeddings_freq = 1,      # How often to log embedding visualizations
      update_freq = "epoch",    # How often to write logs (default: once per epoch)
    ),
  ] + ([
    # https://www.tensorflow.org/guide/keras/train_and_evaluate#checkpointing_models
    tf.keras.callbacks.EarlyStopping (
      # Stop training when `val_loss` is no longer improving
      monitor = "val_loss",
      # "no longer improving" being defined as "no better than 1e-2 less"
      min_delta = 1e-2,
      # "no longer improving" being further defined as "for at least 2 epochs"
      patience = 3,
      verbose = 1,
    ),
  ] if early_stopping else []) + ([
    log_confusion_matrix_callback (
      fw_confusion_matrix,
      model, class_names, test_data,
      **cm_plot_args),
  ] if test_data is not None else [])
  valargs = dict (validation_data = test_data) \
            if validate_on_test_data and test_data is not None \
            else {}
  model.fit (x_train, y_train,
             callbacks = callbacks,
             **{'epochs': 20,           # some defaults:
                'shuffle': True,
                'batch_size': 64,
                'validation_split': 0.2,
                **valargs,
                **kwds})
  if test_data is not None:
    x_test, y_test = test_data
    print ('Performing final validation on given test data:')
    # Just check and show accuracy on "official" test data:
    _, test_accuracy = model.evaluate (x_test, y_test, verbose = 1)
    print ('Validation accuracy on given test data:', test_accuracy)
  print ('Saving model in', path + '.h5')
  model.save (path + '.h5')
# ---
def classifier (load_data, make_model, model_name = None,
                load_data_args = None, make_model_args = None, **kwds):
  """Load a dataset, build a model, and train it as a classifier.

  load_data: callable returning (train_data, test_data, input_shape,
    input_kind, class_names)
  make_model: callable building a model from (input_shape, name=..., ...)
  model_name: optional model name forwarded to `make_model`
  load_data_args, make_model_args: optional keyword-argument dicts
    (None defaults fix the original shared-mutable `{}` defaults)
  kwds: forwarded to `train_n_save_classifier`
  """
  load_data_args = {} if load_data_args is None else load_data_args
  make_model_args = {} if make_model_args is None else make_model_args
  train_data, test_data, input_shape, input_kind, class_names = load_data (**load_data_args)
  train_n_save_classifier (make_model (input_shape, name = model_name, **make_model_args),
                           class_names, input_kind, train_data, test_data, **kwds)
# ---
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Reshape, Dense
def make_dense (input_shape, n_neurons = (100,), n_classes = 5,
                input_reshape = False, **kwds):
  """Build a very basic fully-connected DNN.

  n_neurons: number of neurons for each layer, as a list or tuple; the
    last entry also receives the softmax output activation
  n_classes: number of output neurons (= |classes|)
    NOTE(review): this argument is not referenced in the body -- callers
    presumably encode the class count as the last entry of `n_neurons`;
    confirm before relying on it
  input_reshape: whether to prepend a dummy reshape input layer
    (useful to access input features as activations, for DeepConcolic's
    internal statistical analysis and layerwise abstractions)
  """
  assert len (n_neurons) > 0
  model_layers = []
  if input_reshape:
    # identity reshape exposing the raw inputs as a layer
    model_layers.append (Reshape (input_shape = input_shape, target_shape = input_shape))
  last = len (n_neurons) - 1
  for idx, width in enumerate (n_neurons):
    args = dict (activation = 'relu')
    if idx == 0:
      args['input_shape'] = input_shape
    if idx == last:
      args['activation'] = 'softmax'
    model_layers.append (Dense (width, **args))
  return Sequential (tuple (model_layers), **kwds)
# ---
def make_dense_classifier (load_data, prefix, n_features, n_classes, n_neurons, **kwds):
  """A wrapper for training DNNs built using {make_dense}."""
  # model name encodes the architecture, e.g. "mnist784_10_dense_100_10"
  neuron_suffix = "_".join (str (c) for c in n_neurons)
  model_name = f'{prefix}{n_features}_{n_classes}_dense_{neuron_suffix}'
  classifier (load_data, make_dense,
              model_name = model_name,
              make_model_args = dict (n_classes = n_classes, n_neurons = n_neurons),
              epochs = 50, **kwds)
# ---
| 2.375
| 2
|
mps_database/tools/depricate/export_thresholds.py
|
slaclab/mps_database
| 0
|
12774156
|
<filename>mps_database/tools/depricate/export_thresholds.py
#!/usr/bin/env python
from mps_database.mps_config import MPSConfig, models
from sqlalchemy import func, exc
import sys
import argparse
#
# Sample Device Input (i.e. Digital Channel) record:
#
# record(bi, "CentralNode:DIGIN0") {
# field(DESC, "Test input")
# field(DTYP, "asynInt32")
# field(SCAN, "1 second")
# field(ZNAM, "OK")
# field(ONAM, "FAULTED")
# field(INP, "@asyn(CENTRAL_NODE 0 3)DIGITAL_CHANNEL")
#}
def printRecord(file, recType, recName, fields):
    """Write one EPICS database record to `file`.

    recType: record type, e.g. 'ao', 'bi', 'longout'
    recName: full record (PV) name
    fields: iterable of (field_name, field_value) pairs
    """
    file.write('record({0}, "{1}") {{\n'.format(recType, recName))
    for fieldName, fieldValue in fields:
        file.write(' field({0}, "{1}")\n'.format(fieldName, fieldValue))
    file.write('}\n\n')
def getAnalogDeviceName(session, analogDevice):
    """Return the '<deviceType>:<area>:<position>' PV-name prefix for an analog device."""
    deviceType = session.query(models.DeviceType).filter(models.DeviceType.id==analogDevice.device_type_id).one()
    return "{0}:{1}:{2}".format(deviceType.name, analogDevice.area, analogDevice.position)
#
# Create one bi record for each device state for each analog device
#
# For example, the BPMs have threshold bits for X, Y and TMIT. Each
# one of them has a bit mask to identify the fault. The mask
# is passed to asyn as the third parameter within the
# '@asynMask(PORT ADDR MASK TIMEOUT)' INP record field
#
def _printThresholdPair(file, recPrefix, channelNumber, mask, stateName,
                        recSuffix, descSuffix, reason):
    """Emit one HIHI/LOLO pair of analog-threshold 'ao' records.

    recPrefix: record-name prefix ('<device>:<state>')
    recSuffix: e.g. '_LCLSI', '_IDLE', '_ALT' or '' for normal operation
    descSuffix: trailing text of the DESC field (e.g. ' LCLS-I')
    reason: asyn reason string (e.g. 'ANALOG_THRESHOLD')
    """
    for descPrefix, bit, hiLo in (('High', 1, 'HIHI'), ('Low', 0, 'LOLO')):
        fields = [
            ('DESC', '{0} analog threshold for {1}{2}'.format(descPrefix, stateName, descSuffix)),
            ('DTYP', 'asynFloat64'),
            ('OUT', '@asynMask(LINK_NODE {0} {1} {2}){3}'.format(channelNumber, mask, bit, reason)),
        ]
        printRecord(file, 'ao', '{0}{1}_{2}'.format(recPrefix, recSuffix, hiLo), fields)
def exportAnalogThresholds(file, analogDevices, session):
    """Write the eight threshold 'ao' records per qualifying fault state
    for every analog device, then close `file`.

    Only fault states whose device-state name ends in "0" are exported;
    the trailing "0" is stripped for the LCLSI/IDLE record names.
    """
    for analogDevice in analogDevices:
        deviceName = getAnalogDeviceName(session, analogDevice)
        # walk FaultInput -> Fault -> FaultState to find the threshold faults
        faultInputs = session.query(models.FaultInput).filter(models.FaultInput.device_id==analogDevice.id).all()
        for faultInput in faultInputs:
            for fault in session.query(models.Fault).filter(models.Fault.id==faultInput.fault_id).all():
                for state in session.query(models.FaultState).filter(models.FaultState.fault_id==fault.id).all():
                    stateName = state.device_state.name
                    if not stateName.endswith("0"):
                        continue
                    tname = stateName[:-1]
                    channel = analogDevice.channel.number
                    mask = state.device_state.mask
                    # LCLS-I operation thresholds
                    _printThresholdPair(file, '{0}:{1}'.format(deviceName, tname), channel, mask,
                                        stateName, '_LCLSI', ' LCLS-I', 'ANALOG_THRESHOLD_LCLSI')
                    # IDLE operation thresholds
                    # NOTE(review): these reuse the 'LCLS-I' description and the
                    # ANALOG_THRESHOLD_LCLSI asyn reason exactly as the original
                    # code did -- looks like a copy-paste; confirm before changing.
                    _printThresholdPair(file, '{0}:{1}'.format(deviceName, tname), channel, mask,
                                        stateName, '_IDLE', ' LCLS-I', 'ANALOG_THRESHOLD_LCLSI')
                    # normal operation thresholds
                    _printThresholdPair(file, '{0}:{1}'.format(deviceName, stateName), channel, mask,
                                        stateName, '', '', 'ANALOG_THRESHOLD')
                    # alternative thresholds
                    _printThresholdPair(file, '{0}:{1}'.format(deviceName, stateName), channel, mask,
                                        stateName, '_ALT', ' ALT', 'ANALOG_THRESHOLD_ALT')
    file.close()
def exportMitiagationDevices(file, mitigationDevices, beamClasses):
    """Write beam-class and mitigation-device records, then close `file`.

    Emits: one longout holding the number of beam classes, one longout
    per beam class, and one 'ai' allowed-class readback per mitigation
    device.  (The public name keeps the original 'Mitiagation'
    misspelling so existing callers keep working.)
    """
    printRecord(file, 'longout', '$(BASE):NUM_BEAM_CLASSES',
                [('DESC', 'Number of beam classes'),
                 ('PINI', 'YES'),
                 ('VAL', '{0}'.format(len(beamClasses)))])
    for beamClass in beamClasses:
        printRecord(file, 'longout', '$(BASE):BEAM_CLASS_{0}'.format(beamClass.number),
                    [('DESC', '{0}'.format(beamClass.description)),
                     ('PINI', 'YES'),
                     ('VAL', '{0}'.format(beamClass.number))])
    for mitigationDevice in mitigationDevices:
        printRecord(file, 'ai', '$(BASE):{0}_ALLOWED_CLASS'.format(mitigationDevice.name.upper()),
                    [('DESC', 'Mitigation Device: {0}'.format(mitigationDevice.name)),
                     ('DTYP', 'asynInt32'),
                     ('SCAN', '1 second'),
                     ('INP', '@asyn(CENTRAL_NODE {0} 0)MITIGATION_DEVICE'.format(mitigationDevice.id))])
    file.close()
def exportFaults(file, faults):
    """Write two 'bi' records per fault (state and ignored flag), then close `file`."""
    for fault in faults:
        # fault state readback
        printRecord(file, 'bi', '$(BASE):{0}'.format(fault.name),
                    [('DESC', '{0}'.format(fault.description)),
                     ('DTYP', 'asynUInt32Digital'),
                     ('SCAN', '1 second'),
                     ('ZNAM', 'OK'),
                     ('ONAM', 'FAULTED'),
                     ('INP', '@asynMask(CENTRAL_NODE {0} 1 0)FAULT'.format(fault.id))])
        # fault "ignored" readback
        printRecord(file, 'bi', '$(BASE):{0}_IGNORED'.format(fault.name),
                    [('DESC', '{0} (ignore)'.format(fault.description)),
                     ('DTYP', 'asynUInt32Digital'),
                     ('SCAN', '1 second'),
                     ('ZNAM', 'Not Ignored'),
                     ('ONAM', 'Ignored'),
                     ('INP', '@asynMask(CENTRAL_NODE {0} 1 0)FAULT_IGNORED'.format(fault.id))])
    file.close()
#=== MAIN ==================================================================================
parser = argparse.ArgumentParser(description='Export EPICS template database')
# BUG FIX: the original used `type=file`, which relies on the Python 2
# `file` builtin and raises NameError on Python 3.
parser.add_argument('database', metavar='db', type=argparse.FileType('r'), nargs=1,
                    help='database file name (e.g. mps_gun.db)')
# FIX: made required -- the code below unconditionally indexes args.app_id,
# so omitting it used to crash with "'NoneType' is not subscriptable".
parser.add_argument('--app-id', metavar='number', type=int, nargs=1, required=True,
                    help='Application ID')
parser.add_argument('--threshold-file', metavar='file', type=argparse.FileType('w'), nargs='?',
                    help='epics template file name for analog thresholds (e.g. threshold.template)')
args = parser.parse_args()
mps = MPSConfig(args.database[0].name)
# renamed from `id` to avoid shadowing the builtin
appId = args.app_id[0]
session = mps.session
try:
    appCard = session.query(models.ApplicationCard).filter(models.ApplicationCard.id==appId).one()
except exc.SQLAlchemyError:
    print(("ERROR: no application card with id " + str(appId) + " found. Exiting..."))
    session.close()
    exit(-1)
if len(appCard.analog_channels) == 0:
    print(("ERROR: no analog channels defined for application card " + str(appId) + " (name=" + \
           appCard.name + ", description=" + appCard.description + "). Exiting..."))
    session.close()
    exit(-1)
analogDevices = []
for channel in appCard.analog_channels:
    analogDevices.append(channel.analog_device)
# NOTE(review): --threshold-file is optional, but exportAnalogThresholds
# requires a writable file object; passing None still fails inside it.
exportAnalogThresholds(args.threshold_file, analogDevices, session)
session.close()
| 2.484375
| 2
|
300-/372.py
|
yshshadow/Leetcode
| 0
|
12774157
|
# Your task is to calculate a^b mod 1337 where a is a positive integer and b is an extremely large positive integer given in the form of an array.
#
# Example1:
#
# a = 2
# b = [3]
#
# Result: 8
# Example2:
#
# a = 2
# b = [1,0]
#
# Result: 1024
| 3.25
| 3
|
src/phasestatemachine/_kernel.py
|
raphaeldeimel/python-phasestatemachine
| 0
|
12774158
|
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright 2017
@licence: 2-clause BSD licence
This file contains the main code for the phase-state machine
"""
import numpy as _np
import pandas as _pd
import itertools
from numba import jit
import warnings as _warnings
@jit(nopython=True, cache=True)
def _limit(a):
    """
    Clip every entry of the 2-D array `a` into [0, 1], in place.

    Faster, numba-compatible replacement for numpy.clip: numba does not
    support boolean-mask indexing (a[a<0]=0), hence the explicit loops.
    """
    rows, cols = a.shape
    for col in range(cols):
        for row in range(rows):
            value = a[row, col]
            if value < 0.0:
                a[row, col] = 0.0
            elif value > 1.0:
                a[row, col] = 1.0
@jit(nopython=True, cache=True)
def _signfunc(x):
    """Binary sign: +1.0 where x >= 0, -1.0 where x < 0 (elementwise)."""
    negative = (x < 0)
    return 1.0 - 2 * negative
@jit(nopython=True, cache=True)
def ReLU(x):
    """Elementwise rectifier max(x, 0), computed as (x + |x|) / 2."""
    return 0.5 * (x + _np.abs(x))
# Alternative, differentiable "sign" function
# Also improves stability of the state's sign
#@jit(nopython=True)
#def _signfunc(x, epsilon=1e-3):
# return _np.tanh(x/epsilon)
#_np.set_printoptions(precision=3, suppress=True)
@jit(nopython=True, cache=True)
def _step(statevector, #main state vector. Input and output, modified in-place
          #outputs, modified in place:
          dotstatevector, #velocity of main state vector
          activationMatrix, #Activation for each potential state and transition
          phasesMatrix, #Phases for each transition
          phaseVelocitiesMatrix, #Derivative of phases for each transition
          #inputs:
          phaseVelocityExponentInput, #input to modify velocity of each transition individually (exponential scale, basis 2)
          BiasMatrix, #input to depart / avert departure from states
          stateConnectivityGreedinessAdjustment, #input to modify how strong a successor state pulls the system towards itself, relative to the predecessor state
          stateConnectivityCompetingGreedinessAdjustment, #input to adjust greediness in between compeeting successor states
          phasesInput, # phase target in case a transition is enslaved to an external phase
          velocityAdjustmentGain, # gain related to enslaving phase
          noise_velocity, # vector that gets added to state velocity (usually in order to inject some base noise)
          #parameters:
          numStates, #number of states / dimensions
          betaInv, #precomputed from beta parameter (state locations / scale)
          stateConnectivityAbs, #precomputed from state graph
          stateConnectivitySignMap, #precomputed from state graph
          stateConnectivityIsBidirectional, #precomputed from state graph
          stateConnectivityNrEdges, #precomputed from state graph
          rhoZero, #coupling values for creating discrete states
          rhoDelta, #coupling values for creating stable heteroclinic channels
          alpha, #growth rate of states, determines speed of transitioning
          dt, # time step duration in seconds
          dtInv, #precomputed from dt
          nonlinearityParamsLambda, #Kumaraswamy distribution parameters to modify gradualness of activation
          nonlinearityParamsPsi, #Kumaraswamy distribution parameters to modify gradualness of phase progress
          stateVectorExponent, #modifies the bending of heteroclinic channels
          speedLimit, #safety limit to state velocity
          epsilonLambda, #determines the region of zero activation around the coordinates axes
          #for comparative study:
          emulateHybridAutomaton, #set this to true to hack phasta into acting like a discrete state graph / hybrid automaton
          triggervalue_successors, #for HA emulation mode, modified in-place
          ):
    """
    Core phase-state machine computation: advance the system by one time
    step of duration `dt`.

    Written as a function in order to be able to optimize it with numba.

    Note: The function modifies several arguments (numpy arrays) in place:
    statevector, dotstatevector, activationMatrix, phasesMatrix,
    phaseVelocitiesMatrix, and triggervalue_successors.
    """
    #compute adjustment to the instantaneously effective growth factor
    # (activation-weighted average of the per-transition velocity exponents)
    scaledactivation = activationMatrix * (1.0 / max(1.0, _np.sum(activationMatrix)))
    kd = 2** _np.sum( scaledactivation * phaseVelocityExponentInput)
    #compute mu for phase control:
    phaseerrors = activationMatrix * (phasesInput-phasesMatrix)
    correctiveAction = phaseerrors * velocityAdjustmentGain
    # sum corrective action over rows/columns to get per-state contributions
    correctiveActionPredecessor = _np.zeros((numStates))
    for i in range(numStates):
        correctiveActionPredecessor += correctiveAction[:,i]
    correctiveActionSuccessor = _np.zeros((numStates))
    for i in range(numStates):
        correctiveActionSuccessor += correctiveAction[i,:]
    mu = correctiveActionPredecessor - correctiveActionSuccessor
    statevector_abs = _np.abs(statevector)
    #adjust signs of the bias values depending on the transition direction:
    biases = _np.dot(BiasMatrix * stateConnectivitySignMap * _np.outer(1-statevector_abs,statevector_abs), statevector)
    noise_statevector = noise_velocity * dt
    #If requested, decide whether to start a transition using a threshold, and stick to that decision no matter what until the transition finishes
    if emulateHybridAutomaton:
        predecessors = 1.0*(_np.abs(statevector)*betaInv > 0.99)
        successors = (_np.dot(stateConnectivityAbs, predecessors) > 0.5 )
        notsuccessors = (_np.dot(stateConnectivityAbs, predecessors) < 0.5 )
        triggervalue_successors[notsuccessors] = 0.0
        noise_statevector = _np.zeros((numStates))
        threshold = 0.1
        if _np.any(triggervalue_successors >= threshold ):
            # commit to the successor with the largest accumulated trigger value
            chosensuccessor = _np.argmax(triggervalue_successors)
            value_chosen = triggervalue_successors[chosensuccessor]
            notchosensuccessors = successors.copy()
            notchosensuccessors[chosensuccessor] = 0
            triggervalue_successors[:] = 0.0
            triggervalue_successors[chosensuccessor] = value_chosen
            # latch the decision by saturating the chosen trigger value
            if triggervalue_successors[chosensuccessor] < 1e5:
                triggervalue_successors[ chosensuccessor ] = 1e6
                #print(chosensuccessor)
            noise_statevector[chosensuccessor] = 1.0
        else:
            triggervalue_successors[:] += biases * dt + noise_velocity
    statevector[:] = statevector #for numba
    statesigns = _signfunc(statevector)
    statesignsOuterProduct = _np.outer(statesigns,statesigns) #precompute this, as we need it several times
    #stateVectorExponent=1 #straight channels: |x| (original SHC by Horchler/Rabinovich)
    #stateVectorExponent=2 #spherical channels: |x|**2 (default for phasta)
    x_gamma = (statevector*statesigns)**stateVectorExponent
    #Compute a mask that ensures the attractor works with negative state values too, that the transition's "sign" is observed, and that unidirectional edges do not accidentally change between positive and negative state values
    #the computation is formulated such that only algebraic and continuous functions (e.g. ReLu) are used
    M_T = ReLU(statesignsOuterProduct*stateConnectivitySignMap)
    #Appropriate signs for transition-related greediness adjustment, depending on whether a graph edge is bidirectional or not:
    TransitionGreedinessAdjustmentSign = (stateConnectivityNrEdges * ReLU(statesignsOuterProduct) - stateConnectivityIsBidirectional) * stateConnectivitySignMap
    #sum everything into a transition/greedinesses matrix (T+G):
    T_G = M_T*stateConnectivityAbs + TransitionGreedinessAdjustmentSign*stateConnectivityGreedinessAdjustment + stateConnectivityCompetingGreedinessAdjustment
    #This is the core computation and time integration of the dynamical system:
    growth = alpha + _np.dot(rhoZero, x_gamma) + _np.dot(rhoDelta * T_G, x_gamma)
    dotstatevector[:] = statevector * growth * kd + mu + biases #estimate velocity. do not add noise to velocity, promp mixer doesnt like jumps
    dotstatevector_L2 = _np.sqrt(_np.sum(dotstatevector**2))
    velocity_limitfactor = _np.minimum(1.0, speedLimit/(1e-8 + dotstatevector_L2)) #limit speed of the motion in state space to avoid extreme phase velocities that a robot cannot
    statevector[:] = (statevector + dotstatevector*dt*velocity_limitfactor + noise_statevector) #set the new state
    #prepare a normalized state vector for the subsequent operations:
    statevector_abs = _np.abs(statevector)
    S = statevector_abs.reshape((numStates,1))
    S2 = S*S
    S_plus_P = S + S.T
    statevectorL1 = _np.sum(S)
    statevectorL2 = _np.sum(S2)
    #compute the transition/state activation matrix (Lambda)
    activations = stateConnectivitySignMap * _np.outer(statevector, statevector) * 16 * (statevectorL2) / (S_plus_P**4+statevectorL1**4)
    activationMatrix[:,:] = activations * stateConnectivityAbs #function shown in visualization_of_activationfunction.py
    _limit(activationMatrix)
    #apply nonlinearity:
    if (nonlinearityParamsLambda[0] != 1.0 or nonlinearityParamsLambda[1] != 1.0 ):
        activationMatrix[:,:] = 1.0-(1.0-activationMatrix**nonlinearityParamsLambda[0])**nonlinearityParamsLambda[1] #Kumaraswamy CDF
    #compute the state activation and put it into the diagonal of Lambda:
    # (state activations share the residual 1 - sum(transition activations))
    residual = max(0.0, 1.0 - _np.sum(activationMatrix))
    stateactivation_normalized = S2/ _np.sum(S2)
    for i in range(numStates):
        activationMatrix[i,i] = stateactivation_normalized[i,0] * residual
    #compute the phase progress matrix (Psi)
    # epsilonPsi regularizes 0/0 when both states of a pair are zero
    epsilonPsi = 0.0001
    newphases = (S+epsilonPsi) / (S_plus_P+2*epsilonPsi)
    _limit(newphases)
    #apply nonlinearity:
    if (nonlinearityParamsPsi[0] != 1.0 or nonlinearityParamsPsi[1] != 1.0 ):
        newphases = 1.0-(1.0-newphases**nonlinearityParamsPsi[0])**nonlinearityParamsPsi[1] #Kumaraswamy CDF
    # phase velocity via finite differences of successive phase matrices
    phaseVelocitiesMatrix[:,:] = (newphases - phasesMatrix) * dtInv
    phasesMatrix[:,:] = newphases
    return
# Lookup table mapping nonlinearity names to the (a, b) parameters of the
# Kumaraswamy CDF 1-(1-x**a)**b, used to shape activation / phase curves.
_KumaraswamyCDFParameters = {
    'kumaraswamy1,1': (1.,1.),  # identity (no shaping)
    'kumaraswamy2,1': (2.,1.),
    'kumaraswamy1,2': (1.,2.),
    #values for the Kumaraswamy CDF that approximate the given incomplete beta function:
    'beta2,2': (1.913227338072261,2.2301669931409323),
    'beta3,3': (2.561444544688591,3.680069490606511),
    'beta2,5': (1.6666251656562021,5.9340642444701555),
}
class Kernel():
"""
This class provides a dynamical system that can behave like a state machine.
    The transitions are smooth though, which enables interesting behaviors like online-synchronisation and negotiation of branch alternatives
The most important parameters are:
numStates: the number of quasi-discrete states the system should have
predecessors: a list of lists which defines the preceeding states of each state
Note: Don't set up mutual predecessors (i.e. a loop with two states). This does not work. You need at least 3 states for a loop.
alpha: determines the speed at which a state becomes dominant. Effectively speeds up or slows down the machine
epsilon: "noise" added to the states, which has the effect of reducing the average dwell time for the preceeding states
    Less important parameters:
beta: scaling factor for the state variable (usually 1.0)
nu: determines how easy it is to push away from a state (usually 1.5).
dt: time step at which the system is simulated (default: 1e-2)
Inputs:
Observed phase Psi_d: A matrix analogous to the phase matrix, containing phase estimates conditional to the transition or phase being activated
phase control gain K_p: A matrix analogous to the activation matrix, which indicates how confident the state observation is
inputbias: vector that signals which state should currently be the next (e.g. from perception)
Output:
stateVector: The actual, evolving state of the dynamical system.
phase matrix Psi: A (numStates x numStates) matrix aggregating all phase variables for each possible transition, plus the state vector on the diagonal
activation matrix Lambda: A matrix which contains the corresponding transition activation values. state
activations correspond to the 1-sum(transition activations), so that sum(matrix) = 1 (i.e.e can be used as a
weighing matrix)
"""
    def __init__(self, **kwargs):
        """Create a phase-state-machine kernel.

        All keyword arguments are forwarded to setParameters().
        """
        self.numStates = 0          # overwritten by setParameters
        self.t = 0.0                # simulation time counter
        self.statehistorylen = 0    # no history buffer allocated yet
        self.historyIndex = 0       # next write position in the history buffer
        self.setParameters(**kwargs)
def setParameters(self,
numStates=3,
predecessors=None,
successors=[[1],[2],[0]],
alphaTime=None,
alpha=40.0,
epsilon=1e-9,
nu=1.0,
beta=1.0,
dt=1e-2,
stateVectorExponent=2.0,
speedLimit = _np.inf,
initialState=0,
nonlinearityLambda='kumaraswamy1,1',
nonlinearityPsi='kumaraswamy1,1',
inputFilterTimeConstant = 0.1,
reuseNoiseSampleTimes = 10,
reset=False,
recordSteps=-1,
emulateHybridAutomaton=False):
"""
Method to set or reconfigure the phase-state-machine
numStates: The number of states the system should have
predecessors: A list of lists which contain the state indices of the respective predecessors
successors: A list of lists which contain the state indices of the respective successors
Note: use of predecessors and successors parameter is mutually exclusive!
For the meaning of the other parameters, please consult the paper or the code
"""
oldcount = self.numStates
#parameters:
self.numStates = numStates
if alphaTime is None: #backwards compatibility: if no alphatime is provided, use dt-dependent alpha value
self.alphaTime = self._sanitizeParam(alpha)/dt
else:
self.alphaTime = self._sanitizeParam(alphaTime)
self.beta = self._sanitizeParam(beta)
self.betaInv = 1.0/self.beta #this is used often, so precompute once
self.nu = self._sanitizeParam(nu)
self.nu_term = self.nu/(1 + self.nu) #equations usually use this term - precompute it
self.epsilon = self._sanitizeParam(epsilon) * self.beta #Wiener process noise
self.epsilonLambda=0.01 #regularization parameter of activation function
self.maxGreediness=10.0 #maximum factor to allow for increasing decisiveness (mainly to guard against input errors)
self.reuseNoiseSampleTimes = reuseNoiseSampleTimes
self.stateVectorExponent =stateVectorExponent
self.speedLimit = speedLimit
if initialState >= self.numStates:
raise ValueError()
self.initialState = initialState
if predecessors is not None: #convert list of predecessors into list of successors
self.successors = self._predecessorListToSuccessorList(predecessors)
else:
self.successors = successors
self.updateDt(dt) #also calls self._updateRho
self.nonlinearityParamsLambda = _KumaraswamyCDFParameters[nonlinearityLambda] #nonlinearity for sparsifying activation values
self.nonlinearityParamsPsi = _KumaraswamyCDFParameters[nonlinearityPsi] #nonlinearity that linearizes phase variables
#inputs:
self.BiasMatrix = _np.zeros((self.numStates,self.numStates)) #determines transition preferences and state timeout duration
self.BiasMatrixDesired = _np.zeros((self.numStates,self.numStates)) #determines transition preferences and state timeout duration
self.emulateHybridAutomaton = emulateHybridAutomaton #set this to true to emulate discrete switching behavior on bias input
self.triggervalue_successors = _np.zeros((self.numStates))
self.phasesInput = _np.zeros((self.numStates,self.numStates)) #input to synchronize state transitions (slower/faster)
self.velocityAdjustmentGain = _np.zeros((self.numStates,self.numStates)) #gain of the control enslaving the given state transition
self.phaseVelocityExponentInput = _np.zeros((self.numStates,self.numStates)) #contains values that limit transition velocity
self.stateConnectivityGreedinessAdjustment = _np.zeros((self.numStates,self.numStates)) #contains values that adjust transition greediness
self.stateConnectivityCompetingGreedinessAdjustment = _np.zeros((self.numStates,self.numStates)) #contains values that adjust competing transition greediness
self.stateConnectivityGreedinessTransitions = _np.zeros((self.numStates,self.numStates))
self.stateConnectivityGreedinessCompetingSuccessors = _np.zeros((self.numStates,self.numStates))
self.inputfilterK = dt / max(dt , inputFilterTimeConstant) #how much inputs should be low-passed (to avoid sudden changes in phasta state)
#internal data structures
if self.numStates != oldcount or reset: #force a reset if number of states change
self.statevector = _np.zeros((numStates))
self.dotstatevector = _np.zeros((numStates))
self.statevector[self.initialState] = self.beta[self.initialState] #start at a state
self.phasesActivation = _np.zeros((self.numStates,self.numStates))
self.phasesProgress = _np.zeros((self.numStates,self.numStates))
self.phasesProgressVelocities = _np.zeros((self.numStates,self.numStates))
self.biases = _np.zeros((self.numStates, self.numStates))
self.noise_velocity = 0.0
self.noiseValidCounter = 0
#these data structures are used to save the history of the system:
if recordSteps< 0:
pass
elif recordSteps == 0:
self.statehistorylen = 0
self.historyIndex = 0
else:
self.statehistorylen = recordSteps
self.statehistory = _np.empty((self.statehistorylen, self.numStates+1))
self.statehistory.fill(_np.nan)
self.phasesActivationHistory= _np.zeros((self.statehistorylen, self.numStates,self.numStates))
self.phasesProgressHistory = _np.zeros((self.statehistorylen, self.numStates,self.numStates))
self.historyIndex = 0
    def _updateRho(self):
        """
        internal method to compute the P matrix from preset parameters

        also computes the state connectivity matrix

        reimplements the computation by the SHCtoolbox code
        """
        # Build the absolute connectivity matrix (1 where a transition exists)
        # and a sign map used to orient phases for bidirectional edges.
        stateConnectivityAbs = _np.zeros((self.numStates, self.numStates))
        stateConnectivitySignMap =_np.tri(self.numStates, self.numStates, k=0) - _np.tri(self.numStates, self.numStates, k=-1).T
        for state, successorsPerState in enumerate(self.successors):
            #precedecessorcount = len(predecessorsPerState)
            for successor in successorsPerState:
                if state == successor: raise ValueError("Cannot set a state ({0}) as successor of itself!".format(state))
                stateConnectivityAbs[successor,state] = 1
                stateConnectivitySignMap[successor,state] = 1
                stateConnectivitySignMap[state, successor] = -1
        self.stateConnectivityAbs = stateConnectivityAbs
        self.stateConnectivitySignMap = stateConnectivitySignMap
        #precompute some things:
        self.stateConnectivityIsBidirectional =  _np.sqrt(self.stateConnectivityAbs * self.stateConnectivityAbs.T)
        self.stateConnectivityNrEdges = stateConnectivityAbs + stateConnectivityAbs.T
        self.stateConnectivity = self.stateConnectivityAbs
        #compute a matrix that has ones for states that have a common predecessor, i.e. pairs of states which compete (except for self-competition)
        self.connectivitySigned = self.stateConnectivitySignMap*self.stateConnectivityAbs
        self.competingStates = _np.dot(self.stateConnectivityAbs, self.stateConnectivityAbs.T) * (1-_np.eye(self.numStates))
        #first, fill in the standard values in rhoZero
        #  rhoZero = beta^-1 x alpha * (1 - I + alpha^-1 x alpha)
        alphaInv = 1/self.alpha
        s = _np.dot(self.alpha[:,_np.newaxis],self.betaInv[_np.newaxis,:])
        rhoZero = s * (_np.eye(self.numStates) - 1 - _np.dot(self.alpha[:,_np.newaxis],alphaInv[_np.newaxis,:]))
        #then fill the rhoDelta:
        rhoDelta = (self.alpha[:,_np.newaxis]*self.betaInv[_np.newaxis,:] / self.nu_term[:,_np.newaxis])
        self.rhoZero = rhoZero
        self.rhoDelta = rhoDelta
        # weights used to balance bias input over the number of successors of each state
        successorCountInv = 1.0/_np.maximum(_np.sum(self.stateConnectivityAbs, axis=0)[_np.newaxis,:],1.0)
        self.BiasMeanBalancingWeights = self.stateConnectivityAbs * successorCountInv
    def step(self, until=None, period=None, nr_steps=1):
        """
        Main algorithm, implementing the integration step, state space decomposition, phase control and velocity adjustment.

        period: give a period to simulate

        until: give a time until to simulate

        nr_steps: give the number of steps to simulate at self.dt

        If more than one argument is given, then precedence is: until > period > nr_steps

        Returns the (mutated in-place) state vector after the last step.
        """
        if until is not None:
            period = until - self.t
            if period < 0.0:
                raise RuntimeError("argument until is in the past")
        #if a period is given, iterate until we finished that period:
        if period is not None:
            nr_steps = int(period // self.dt)

        for i in range(nr_steps):
            #execute a single step:
            self.t = self.t + self.dt  #advance time
            self.noiseValidCounter = self.noiseValidCounter - 1
            if self.noiseValidCounter <= 0: #do not sample every timestep as the dynamical system cannot react that fast anyway. Effectively low-pass-filters the noise.
                self.noise_velocity = _np.random.normal(scale = self.epsilonPerSample, size=self.numStates) #sample a discretized wiener process noise
                self.noiseValidCounter = self.reuseNoiseSampleTimes
            #low-pass filter input to avoid sudden jumps in velocity
            self.BiasMatrix += self.inputfilterK * (self.BiasMatrixDesired-self.BiasMatrix)
            self.stateConnectivityGreedinessAdjustment +=  self.inputfilterK * (self.stateConnectivityGreedinessTransitions - self.stateConnectivityGreedinessAdjustment)
            self.stateConnectivityCompetingGreedinessAdjustment +=  self.inputfilterK * (self.stateConnectivityGreedinessCompetingSuccessors -self.stateConnectivityCompetingGreedinessAdjustment)
            # delegate the actual integration step to the (presumably compiled)
            # _step helper; the first five arrays are modified in-place
            _step( #arrays modified in-place:
                self.statevector,
                self.dotstatevector,
                self.phasesActivation,
                self.phasesProgress,
                self.phasesProgressVelocities,
                #inputs
                self.phaseVelocityExponentInput,
                self.BiasMatrix,
                self.stateConnectivityGreedinessAdjustment,
                self.stateConnectivityCompetingGreedinessAdjustment,
                self.phasesInput,
                self.velocityAdjustmentGain,
                self.noise_velocity,
                #parameters
                self.numStates,
                self.betaInv ,
                self.stateConnectivityAbs,
                self.stateConnectivitySignMap,
                self.stateConnectivityIsBidirectional,
                self.stateConnectivityNrEdges,
                self.rhoZero,
                self.rhoDelta,
                self.alpha,
                self.dt,
                self.dtInv,
                self.nonlinearityParamsLambda,
                self.nonlinearityParamsPsi,
                self.stateVectorExponent,
                self.speedLimit,
                self.epsilonLambda,
                self.emulateHybridAutomaton,
                self.triggervalue_successors
            )
            #note the currently most active state/transition (for informative purposes)
            # flattened argmax decodes into (successor, predecessor) indices
            i = _np.argmax(self.phasesActivation)
            self.currentPredecessor = i % self.numStates
            self.currentSuccessor = i // self.numStates
            self._recordState()
        return self.statevector
def get1DState(self):
"""
return value of a one-dimensional signal that indicates which state we are in, or in which transition
"""
value = self.currentPredecessor + (self.currentSuccessor - self.currentPredecessor) * self.phasesProgress[self.currentSuccessor,self.currentPredecessor]
return value
def sayState(self):
"""
returns a string describing the current state
"""
if self.currentPredecessor == self.currentSuccessor:
return "{0}".format(self.currentPredecessor )
else:
return "{0}->{1}".format(self.currentPredecessor , self.currentSuccessor)
    def updateDt(self, dt):
        """
        Update the time step used to integrate the dynamical system.

        Also recomputes all dt-dependent quantities: the inverse dt, the
        per-sample noise magnitude, the per-step alpha, and the rho matrices.
        """
        self.dt  = dt
        self.dtInv = 1.0 / dt
        self.epsilonPerSample = self.epsilon *_np.sqrt(self.dt*self.reuseNoiseSampleTimes)/dt  #factor accounts for the accumulation during a time step (assuming a Wiener process)
        self.alpha = self.alphaTime * self.dt
        self._updateRho()
    def updateEpsilon(self, epsilon):
        """
        Update the noise vector.

        Delegates to updateDt() because epsilonPerSample depends on both
        epsilon and dt.
        """
        self.epsilon = epsilon
        self.updateDt(self.dt) #need to recompute self.epsilonPerSample
    def updateSuccessors(self, listoflist):
        """
        Recompute the system according to the given list of successors.

        listoflist[i] contains the indices of the states reachable from state i.
        """
        self.successors=listoflist
        self._updateRho()
    def updateGreediness(self, greedinesses):
        """
        update the greediness for competing transitions / successor states

        Low values make the system maintain co-activated transitions for a long time, high values make transitions very competitive.
        0.0: complete indecisiveness (transitions do not compete at all and may not converge towards an exclusive successor state)
        1.0: behavior of the original SHC network by [1]
        20.0: extremely greedy transitions, behaves much like a discrete state machine
        negative values: abort transition and return to the predecessor state

        Absolute values less than 1.0 also reduce speed of transitions, 0.0 stops transitions completely.

        This value is considered during a transition away from the predecessor state,
        i.e. it influences the transition dynamics while honoring the basic state connectivity

        greediness: vector of size self.numStates or matrix of size (numStates,numStates)
            scalar: set a common greediness value for all competing transitions
            vector: greediness values for all competing transitions leading to the related successor state
            matrix: set greediness value for each competing transition individually
        """
        # normalize scalar/vector input to a (1, numStates) row for broadcasting
        greedinesses = _np.asarray(greedinesses)
        if greedinesses.ndim == 1:
            greedinesses = greedinesses[_np.newaxis,:]
        elif greedinesses.ndim == 0:
            greedinesses = _np.full((1, self.numStates),greedinesses)

        #adjust the strength / reverse direction of the outgoing shc's according to greedinesses:
        greediness_successorstates = _np.clip((0.5*greedinesses-0.5), -1.0, 0.0)  # _np.clip(g, -self.nu_term, 0.0)
        strength = self.stateConnectivityAbs * greediness_successorstates.T #works for (1,-1) transition pairs too
        self.stateConnectivityGreedinessTransitions = strength + strength.T
        #Adjust competition between nodes according to their greediness:
        kappa=0.
#        self.stateConnectivityGreedinessCompetingSuccessors = self.competingStates * 0.5*(1-(1.+kappa)*greedinesses+kappa*greedinesses.T)
        self.stateConnectivityGreedinessCompetingSuccessors = self.competingStates * 0.5*(1-greedinesses)
    def updateCompetingTransitionGreediness(self,greedinesses):
        """Deprecated alias for updateGreediness()."""
        _warnings.warn("Please replace updateCompetingTransitionGreediness with updateGreediness asap!", DeprecationWarning, stacklevel=2)
        self.updateGreediness(greedinesses)
def _predecessorListToSuccessorList(self, predecessors):
""" helper to convert lists of predecessor states into lists of successor states"""
successors = [ [] for i in range(self.numStates) ] #create a list of lists
for i, predecessorsPerState in enumerate(predecessors):
for pre in predecessorsPerState:
successors[pre].append(i)
return successors
def updatePredecessors(self, listoflist):
"""
recompute the system according to the given list of predecessors
"""
self.successors = self._predecessorListToSuccessorList(predecessors)
self._updateRho()
def getPredecessors(self):
"""
return the predecessors
"""
successors = [ [] for i in range(self.numStates) ] #create a list of lists
for i, predecessorsPerState in enumerate(predecessors):
for pre in predecessorsPerState:
successors[pre].append(i)
return successors
    def updateBiases(self, successorBias):
        """
        changes the "bias" input array

        Small values bias the system to hasten transitions towards that state

        Large, short spikes can be used to override any state and force the system into any state,
        regardless of state connectivity

        successorBias: numpy array of biases for each (successor state biased towards, current state) pair

            if scalar: set all successor biases to the same value
            if vector: set successor biases to the given vector for every state
            if matrix: set each (successor state, current state) pair individually
        """
        bias = _np.asarray(successorBias)
        # Only the desired matrix is written here; step() low-pass filters
        # BiasMatrix towards it to avoid sudden velocity jumps.
        if bias.ndim == 1:
            # vector case: apply only along existing transitions
            self.BiasMatrixDesired[:,:] = (self.stateConnectivity) * bias[:,_np.newaxis]
        else:
            # scalar (broadcast) or full-matrix case
            self.BiasMatrixDesired[:,:] = bias
def updateB(self, successorBias):
_warnings.warn("Please replace updateB() with updateBiases() asap!",stacklevel=2)
self.updateBiases(successorBias)
def updateTransitionTriggerInput(self, successorBias):
_warnings.warn("Please replace updateTransitionTriggerInput() with updateBiases() asap!",stacklevel=2)
self.updateBiases(successorBias)
    def updatePhasesInput(self, phases):
        """
        changes the Psi_d matrix

        Use this as phase reference to sync the system with a phase from perception

        phases: array broadcastable to (numStates, numStates); copied in place.
        """
        _np.copyto(self.phasesInput, phases)
    def updateVelocityEnslavementGain(self, gains):
        """
        changes the K_p matrix

        Set the gain values to use for each phase transition.

        gains: array broadcastable to (numStates, numStates); copied in place.
        """
        _np.copyto(self.velocityAdjustmentGain, gains)
    def updateTransitionPhaseVelocityExponentInput(self, limits):
        """
        Update the matrix that specifies how fast the given phases should progress

        Each element effectively is an exponent with base 2 for adjusting each phase velocity individually

        limits[j,i]: exponent for the transition from i to j
        limits[i,i]: 0 (enforced implicitly)
        if limits is a vector: treat it as common exponent for transitions of the same predecessor state
        if limits is a scalar: set as common exponent for all transitions

        While phase velocity can also be controlled by the self.alpha vector directly,
        large variations to individual states' alpha parameter can alter the
        convergence behavior and we may lose the stable heteroclinic channel properties

        This method here effectly "scales" the timeline during transitions
        """
        limits = _np.asarray(limits)
        if limits.ndim == 1:
            limits = limits[_np.newaxis,:]
        elif limits.ndim == 0:
            limits = limits[_np.newaxis,_np.newaxis]
        self.phaseVelocityExponentInput[:,:] = limits
        # NOTE(review): the docstring claims limits[i,i] = 0 is enforced, but the
        # fill_diagonal call below is commented out — confirm the diagonal is
        # ignored downstream, or re-enable the line.
        #_np.fill_diagonal(self.phaseVelocityExponentInput , 0.0)
    def getHistory(self):
        """
        return the historic values for plotting

        Returns a tuple (statehistory, phasesActivationHistory,
        phasesProgressHistory), each truncated to the rows actually recorded.

        Raises RuntimeError if history recording was disabled (recordSteps == 0).
        """
        if self.statehistorylen == 0:
            raise RuntimeError("no history is being recorded")
        return  (self.statehistory[:self.historyIndex,:],
                  self.phasesActivationHistory[:self.historyIndex,:,:],
                  self.phasesProgressHistory[:self.historyIndex,:,:]
        )
def _sanitizeParam(self, p):
"""
internal helper to provide robust handling of lists and numpy array input data
"""
if _np.isscalar(p):
sanitizedP = _np.empty((self.numStates))
sanitizedP.fill(float(p))
else:
try:
p = p[0:self.numStates]
except IndexError:
raise Exception("Parameter has not the length of numStates!")
sanitizedP = _np.array(p)
return sanitizedP
    def _recordState(self):
        """
        internal helper to save the current state for later plotting

        Silently stops recording once the preallocated history buffers
        (statehistorylen rows) are full.
        """
        if self.historyIndex < self.statehistorylen:
            self.statehistory[self.historyIndex, 0] = self.t
            self.statehistory[self.historyIndex, 1:self.numStates+1] = self.statevector
            self.phasesActivationHistory[self.historyIndex, :,:] = self.phasesActivation
            self.phasesProgressHistory[self.historyIndex, :,:] = self.phasesProgress
        # NOTE(review): this guard repeats the condition above; the two blocks
        # could be merged, but are kept as-is to preserve exact behavior.
        if self.historyIndex < self.statehistorylen:
            self.historyIndex = self.historyIndex + 1
| 2.609375
| 3
|
mypong2/emanuel.henrique/mypongpygame.py
|
LUDUSLab/stem-games
| 2
|
12774159
|
"""MyPong: a simple two-player Pong clone (human vs. simple AI) using pygame.

Left paddle is controlled with the arrow keys; the right paddle tracks the
ball with some randomness. First side to reach SCORE_MAX wins.
"""
import pygame
import random
pygame.init()
COLOR_BLACK = (0, 0, 0)
COLOR_WHITE = (255, 255, 255)
SCORE_MAX = 10  # score needed to win
tn = [1, 2, 3, 4, 5]  # NOTE(review): unused — candidate for removal
size = (1280, 720)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("MyPong - PyGame Edition - 2021.01.30")
# NOTE(review): all asset paths below are hard-coded absolute Windows paths;
# the script only runs on the original author's machine as-is.
# score text
score_font = pygame.font.Font('C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf', 44)
score_text = score_font.render('00 x 00', True, COLOR_WHITE, COLOR_BLACK)
score_text_rect = score_text.get_rect()
score_text_rect.center = (680, 50)
# victory text
victory_font = pygame.font.Font('C:/Users/Pichau/Documents/stem-games/mypong2/assets/PressStart2P.ttf', 100)
victory_text = victory_font .render('VICTORY', True, COLOR_WHITE, COLOR_BLACK)
victory_text_rect = score_text.get_rect()
victory_text_rect.center = (450, 350)
# sound effects
bounce_sound_effect = pygame.mixer.Sound('C:/Users/Pichau/Documents/stem-games/mypong2/assets/bounce.wav')
scoring_sound_effect = pygame.mixer.Sound('C:/Users/Pichau/Documents/stem-games/mypong2/assets'
                                          '/258020__kodack__arcade-bleep-sound.wav')
# player 1
player_1 = pygame.image.load("C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_Player.png")
player_1_y = 290
player_1_move_up = False
player_1_move_down = False
# player 2 - robot
player_2 = pygame.image.load("C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_Paddle_AI.png")
player_2_y = 290
# ball
ball = pygame.image.load("C:/Users/Pichau/Documents/stem-games/mypong2/assets/emanuel.henrique_ball.png")
ball_x = 640
ball_y = 360
ball_dx = 3  # horizontal velocity (pixels per frame)
ball_dy = 3  # vertical velocity (pixels per frame)
# score
score_1 = 0
score_2 = 0
game_loop = True
game_clock = pygame.time.Clock()
# game loop
while game_loop:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_loop = False
        # keystroke events
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_UP:
                player_1_move_up = True
            if event.key == pygame.K_DOWN:
                player_1_move_down = True
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_UP:
                player_1_move_up = False
            if event.key == pygame.K_DOWN:
                player_1_move_down = False
    # checking the victory condition
    if score_1 < SCORE_MAX and score_2 < SCORE_MAX:
        # clear screen
        screen.fill(COLOR_BLACK)
        # ball movement
        ball_x = ball_x + ball_dx
        ball_y = ball_y + ball_dy
        # ball collision with the wall (top/bottom): reflect vertically
        if ball_y > 700:
            ball_dy *= -1
            bounce_sound_effect.play()
        elif ball_y <= 0:
            ball_dy *= -1
            bounce_sound_effect.play()
        # ball collision with the player 1 's paddle: reflect and randomize spin
        if (ball_x == 100) and (player_1_y < ball_y + 25) and (player_1_y + 150 > ball_y):
            ball_dx *= -1
            ball_dy = random.randrange(-15, 16)
            bounce_sound_effect.play()
            print(ball_dy)
        # ball collision with the player 2 's paddle
        if (ball_x > 1140) and (player_2_y < ball_y + 25) and (player_2_y + 150 > ball_y):
            ball_dx *= -1
            ball_dy = random.randrange(-15, 16)
            bounce_sound_effect.play()
            print(ball_dy)
        # scoring points: ball left the field; re-center and credit the scorer
        if ball_x < -50:
            ball_x = 640
            ball_y = 360
            ball_dy *= -1
            ball_dx *= -1
            score_2 += 1
            scoring_sound_effect.play()
        elif ball_x > 1280:
            ball_x = 640
            ball_y = 360
            ball_dy *= -1
            ball_dx *= -1
            score_1 += 1
            scoring_sound_effect.play()
        # player 1 movement
        if player_1_move_up:
            player_1_y -= 10
        else:
            player_1_y += 0
        if player_1_move_down:
            player_1_y += 10
        else:
            player_1_y += 0
        # clamp paddle to the screen
        if player_1_y <= 0:
            player_1_y = 0
        elif player_1_y >= 570:
            player_1_y = 570
        # player 2 "Artificial Intelligence"
        # player_2_y = ball_y
        # tracks the ball, but only reacts on a random ~1/31 chance per frame
        if (ball_y < 0) or (ball_y < player_2_y+20) and random.randrange(31) == 10:
            player_2_y = ball_y-30
        if (ball_y > 0) or (ball_y > player_2_y-20) and random.randrange(31) == 10:
            player_2_y = ball_y+30
        if player_2_y <= 0:
            player_2_y = 0
        elif player_2_y >= 570:
            player_2_y = 570
        score_text = score_font.render(str(score_1) + ' x ' + str(score_2), True, COLOR_WHITE, COLOR_BLACK)
        # drawing objects
        screen.blit(ball, (ball_x, ball_y))
        screen.blit(player_1, (60, player_1_y))
        screen.blit(player_2, (1180, player_2_y))
        screen.blit(score_text, score_text_rect)
    else:
        # drawing victory
        screen.fill(COLOR_BLACK)
        screen.blit(score_text, score_text_rect)
        screen.blit(victory_text, victory_text_rect)
    # update screen
    pygame.display.flip()
    game_clock.tick(60)  # cap at 60 FPS
pygame.quit()
| 2.71875
| 3
|
members/views/frontend/fsrc_views.py
|
louking/members
| 1
|
12774160
|
<filename>members/views/frontend/fsrc_views.py
'''
fsrc_views - views specific to the <NAME>asers Running Club
===========================================================================
'''
# standard
from tempfile import TemporaryDirectory
from os.path import join as pathjoin
from mimetypes import guess_type
from shutil import rmtree
# pypi
from flask import request, render_template, jsonify, current_app
from flask.views import MethodView
from werkzeug.utils import secure_filename
from googleapiclient.http import MediaFileUpload
from loutilities.googleauth import GoogleAuthService
from loutilities.flask_helpers.mailer import sendmail
# homegrown
from . import bp
def allowed_file(filename):
    # Stub: currently accepts every uploaded file regardless of extension.
    # TODO(review): restrict to an allowlist of extensions if uploads should be limited.
    return True
# todo: move this into GoogleAuthService
class FsrcGoogleAuthService(GoogleAuthService):
    """Google Drive helper adding file/folder creation and listing on top of GoogleAuthService."""
    def create_file(self, folderid, filename, contents, doctype='html'):
        """
        create file in drive folder

        ..note::
            folderid must be shared read/write with services account email address

        :param folderid: drive id for folder file needs to reside in
        :param filename: name for file on drive
        :param contents: path for file contents
        :param doctype: 'html' or 'docx', default 'html'
        :return: google drive id for created file
        """
        # mimetype depends on doctype
        # NOTE(review): doctype is actually unused — the mimetype is guessed
        # from the filename extension; confirm whether doctype can be dropped.
        mimetype = guess_type(filename)[0]
        ## upload (adapted from https://developers.google.com/drive/api/v3/manage-uploads)
        file_metadata = {
            'name': filename,
            # see https://developers.google.com/drive/api/v3/mime-types
            'mimeType': mimetype,
            # see https://developers.google.com/drive/api/v3/folder
            'parents': [folderid],
        }
        # create file
        media = MediaFileUpload(
            contents,
            mimetype=mimetype,
            resumable=True
        )
        file = self.drive.files().create(
            body=file_metadata,
            media_body=media,
            fields='id'
        ).execute()
        fileid = file.get('id')
        return fileid

    def create_folder(self, parentid, foldername):
        """Create a subfolder under parentid on Drive and return its file id."""
        file_metadata = {
            'name': foldername,
            # see https://developers.google.com/drive/api/v3/mime-types
            'mimeType': 'application/vnd.google-apps.folder',
            # see https://developers.google.com/drive/api/v3/folder
            'parents': [parentid],
        }
        file = self.drive.files().create(
            body=file_metadata,
            fields='id'
        ).execute()
        fileid = file.get('id')
        return fileid

    def list_files(self, folderid, filename=None):
        """List files inside a Drive folder, optionally filtered by exact name.

        :return: list of single-entry dicts {name: fileid}
        """
        retfiles = []
        page_token = None
        while True:
            # https://developers.google.com/drive/api/v3/search-files
            q = '\'{}\' in parents'.format(folderid)
            if filename:
                q += ' and name = \'{}\''.format(filename)
            response = self.drive.files().list(
                q=q,
                spaces='drive',
                fields='nextPageToken, files(id, name)',
                pageToken=page_token
            ).execute()
            for file in response.get('files', []):
                retfiles.append({file.get('name'): file.get('id')})
            # paginate until Drive stops returning a nextPageToken
            page_token = response.get('nextPageToken', None)
            if not page_token:
                break
        return retfiles
class FsrcMemScholarshipAppl(MethodView):
    """Flask view for the FSRC memorial scholarship application form.

    GET renders the form; POST uploads the submitted files to a per-applicant
    Google Drive folder and notifies the administrator by email.
    """
    def get(self):
        # render the application form
        return render_template('fsrc-scholarship-appl.jinja2')

    def post(self):
        # see https://flask.palletsprojects.com/en/1.1.x/patterns/fileuploads/
        # check if the post request has the file part
        try:
            if 'file[0]' not in request.files:
                return jsonify({'status': 'error', 'error': 'No files submitted'})

            gs = FsrcGoogleAuthService(current_app.config['GSUITE_SERVICE_KEY_FILE'], current_app.config['GSUITE_SCOPES'])
            parentid = current_app.config['FSRC_SCHOLARSHIP_FOLDER']
            tmpdir = TemporaryDirectory(prefix='mbr-frsc-')
            # NOTE(review): name/email are read and validated inside the file
            # loop, so validation repeats per file; early returns here also
            # skip the rmtree cleanup below (TemporaryDirectory's finalizer
            # still cleans up eventually).
            for i in range(int(request.form['numfiles'])):
                file = request.files['file[{}]'.format(i)]
                # if user does not select file, browser also
                # submit an empty part without filename
                if file.filename == '':
                    return jsonify({'status': 'error', 'error': 'Empty filename detected for file {}'.format(i)})
                name = request.form.get('name', '')
                if not name:
                    return jsonify({'status': 'error', 'error': 'Name must be supplied'})
                email = request.form.get('email', '')
                if not email:
                    return jsonify({'status': 'error', 'error': 'Email must be supplied'})
                if file and allowed_file(file.filename):
                    filename = secure_filename(file.filename)
                    applnfoldername = '{} {}'.format(name, email)
                    # has applicant already submitted? reuse filedid if so
                    # should not be more than one of these, but use the first one if any found
                    applnfolders = gs.list_files(parentid, filename=applnfoldername)
                    if applnfolders:
                        folderid = applnfolders[0][applnfoldername]
                    else:
                        folderid = gs.create_folder(parentid, applnfoldername)
                    current_app.logger.info('fsrcmemscholarshipappl: {}/{} processing '
                                            'file {}'.format(name, email, filename))
                    # stage the upload through a temp file on disk
                    docpath = pathjoin(tmpdir.name, filename)
                    file.save(docpath)
                    fileid = gs.create_file(folderid, filename, docpath, doctype=None)

            # remove temporary directory
            rmtree(tmpdir.name, ignore_errors=True)

            # send mail to administrator with a link to the applicant's folder
            foldermeta = gs.drive.files().get(fileId=folderid, fields='webViewLink').execute()
            folderlink = foldermeta['webViewLink']
            subject = "[FSRC Memorial Scholarship] Application from {}".format(name)
            from dominate.tags import div, p, a
            from dominate.util import text
            body = div()
            with body:
                p('Application received from {} {}'.format(name, email))
                with p():
                    text('See ')
                    a(folderlink, href=folderlink)
            html = body.render()
            tolist = current_app.config['FSRC_SCHOLARSHIP_EMAIL']
            fromlist = current_app.config['FSRC_SCHOLARSHIP_EMAIL']
            cclist = None
            sendmail(subject, fromlist, tolist, html, ccaddr=cclist)

            return jsonify({'status': 'OK'})

        except Exception as e:
            from traceback import format_exc
            from html import escape
            # NOTE(review): `error` (the full traceback) is computed but never
            # used — consider logging it; only repr(e) is returned to the client.
            error = format_exc()
            return jsonify({'status': 'error', 'error': escape(repr(e))})
# register the scholarship application endpoint on the blueprint
bp.add_url_rule('/fsrcmemscholarshipappl', view_func=FsrcMemScholarshipAppl.as_view('fsrcmemscholarshipappl'),
                methods=['GET', 'POST'])
| 2.109375
| 2
|
src/main/python/aut/udfs.py
|
ruebot/aut
| 113
|
12774161
|
<reponame>ruebot/aut<filename>src/main/python/aut/udfs.py
from pyspark import SparkContext
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.functions import col
def compute_image_size(col):
    """Column expression wrapping the JVM computeImageSize UDF."""
    sc = SparkContext.getOrCreate()
    # NOTE(review): sc.getOrCreate() on an instance is redundant (classmethod
    # access returning the same context); the same pattern repeats below.
    udf = (
        sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeImageSize().apply
    )
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def compute_md5(col):
    """Column expression wrapping the JVM computeMD5 UDF."""
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeMD5().apply
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def compute_sha1(col):
    """Column expression wrapping the JVM computeSHA1 UDF."""
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.computeSHA1().apply
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def detect_language(col):
    """Column expression wrapping the JVM detectLanguage UDF."""
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.detectLanguage().apply
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def detect_mime_type_tika(col):
    """Column expression wrapping the JVM detectMimeTypeTika UDF."""
    sc = SparkContext.getOrCreate()
    udf = (
        sc.getOrCreate()
        ._jvm.io.archivesunleashed.udfs.package.detectMimeTypeTika()
        .apply
    )
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def extract_boilerplate(col):
    """Column expression wrapping the JVM extractBoilerpipeText UDF."""
    sc = SparkContext.getOrCreate()
    udf = (
        sc.getOrCreate()
        ._jvm.io.archivesunleashed.udfs.package.extractBoilerpipeText()
        .apply
    )
    return Column(udf(_to_seq(sc, [col], _to_java_column)))
def extract_date(col, dates):
    """Column expression wrapping the JVM extractDate UDF.

    Bug fix: the original accepted `dates` but never forwarded it to the
    JVM UDF, so the date-component selection was silently ignored (compare
    extract_links / extract_image_links, which do forward their second
    argument).
    """
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDate().apply
    return Column(udf(_to_seq(sc, [col, dates], _to_java_column)))
def extract_domain(col):
    """Column expression wrapping the JVM extractDomain UDF."""
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractDomain().apply
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def extract_image_links(col, image_links):
    """Column expression wrapping the JVM extractImageLinks UDF (takes two columns)."""
    sc = SparkContext.getOrCreate()
    udf = (
        sc.getOrCreate()
        ._jvm.io.archivesunleashed.udfs.package.extractImageLinks()
        .apply
    )
    return Column(udf(_to_seq(sc, [col, image_links], _to_java_column)))


def extract_links(col, links):
    """Column expression wrapping the JVM extractLinks UDF (takes two columns)."""
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.extractLinks().apply
    return Column(udf(_to_seq(sc, [col, links], _to_java_column)))


def get_extension_mime(col, mime):
    """Column expression wrapping the JVM getExtensionMime UDF (takes two columns)."""
    sc = SparkContext.getOrCreate()
    udf = (
        sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.getExtensionMime().apply
    )
    return Column(udf(_to_seq(sc, [col, mime], _to_java_column)))


def remove_http_header(col):
    """Column expression wrapping the JVM removeHTTPHeader UDF."""
    sc = SparkContext.getOrCreate()
    udf = (
        sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removeHTTPHeader().apply
    )
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def remove_html(col):
    """Column expression wrapping the JVM removeHTML UDF."""
    sc = SparkContext.getOrCreate()
    udf = sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removeHTML().apply
    return Column(udf(_to_seq(sc, [col], _to_java_column)))


def remove_prefix_www(col):
    """Column expression wrapping the JVM removePrefixWWW UDF."""
    sc = SparkContext.getOrCreate()
    udf = (
        sc.getOrCreate()._jvm.io.archivesunleashed.udfs.package.removePrefixWWW().apply
    )
    return Column(udf(_to_seq(sc, [col], _to_java_column)))
| 2.203125
| 2
|
imgy/settings.py
|
claytonbrown/imgy
| 10
|
12774162
|
import os

# AWS region the service runs in; taken from the environment (None if unset).
AWS_REGION = os.environ.get('AWS_REGION')
# S3 bucket holding the images; empty by default — must be configured before use.
BUCKET = ""
# max-age (seconds) for HTTP caching headers
CACHE_MAX_AGE = 3600
# default quality for lossy re-encoding (percent)
DEFAULT_QUALITY_RATE = 80
# image formats that use lossy compression (quality rate applies)
LOSSY_IMAGE_FMTS = ('jpg', 'jpeg', 'webp')
| 2
|
python/paddle/v2/parameters.py
|
yu239/Paddle
| 0
|
12774163
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle.proto.ParameterConfig_pb2 import ParameterConfig
from collections import OrderedDict
import paddle.trainer.config_parser as cp
import struct
import tarfile
import cStringIO
from topology import Topology
__all__ = ['Parameters', 'create']
def create(layers):
    """
    Create a parameter pool for the given network topology.

    :param layers: output layer(s) defining the network.
    :return: a Parameters object with one entry per learnable parameter,
        initialized where an initializer was registered for the name.
    """
    topology = Topology(layers)
    parameters = Parameters()
    initializer_map = cp.g_parameter_initializer_map
    for param_conf in topology.proto().parameters:
        parameters.__append_config__(param_conf)
        if param_conf.name in initializer_map:
            parameters[param_conf.name] = initializer_map[param_conf.name](param_conf.name)
    return parameters
class Parameters(object):
"""
`Parameters` manages all the learnable parameters in a neural network.
It stores parameters' information in an OrderedDict. The key is
the name of a parameter, and value is a parameter's configuration(in
protobuf format), such as initialization mean and std, its size, whether it
is a static parameter, and so on.
:param __param_conf__: store the configurations of learnable parameters in
the network in an OrderedDict. Parameter is added one by one into the
dict by following their created order in the network: parameters of
the previous layers in a network are careted first. You can visit the
parameters from bottom to top by iterating over this dict.
:type __param_conf__: OrderedDict
:param __gradient_machines__: all of the parameters in a neural network are
appended to a PaddlePaddle gradient machine, which is used internally to
copy parameter values between C++ and Python end.
:type __gradient_machines__: list
:param __tmp_params__: a dict to store dummy parameters if no
__gradient_machines__ is appended to `Parameters`.
:type __tmp_params__: dict
Basically usage is
.. code-block:: python
data = paddle.layers.data(...)
...
out = paddle.layers.fc(...)
parameters = paddle.parameters.create(out)
parameter_names = parameters.names()
fc_mat = parameters.get('fc')
print fc_mat
"""
def __init__(self):
self.__param_conf__ = OrderedDict()
self.__gradient_machines__ = []
self.__tmp_params__ = dict()
def __append_config__(self, param_conf):
"""
Append a parameter configuration. It used to initialize Parameters and
should be invoked only in paddle.parameters.create
:param param_conf: The parameter configuration in protobuf
:type param_conf: ParameterConfig
:return: Nothing
"""
if not isinstance(param_conf, ParameterConfig):
raise ValueError("param_conf must be paddle.proto.ParameterConfig")
if param_conf.name in self.__param_conf__:
raise ValueError("duplicated parameter %s" % param_conf.name)
self.__param_conf__[param_conf.name] = param_conf
def keys(self):
"""
keys are the names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.__param_conf__.keys()
def names(self):
"""
names of each parameter.
:return: list of parameter name
:rtype: list
"""
return self.keys()
def has_key(self, key):
"""
has_key return true if there are such parameter name == key
:param key: Parameter name
:type key: basestring
:return: True if contains such key
"""
return key in self.__param_conf__.keys()
def __iter__(self):
"""
Return an iterator of parameter name. It is used by `for loop`
or `in` operator.
.. code-block:: python
parameters = paddle.parameters.create(...)
if "fc_param" in parameters:
print 'OK'
:return: an iterator of parameter name
:rtype: iterator
"""
return iter(self.__param_conf__)
def __getter_inner(self, key, param_type):
import py_paddle.swig_paddle as api
shape = self.get_shape(key)
if len(self.__gradient_machines__) == 0:
# create new parameter in python numpy.
if key in self.__tmp_params__:
return self.__tmp_params__[key]
else:
return np.ndarray(shape=shape, dtype=np.float32)
else:
for each_gradient_machine in self.__gradient_machines__:
param = __get_parameter_in_gradient_machine__(
each_gradient_machine, key)
# for simplify implementation now, we always copy from C++
assert isinstance(param, api.Parameter)
val = param.getBuf(param_type)
assert isinstance(val, api.Vector)
val = val.copyToNumpyArray()
return val
# else continue
raise RuntimeError("Unexpected branch")
def __getitem__(self, key):
"""
Get parameter by parameter name. It uses Python dict syntax.
:note: It will always copy the parameter from C++ side.
:param key: Parameter name
:type key: basestring
:return: parameter value
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
return self.__getter_inner(key, api.PARAMETER_VALUE)
def get_shape(self, key):
"""
get shape of the parameter.
:param key: parameter name
:type key: basestring
:return: parameter's shape
:rtype: tuple
"""
if not isinstance(key, basestring):
raise ValueError("parameter name should be string")
if not self.has_key(key):
raise ValueError("No such parameter %s" % key)
conf = self.__param_conf__[key]
dims = conf.dims if conf.dims else (1, conf.size)
return tuple(map(int, dims))
def __setitem__(self, key, value):
"""
Set parameter by parameter name & value. It use Python dict syntax.
:note: It will always copy the parameter to C++ side.
:param key: Parameter name
:type key: basestring
:param value: Parameter matrix.
:type value: np.ndarray
:return: Nothing
"""
if not isinstance(value, np.ndarray):
raise ValueError("Must return ndarray")
value = value.astype(dtype=np.float32)
shape = self.get_shape(key)
if value.shape != shape:
raise ValueError("Value shape mismatch, expect %s, should %s" %
(shape, value.shape))
if len(self.__gradient_machines__) == 0:
self.__tmp_params__[key] = value
else:
for each_gradient_machine in self.__gradient_machines__:
__copy_parameter_to_gradient_machine__(each_gradient_machine,
key, value)
def get(self, parameter_name):
"""
Get parameter by parameter name.
:note: It will always copy the parameter from C++ side.
:param parameter_name: parameter name
:type parameter_name: basestring
:return: The parameter matrix.
:rtype: np.ndarray
"""
return self.__getitem__(key=parameter_name)
def get_grad(self, key):
"""
Get grandient by parameter name.
:note: It will always copy the parameter from C++ side.
:param key: parameter name
:type key: basestring
:return: The grandient matrix.
:rtype: np.ndarray
"""
import py_paddle.swig_paddle as api
if self.__param_conf__[key].is_static:
return np.zeros(self.__param_conf__[key].size, dtype=np.float32)
return self.__getter_inner(key, api.PARAMETER_GRADIENT)
def set(self, parameter_name, value):
"""
Set parameter by parameter name & matrix.
:param parameter_name: parameter name
:type parameter_name: basestring
:param value: parameter matrix
:type value: np.ndarray
:return: Nothing.
"""
self.__setitem__(key=parameter_name, value=value)
def append_gradient_machine(self, gradient_machine):
"""
append gradient machine to parameters. This method is used internally in
Trainer.train.
:param gradient_machine: PaddlePaddle C++ GradientMachine object.
:type gradient_machine: api.GradientMachine
:return:
"""
import py_paddle.swig_paddle as api
if not isinstance(gradient_machine, api.GradientMachine):
raise ValueError("gradient_machine should be api.GradientMachine")
if len(self.__tmp_params__) != 0:
for name, val in self.__tmp_params__.iteritems():
try:
__copy_parameter_to_gradient_machine__(gradient_machine,
name, val)
except ValueError:
# If no such parameter in gradient machine, then don't copy
pass
self.__gradient_machines__.append(gradient_machine)
def serialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
param = self.get(name)
size = reduce(lambda a, b: a * b, param.shape)
f.write(struct.pack("IIQ", 0, 4, size))
param = param.astype(np.float32)
s = param.tostring()
wrote_size = 0
buf = buffer(s, wrote_size, 65535)
while buf: # f.write crashes with big data blog.
f.write(buf)
wrote_size += 65535
buf = buffer(s, wrote_size, 65535)
def deserialize(self, name, f):
"""
:param name:
:param f:
:type f: file
:return:
"""
f.read(16) # header
arr = np.frombuffer(f.read(), dtype=np.float32)
self.set(name, arr.reshape(self.get_shape(name)))
def to_tar(self, f):
tar = tarfile.TarFile(fileobj=f, mode='w')
for nm in self.names():
buf = cStringIO.StringIO()
self.serialize(nm, buf)
tarinfo = tarfile.TarInfo(name=nm)
buf.seek(0)
tarinfo.size = len(buf.getvalue())
tar.addfile(tarinfo, buf)
conf = self.__param_conf__[nm]
confStr = conf.SerializeToString()
tarinfo = tarfile.TarInfo(name="%s.protobuf" % nm)
tarinfo.size = len(confStr)
buf = cStringIO.StringIO(confStr)
buf.seek(0)
tar.addfile(tarinfo, fileobj=buf)
@staticmethod
def from_tar(f):
"""
Create a `Parameters` object from the given file. And
the `Parameters` only contains the parameters in this
file. It is adapted the parameters are same in the
defined network and the given file. For example, it
can be used in the inference.
:param f: the initialized model file.
:type f: tar file
:return: A Parameters object.
:rtype: Parameters.
"""
params = Parameters()
tar = tarfile.TarFile(fileobj=f, mode='r')
for finfo in tar:
assert isinstance(finfo, tarfile.TarInfo)
if finfo.name.endswith('.protobuf'):
f = tar.extractfile(finfo)
conf = ParameterConfig()
conf.ParseFromString(f.read())
params.__append_config__(conf)
for param_name in params.names():
f = tar.extractfile(param_name)
params.deserialize(param_name, f)
return params
def init_from_tar(self, f):
"""
Different from `from_tar`, this interface can be used to
init partial network parameters from another saved model.
:param f: the initialized model file.
:type f: tar file
:return: Nothing.
"""
tar_param = Parameters.from_tar(f)
for pname in tar_param.names():
if pname in self.names():
self.set(pname, tar_param.get(pname))
def __get_parameter_in_gradient_machine__(gradient_machine, name):
    """
    Look up a single parameter by name inside a gradient machine.

    :param gradient_machine: the C++ gradient machine to search.
    :type gradient_machine: api.GradientMachine
    :param name: parameter name to find.
    :return: the matching parameter object.
    :rtype: api.Parameter
    :raises ValueError: if no parameter (or more than one) matches.
    """
    # NOTE: relies on Python 2 semantics — ``filter`` returns a list here,
    # so ``len(params)`` is valid.
    params = filter(lambda p: p.getName() == name,
                    gradient_machine.getParameters())
    if len(params) == 0:
        raise ValueError("No such parameter")
    elif len(params) > 1:
        raise ValueError("Unexpected branch")
    else:
        return params[0]
def __copy_parameter_to_gradient_machine__(gradient_machine, name, arr):
    """
    Copy a python ndarray into the gradient machine.

    The array is flattened before the copy, so ``arr`` may have any shape
    as long as its total size matches the parameter's size.

    :param gradient_machine: target C++ gradient machine.
    :type gradient_machine: api.GradientMachine
    :param name: name of the parameter to overwrite.
    :param arr: source values.
    :type arr: np.ndarray
    :return: Nothing (the copy happens in place on the C++ side).
    :raises ValueError: if ``name`` is not found in the gradient machine.
    """
    import py_paddle.swig_paddle as api
    param = __get_parameter_in_gradient_machine__(gradient_machine, name)
    vec = param.getBuf(api.PARAMETER_VALUE)
    assert isinstance(vec, api.Vector)
    vec.copyFromNumpyArray(arr.flatten())
| 2.65625
| 3
|
sdk/python/pulumi_cloudflare/access_policy.py
|
juchom/pulumi-cloudflare
| 0
|
12774164
|
<filename>sdk/python/pulumi_cloudflare/access_policy.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class AccessPolicy(pulumi.CustomResource):
application_id: pulumi.Output[str]
"""
The ID of the application the policy is
associated with.
"""
decision: pulumi.Output[str]
"""
Defines the action Access will take if the policy matches the user.
Allowed values: `allow`, `deny`, `bypass`
"""
excludes: pulumi.Output[list]
"""
A series of access conditions, see below for
full list.
* `emailDomains` (`list`)
* `emails` (`list`)
* `everyone` (`bool`)
* `ips` (`list`)
"""
includes: pulumi.Output[list]
"""
A series of access conditions, see below for
full list.
* `emailDomains` (`list`)
* `emails` (`list`)
* `everyone` (`bool`)
* `ips` (`list`)
"""
name: pulumi.Output[str]
"""
Friendly name of the Access Application.
"""
precedence: pulumi.Output[float]
"""
The unique precedence for policies on a single application. Integer.
"""
requires: pulumi.Output[list]
"""
A series of access conditions, see below for
full list.
* `emailDomains` (`list`)
* `emails` (`list`)
* `everyone` (`bool`)
* `ips` (`list`)
"""
zone_id: pulumi.Output[str]
"""
The DNS zone to which the access rule should be
added.
"""
def __init__(__self__, resource_name, opts=None, application_id=None, decision=None, excludes=None, includes=None, name=None, precedence=None, requires=None, zone_id=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Cloudflare Access Policy resource. Access Policies are used
in conjunction with Access Applications to restrict access to a
particular resource.
## Conditions
`require`, `exclude` and `include` arguments share the available
conditions which can be applied. The conditions are:
* `ip` - (Optional) A list of IP addresses or ranges. Example:
`ip = ["172.16.31.10", "10.0.0.0/2"]`
* `email` - (Optional) A list of email addresses. Example:
`email = ["<EMAIL>"]`
* `email_domain` - (Optional) A list of email domains. Example:
`email_domain = ["example.com"]`
* `everyone` - (Optional) Boolean indicating permitting access for all
requests. Example: `everyone = true`
> This content is derived from https://github.com/terraform-providers/terraform-provider-cloudflare/blob/master/website/docs/r/access_policy.html.markdown.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_id: The ID of the application the policy is
associated with.
:param pulumi.Input[str] decision: Defines the action Access will take if the policy matches the user.
Allowed values: `allow`, `deny`, `bypass`
:param pulumi.Input[list] excludes: A series of access conditions, see below for
full list.
:param pulumi.Input[list] includes: A series of access conditions, see below for
full list.
:param pulumi.Input[str] name: Friendly name of the Access Application.
:param pulumi.Input[float] precedence: The unique precedence for policies on a single application. Integer.
:param pulumi.Input[list] requires: A series of access conditions, see below for
full list.
:param pulumi.Input[str] zone_id: The DNS zone to which the access rule should be
added.
The **excludes** object supports the following:
* `emailDomains` (`pulumi.Input[list]`)
* `emails` (`pulumi.Input[list]`)
* `everyone` (`pulumi.Input[bool]`)
* `ips` (`pulumi.Input[list]`)
The **includes** object supports the following:
* `emailDomains` (`pulumi.Input[list]`)
* `emails` (`pulumi.Input[list]`)
* `everyone` (`pulumi.Input[bool]`)
* `ips` (`pulumi.Input[list]`)
The **requires** object supports the following:
* `emailDomains` (`pulumi.Input[list]`)
* `emails` (`pulumi.Input[list]`)
* `everyone` (`pulumi.Input[bool]`)
* `ips` (`pulumi.Input[list]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if application_id is None:
raise TypeError("Missing required property 'application_id'")
__props__['application_id'] = application_id
if decision is None:
raise TypeError("Missing required property 'decision'")
__props__['decision'] = decision
__props__['excludes'] = excludes
if includes is None:
raise TypeError("Missing required property 'includes'")
__props__['includes'] = includes
if name is None:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
__props__['precedence'] = precedence
__props__['requires'] = requires
if zone_id is None:
raise TypeError("Missing required property 'zone_id'")
__props__['zone_id'] = zone_id
super(AccessPolicy, __self__).__init__(
'cloudflare:index/accessPolicy:AccessPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, application_id=None, decision=None, excludes=None, includes=None, name=None, precedence=None, requires=None, zone_id=None):
"""
Get an existing AccessPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_id: The ID of the application the policy is
associated with.
:param pulumi.Input[str] decision: Defines the action Access will take if the policy matches the user.
Allowed values: `allow`, `deny`, `bypass`
:param pulumi.Input[list] excludes: A series of access conditions, see below for
full list.
:param pulumi.Input[list] includes: A series of access conditions, see below for
full list.
:param pulumi.Input[str] name: Friendly name of the Access Application.
:param pulumi.Input[float] precedence: The unique precedence for policies on a single application. Integer.
:param pulumi.Input[list] requires: A series of access conditions, see below for
full list.
:param pulumi.Input[str] zone_id: The DNS zone to which the access rule should be
added.
The **excludes** object supports the following:
* `emailDomains` (`pulumi.Input[list]`)
* `emails` (`pulumi.Input[list]`)
* `everyone` (`pulumi.Input[bool]`)
* `ips` (`pulumi.Input[list]`)
The **includes** object supports the following:
* `emailDomains` (`pulumi.Input[list]`)
* `emails` (`pulumi.Input[list]`)
* `everyone` (`pulumi.Input[bool]`)
* `ips` (`pulumi.Input[list]`)
The **requires** object supports the following:
* `emailDomains` (`pulumi.Input[list]`)
* `emails` (`pulumi.Input[list]`)
* `everyone` (`pulumi.Input[bool]`)
* `ips` (`pulumi.Input[list]`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["application_id"] = application_id
__props__["decision"] = decision
__props__["excludes"] = excludes
__props__["includes"] = includes
__props__["name"] = name
__props__["precedence"] = precedence
__props__["requires"] = requires
__props__["zone_id"] = zone_id
return AccessPolicy(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 1.71875
| 2
|
py/30. Substring with Concatenation of All Words.py
|
longwangjhu/LeetCode
| 3
|
12774165
|
<reponame>longwangjhu/LeetCode
# https://leetcode.com/problems/substring-with-concatenation-of-all-words/
# You are given a string s and an array of strings words of the same length.
# Return all starting indices of substring(s) in s that is a concatenation of each
# word in words exactly once, in any order, and without any intervening
# characters.
# You can return the answer in any order.
################################################################################
# sliding window, use dict for comparison
# check s[0 : m * k] == words
# move sliding window -> check s[k : m * k + k] == words
# loop over offset in range(k) -> check s[offset : offset + m * k] == words
from collections import defaultdict
class Solution:
    def findSubstring(self, s: str, words: List[str]) -> List[int]:
        """
        Return every start index in ``s`` at which some concatenation of
        all ``words`` (each used exactly once, in any order, with no gaps)
        begins.

        Sliding-window approach: all words share length ``k``, so for each
        alignment offset in ``[0, k)`` a window of ``m`` words is slid
        across ``s`` one word at a time, maintaining a running word-count
        dict that is compared against the target counts. O(n * k) overall.
        """
        n = len(s)
        m = len(words)
        k = len(words[0])

        # Target multiset of words.
        target = defaultdict(int)
        for w in words:
            target[w] += 1

        starts = []
        window = defaultdict(int)
        for offset in range(k):
            window.clear()
            lo = offset
            hi = offset + m * k
            if hi > n:
                return starts
            # Fill the initial window [lo, hi).
            for j in range(lo, hi, k):
                window[s[j:j + k]] += 1
            if window == target:
                starts.append(lo)
            # Slide the window right one word at a time.
            for j in range(hi, n, k):
                incoming = s[j:j + k]
                outgoing = s[j - m * k:j - m * k + k]
                window[incoming] += 1
                window[outgoing] -= 1
                if window[outgoing] == 0:
                    window.pop(outgoing)
                if window == target:
                    starts.append(j - m * k + k)
        return starts
| 3.515625
| 4
|
deftwit/forms.py
|
tobias-fyi/deftwit
| 0
|
12774166
|
"""
:: deftwit.forms ::
A source of truthyness for deftwit wtforms.
"""
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired, Length
from deftwit.models import DB, User, Tweet
class GetUserForm(FlaskForm):
    """
    Form for submitting a Twitter handle to be added to the database.

    Inherits from flask_wtf.FlaskForm, the Flask-specific subclass of
    WTForms :class:`~wtforms.form.Form`.
    """

    # Text field for the target Twitter handle; Twitter handles are
    # 2-15 characters, hence the Length validator bounds.
    handle = StringField(
        "Twitter Handle", validators=[DataRequired(), Length(min=2, max=15)]
    )
    # Submit button to add the user to the db
    submit = SubmitField("Add User")
class PredictForm(FlaskForm):
    """
    Form for selecting two Twitter users and comparing them based on a
    text input field.

    Inherits from flask_wtf.FlaskForm, the Flask-specific subclass of
    WTForms :class:`~wtforms.form.Form`.
    """

    # NOTE(review): this query runs at class-definition (import) time, so
    # the choice lists are frozen until the module is reloaded — users
    # added later won't appear. TODO: populate choices per-request instead.
    users = User.query.all()
    # Create the selection fields - choice tuples defined using list comprehension
    user_1 = SelectField(
        "Twit #1", choices=[(user.handle, user.handle) for user in users],
    )
    user_2 = SelectField(
        "Twit #2", choices=[(user.handle, user.handle) for user in users],
    )
    # TODO: create function that generates a random tweet
    tweet_text = StringField(
        "Tweet text", validators=[DataRequired(), Length(min=1, max=240)]
    )
    submit = SubmitField("Predict")
| 3.015625
| 3
|
python_raster_functions/ObjectDetector.py
|
ArcGIS/raster-deep-learning
| 154
|
12774167
|
<reponame>ArcGIS/raster-deep-learning<gh_stars>100-1000
'''
Copyright 2018 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import importlib
import json
import os
import sys
sys.path.append(os.path.dirname(__file__))
from fields import fields
from features import features
import numpy as np
import prf_utils
class GeometryType:
    """Integer codes for the geometry types this raster function can emit."""

    # Codes returned by getGeometryType(); this detector always reports
    # Polygon (see ObjectDetector.getGeometryType below in this file).
    Point = 1
    Multipoint = 2
    Polyline = 3
    Polygon = 4
class ObjectDetector:
def __init__(self):
self.name = 'Object Detector'
self.description = 'This python raster function applies deep learning model to detect objects in imagery'
def initialize(self, **kwargs):
if 'model' not in kwargs:
return
model = kwargs['model']
model_as_file = True
try:
with open(model, 'r') as f:
self.json_info = json.load(f)
except FileNotFoundError:
try:
self.json_info = json.loads(model)
model_as_file = False
except json.decoder.JSONDecodeError:
raise Exception("Invalid model argument")
if 'device' in kwargs:
device = kwargs['device']
if device < -1:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
device = prf_utils.get_available_device()
os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
sys.path.append(os.path.dirname(__file__))
framework = self.json_info['Framework']
if 'ModelConfiguration' in self.json_info:
if isinstance(self.json_info['ModelConfiguration'], str):
ChildModelDetector = getattr(importlib.import_module(
'{}.{}'.format(framework, self.json_info['ModelConfiguration'])), 'ChildObjectDetector')
else:
ChildModelDetector = getattr(importlib.import_module(
'{}.{}'.format(framework, self.json_info['ModelConfiguration']['Name'])), 'ChildObjectDetector')
else:
raise Exception("Invalid model configuration")
self.child_object_detector = ChildModelDetector()
self.child_object_detector.initialize(model, model_as_file)
def getParameterInfo(self):
required_parameters = [
{
'name': 'raster',
'dataType': 'raster',
'required': True,
'displayName': 'Raster',
'description': 'Input Raster'
},
{
'name': 'model',
'dataType': 'string',
'required': True,
'displayName': 'Input Model Definition (EMD) File',
'description': 'Input model definition (EMD) JSON file'
},
{
'name': 'device',
'dataType': 'numeric',
'required': False,
'displayName': 'Device ID',
'description': 'Device ID'
},
{
'name': 'padding',
'dataType': 'numeric',
'value': 0,
'required': False,
'displayName': 'Padding',
'description': 'Padding'
},
{
'name': 'score_threshold',
'dataType': 'numeric',
'value': 0.6,
'required': False,
'displayName': 'Confidence Score Threshold [0.0, 1.0]',
'description': 'Confidence score threshold value [0.0, 1.0]'
},
]
if 'BatchSize' not in self.json_info:
required_parameters.append(
{
'name': 'batch_size',
'dataType': 'numeric',
'required': False,
'value': 1,
'displayName': 'Batch Size',
'description': 'Batch Size'
},
)
return self.child_object_detector.getParameterInfo(required_parameters)
def getConfiguration(self, **scalars):
configuration = self.child_object_detector.getConfiguration(**scalars)
if 'DataRange' in self.json_info:
configuration['dataRange'] = tuple(self.json_info['DataRange'])
configuration['inheritProperties'] = 2|4|8
configuration['inputMask'] = True
return configuration
def getFields(self):
return json.dumps(fields)
def getGeometryType(self):
return GeometryType.Polygon
def vectorize(self, **pixelBlocks):
# set pixel values in invalid areas to 0
raster_mask = pixelBlocks['raster_mask']
raster_pixels = pixelBlocks['raster_pixels']
raster_pixels[np.where(raster_mask == 0)] = 0
pixelBlocks['raster_pixels'] = raster_pixels
polygon_list, scores, classes = self.child_object_detector.vectorize(**pixelBlocks)
# bounding_boxes = bounding_boxes.tolist()
scores = scores.tolist()
classes = classes.tolist()
features['features'] = []
for i in range(len(polygon_list)):
rings = [[]]
for j in range(polygon_list[i].shape[0]):
rings[0].append(
[
polygon_list[i][j][1],
polygon_list[i][j][0]
]
)
features['features'].append({
'attributes': {
'OID': i + 1,
'Class': self.json_info['Classes'][classes[i] - 1]['Name'],
'Confidence': scores[i]
},
'geometry': {
'rings': rings
}
})
return {'output_vectors': json.dumps(features)}
| 2.453125
| 2
|
pybpodgui_plugin/models/setup/board_task/board_task_window.py
|
ckarageorgkaneen/pybpod-gui-plugin
| 0
|
12774168
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import re
import pyforms as app
from pyforms.basewidget import BaseWidget
from pyforms.controls import ControlList
from pyforms.controls import ControlCheckBox
from pybpodgui_plugin.models.setup.task_variable import TaskVariableWindow
from pybpodgui_api.models.setup.board_task import BoardTask
logger = logging.getLogger(__name__)
class BoardTaskWindow(BoardTask, BaseWidget):
"""
Define here which fields from the board_task model should appear on the setup configuration window.
The model fields shall be defined as UI components like text fields, buttons, combo boxes, etc.
You may also assign actions to these components.
.. seealso::
This class heavy relies on the corresponding API module.
:py:class:`pybpodgui_api.models.setup.board_task.BoardTask`
**Properties**
states
A list of task states associated with this BoardTask. States are defined on the task code.
events
A list of task events associated with this BoardTask. Events are defined on the task code.
variables
A list of task variables associated with this BoardTask. Variables are defined on the task code.
**Private attributes**
_states
:class:`pyforms.controls.ControlList`
UI list to show BoardTask states.
_events
:class:`pyforms.controls.ControlList`
UI list to show BoardTask events.
_vars
:class:`pyforms.controls.ControlList`
UI list to show BoardTask variables.
_sync_btn
:class:`pyforms.controls.ControlButton`
Button to sync variables with board. Pressing the button fires the event :meth:`BoardTaskWindow.sync_variables`.
_load_btn
:class:`pyforms.controls.ControlButton`
Button to read task variables from board. Pressing the button fires the event :meth:`BoardTaskWindow._BoardTaskWindow__load_task_details`.
_formset
Describe window fields organization to PyForms.
**Methods**
"""
def __init__(self, setup):
BaseWidget.__init__(self, "Variables config for {0}".format(setup.name))
self._var_is_being_added = False
self._updvars = ControlCheckBox('Update variables')
self._vars = ControlList('Variables',
add_function=self.__add_variable,
remove_function=self.__remove_variable)
BoardTask.__init__(self, setup)
self._vars.horizontal_headers = ['NAME', 'TYPE', 'VALUE']
self._vars.data_changed_event = self.__varslist_data_changed_evt
self._formset = ['_updvars', '_vars']
self._variable_rule = re.compile('^[A-Z0-9\_]+$')
@property
def update_variables(self):
return self._updvars.value
@update_variables.setter
def update_variables(self, value):
self._updvars.value = value
def create_variable(self, name=None, value=None, datatype='string'):
return TaskVariableWindow(self, name, value, datatype)
def __varslist_data_changed_evt(self, row, col, item):
# only verify if the list is being edited
if self._var_is_being_added is True:
return
if col == 0 and item is not None:
if not (self._variable_rule.match(item) and item.startswith('VAR_')):
self.critical("The name of the variable should start with VAR_, should be alphanumeric and upper case.",
"Error")
self._vars.set_value(
col, row,
'VAR_{0}'.format( self._vars.rows_count))
elif col == 2:
datatype_combo = self._vars.get_value(1, row)
datatype = datatype_combo.value if datatype_combo else None
if datatype == 'number' and isinstance(item, str) and not item.isnumeric():
self.message("The value should be numeric.", "Error")
self._vars.set_value(
col, row,
'0'
)
def __add_variable(self):
self._var_is_being_added = True
var = self.create_variable(
'VAR_{0}'.format(self._vars.rows_count),
'0'
)
self._var_is_being_added = False
def __remove_variable(self):
if self._vars.selected_row_index is not None:
var = self.variables[self._vars.selected_row_index]
self.variables.remove(var)
self._vars -= -1
def before_close(self):
return False
# Execute the application
if __name__ == "__main__":
app.start_app(BoardTaskWindow)
| 2.734375
| 3
|
AutoNode/exceptions.py
|
jvalteren/auto-node
| 7
|
12774169
|
"""
Library of custom exceptions raised by AutNode
"""
from cryptography.fernet import InvalidToken
from pyhmy import Typgpy
from .common import (
node_config,
save_node_config
)
class ResetNode(Exception):
    """
    The only exception that triggers a hard reset.

    Constructing this exception has a side effect: it records the
    ``clean`` flag in the shared node configuration and persists it to
    disk, so the reset handler can read it later.
    """
    def __init__(self, *args, clean=False):
        # Persist whether the reset should also wipe state ("clean" reset).
        node_config['clean'] = clean
        save_node_config()
        super(ResetNode, self).__init__(*args)
class InvalidWalletPassphrase(InvalidToken):
    """
    Raised when the supplied wallet passphrase is invalid.

    Carries a fixed, user-facing message telling the operator how to
    re-authenticate via the auto-node CLI.
    """

    def __init__(self):
        # BUG FIX: the original called ``super(InvalidToken, self)``, which
        # starts the MRO lookup *after* InvalidToken and thereby skips it.
        # Zero-argument super() resumes the lookup after this class, as
        # intended. The message text is unchanged.
        super().__init__(f"Re-authenticate wallet passphrase with "
                         f"'{Typgpy.OKGREEN}auto-node auth-wallet{Typgpy.ENDC}'")
| 2.65625
| 3
|
migrations/versions/7b47983c2ea0_create_table_budget_item.py
|
flaviogf/architecture_example_august_2019
| 2
|
12774170
|
"""create table budget_item
Revision ID: 7b47983c2ea0
Revises: 89794c69ffab
Create Date: 2019-09-07 11:46:49.554912
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '7b47983c2ea0'
down_revision = '89794c69ffab'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``budget_item`` table (line items belonging to a budget)."""
    # ``uid`` is a string UUID primary key; ``budget_uid`` links each item
    # to its parent row in ``budget``.
    op.create_table('budget_item',
                    sa.Column('uid',
                              sa.String(36),
                              primary_key=True),
                    sa.Column('name',
                              sa.String(250),
                              nullable=False),
                    sa.Column('quantity',
                              sa.Integer,
                              nullable=False),
                    sa.Column('price',
                              sa.Float,
                              nullable=False),
                    sa.Column('total',
                              sa.Float,
                              nullable=False),
                    sa.Column('budget_uid',
                              sa.String(36),
                              sa.ForeignKey('budget.uid'),
                              nullable=False))
def downgrade():
    """Drop the ``budget_item`` table, reversing :func:`upgrade`."""
    op.drop_table('budget_item')
| 1.5
| 2
|
i2c_search.py
|
jaka/usbi2c-python3
| 3
|
12774171
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import serial
from sys import platform
from usbi2c.usbi2c import *
verbose = False
def probe(adapter, address):
    """
    Probe a single I2C address for a responding device.

    :param adapter: connected USBI2C adapter instance.
    :param address: 7-bit I2C address to test (1..127).
    :return: True if a device acknowledged at ``address``, False otherwise.
    :raises Exception: "Pull-up resistors missing" when the very first
        probed address (1) fails with the adapter's ``b'T'`` status.
    """
    try:
        adapter.Address(address)
        adapter.Start()
        adapter.WaitForCompletion()
        return True
    except Exception as e:
        # NOTE(review): assumes every exception raised here carries a
        # ``char`` attribute; a plain Exception would raise AttributeError
        # at this line — TODO confirm against usbi2c's exception type.
        if e.char and e.char == b'T' and address == 1:
            raise Exception("Pull-up resistors missing")
        return False
def serial_name():
    """Return the default serial-port device name for the current OS."""
    return "/dev/ttyACM0" if platform == "linux" else "COM4"
# Script body: scan the whole 7-bit I2C address space and report devices.
uart = None
try:
    port = serial_name()
    if verbose:
        print("Using serial port: %s" % port)
    uart = serial.Serial(port, timeout=1)
    adapter = USBI2C(uart)
    adapter.Reset()
    # Bug fix: the original assigned to ``serial``, shadowing the imported
    # pyserial module for the rest of the script.
    serial_number = adapter.Serial()
    print("Connected to adapter with serial number: %s" % serial_number)
    print("Scanning for I2C devices ...")
    for address in range(1, 128):
        detected = probe(adapter, address)
        result = "found" if detected else "not found"
        if verbose:
            print("Probing address %x ... %s" % (address, result))
        elif detected:
            print("%s on 0x%x (%d)" %
                  (result.capitalize(), address, address))
except Exception as e:
    print(e)
finally:
    # Robustness: close the port even when the scan fails part-way
    # (the original only closed it on the success path).
    if uart is not None:
        uart.close()
    print("Finished scanning")
| 3.1875
| 3
|
client/client.py
|
Fish-game-dev/python-web-socket-chat-program
| 1
|
12774172
|
<reponame>Fish-game-dev/python-web-socket-chat-program<filename>client/client.py<gh_stars>1-10
import socket
import os
def SendString(HOST, PORT):
    """Send one chat line to the server and print the server's reply.

    NOTE(review): reads the module-level globals ``name`` and ``data``
    (set in the ``__main__`` block below) instead of taking them as
    parameters — confirm before reusing this function elsewhere.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((HOST, PORT))
        # Wire format the server expects: "<name>: <text>\n", UTF-8.
        sock.sendall(bytes(name + ": " + data + "\n", "utf-8"))
        ReceivedData = str(sock.recv(1024), "utf-8")
        # Clear the terminal ('cls' on Windows, 'clear' elsewhere) before
        # printing the updated chat output.
        os.system('cls' if os.name == 'nt' else 'clear')
        print("{}".format(ReceivedData))
if __name__ == '__main__':
    # Connection details are entered once; each chat line reconnects.
    HOST = input("IP Adress:")
    PORT = int(input("Port Number:"))
    name = input("name: ")
    # Idiom fix: ``while True`` replaces the C-style ``while (1 == 1)``.
    while True:
        data = input("> ")
        SendString(HOST, PORT)
| 3.296875
| 3
|
pyvips/tests/test_create.py
|
kleisauke/pyvips
| 0
|
12774173
|
<filename>pyvips/tests/test_create.py
# vim: set fileencoding=utf-8 :
import unittest
import pyvips
from .helpers import PyvipsTester
class TestCreate(PyvipsTester):
def test_black(self):
im = pyvips.Image.black(100, 100)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 100)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im.bands, 1)
for i in range(0, 100):
pixel = im(i, i)
self.assertEqual(len(pixel), 1)
self.assertEqual(pixel[0], 0)
im = pyvips.Image.black(100, 100, bands=3)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 100)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im.bands, 3)
for i in range(0, 100):
pixel = im(i, i)
self.assertEqual(len(pixel), 3)
self.assertAlmostEqualObjects(pixel, [0, 0, 0])
def test_buildlut(self):
M = pyvips.Image.new_from_array([[0, 0],
[255, 100]])
lut = M.buildlut()
self.assertEqual(lut.width, 256)
self.assertEqual(lut.height, 1)
self.assertEqual(lut.bands, 1)
p = lut(0, 0)
self.assertEqual(p[0], 0.0)
p = lut(255, 0)
self.assertEqual(p[0], 100.0)
p = lut(10, 0)
self.assertEqual(p[0], 100 * 10.0 / 255.0)
M = pyvips.Image.new_from_array([[0, 0, 100],
[255, 100, 0],
[128, 10, 90]])
lut = M.buildlut()
self.assertEqual(lut.width, 256)
self.assertEqual(lut.height, 1)
self.assertEqual(lut.bands, 2)
p = lut(0, 0)
self.assertAlmostEqualObjects(p, [0.0, 100.0])
p = lut(64, 0)
self.assertAlmostEqualObjects(p, [5.0, 95.0])
def test_eye(self):
im = pyvips.Image.eye(100, 90)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertEqual(im.max(), 1.0)
self.assertEqual(im.min(), -1.0)
im = pyvips.Image.eye(100, 90, uchar=True)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im.max(), 255.0)
self.assertEqual(im.min(), 0.0)
def test_fractsurf(self):
im = pyvips.Image.fractsurf(100, 90, 2.5)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
def test_gaussmat(self):
im = pyvips.Image.gaussmat(1, 0.1)
self.assertEqual(im.width, 5)
self.assertEqual(im.height, 5)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.DOUBLE)
self.assertEqual(im.max(), 20)
total = im.avg() * im.width * im.height
scale = im.get("scale")
self.assertEqual(total, scale)
p = im(im.width / 2, im.height / 2)
self.assertEqual(p[0], 20.0)
im = pyvips.Image.gaussmat(1, 0.1,
separable=True, precision="float")
self.assertEqual(im.width, 5)
self.assertEqual(im.height, 1)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.DOUBLE)
self.assertEqual(im.max(), 1.0)
total = im.avg() * im.width * im.height
scale = im.get("scale")
self.assertEqual(total, scale)
p = im(im.width / 2, im.height / 2)
self.assertEqual(p[0], 1.0)
def test_gaussnoise(self):
im = pyvips.Image.gaussnoise(100, 90)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
im = pyvips.Image.gaussnoise(100, 90, sigma=10, mean=100)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
sigma = im.deviate()
mean = im.avg()
self.assertAlmostEqual(sigma, 10, places=0)
self.assertAlmostEqual(mean, 100, places=0)
def test_grey(self):
im = pyvips.Image.grey(100, 90)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
p = im(0, 0)
self.assertEqual(p[0], 0.0)
p = im(99, 0)
self.assertEqual(p[0], 1.0)
p = im(0, 89)
self.assertEqual(p[0], 0.0)
p = im(99, 89)
self.assertEqual(p[0], 1.0)
im = pyvips.Image.grey(100, 90, uchar=True)
self.assertEqual(im.width, 100)
self.assertEqual(im.height, 90)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
p = im(0, 0)
self.assertEqual(p[0], 0)
p = im(99, 0)
self.assertEqual(p[0], 255)
p = im(0, 89)
self.assertEqual(p[0], 0)
p = im(99, 89)
self.assertEqual(p[0], 255)
def test_identity(self):
im = pyvips.Image.identity()
self.assertEqual(im.width, 256)
self.assertEqual(im.height, 1)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
p = im(0, 0)
self.assertEqual(p[0], 0.0)
p = im(255, 0)
self.assertEqual(p[0], 255.0)
p = im(128, 0)
self.assertEqual(p[0], 128.0)
im = pyvips.Image.identity(ushort=True)
self.assertEqual(im.width, 65536)
self.assertEqual(im.height, 1)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.USHORT)
p = im(0, 0)
self.assertEqual(p[0], 0)
p = im(99, 0)
self.assertEqual(p[0], 99)
p = im(65535, 0)
self.assertEqual(p[0], 65535)
def test_invertlut(self):
lut = pyvips.Image.new_from_array([[0.1, 0.2, 0.3, 0.1],
[0.2, 0.4, 0.4, 0.2],
[0.7, 0.5, 0.6, 0.3]])
im = lut.invertlut()
self.assertEqual(im.width, 256)
self.assertEqual(im.height, 1)
self.assertEqual(im.bands, 3)
self.assertEqual(im.format, pyvips.BandFormat.DOUBLE)
p = im(0, 0)
self.assertAlmostEqualObjects(p, [0, 0, 0])
p = im(255, 0)
self.assertAlmostEqualObjects(p, [1, 1, 1])
p = im(0.2 * 255, 0)
self.assertAlmostEqual(p[0], 0.1, places=2)
p = im(0.3 * 255, 0)
self.assertAlmostEqual(p[1], 0.1, places=2)
p = im(0.1 * 255, 0)
self.assertAlmostEqual(p[2], 0.1, places=2)
def test_logmat(self):
im = pyvips.Image.logmat(1, 0.1)
self.assertEqual(im.width, 7)
self.assertEqual(im.height, 7)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.DOUBLE)
self.assertEqual(im.max(), 20)
total = im.avg() * im.width * im.height
scale = im.get("scale")
self.assertEqual(total, scale)
p = im(im.width / 2, im.height / 2)
self.assertEqual(p[0], 20.0)
im = pyvips.Image.logmat(1, 0.1,
separable=True, precision="float")
self.assertEqual(im.width, 7)
self.assertEqual(im.height, 1)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.DOUBLE)
self.assertEqual(im.max(), 1.0)
total = im.avg() * im.width * im.height
scale = im.get("scale")
self.assertEqual(total, scale)
p = im(im.width / 2, im.height / 2)
self.assertEqual(p[0], 1.0)
def test_mask_butterworth_band(self):
im = pyvips.Image.mask_butterworth_band(128, 128, 2,
0.5, 0.5, 0.7,
0.1)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertAlmostEqual(im.max(), 1, places=2)
p = im(32, 32)
self.assertEqual(p[0], 1.0)
im = pyvips.Image.mask_butterworth_band(128, 128, 2,
0.5, 0.5, 0.7,
0.1, uchar=True, optical=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im.max(), 255)
p = im(32, 32)
self.assertEqual(p[0], 255.0)
p = im(64, 64)
self.assertEqual(p[0], 255.0)
im = pyvips.Image.mask_butterworth_band(128, 128, 2,
0.5, 0.5, 0.7,
0.1, uchar=True, optical=True,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im.max(), 255)
p = im(32, 32)
self.assertEqual(p[0], 255.0)
p = im(64, 64)
self.assertNotEqual(p[0], 255)
def test_mask_butterworth(self):
im = pyvips.Image.mask_butterworth(128, 128, 2, 0.7, 0.1,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertAlmostEqual(im.min(), 0, places=2)
p = im(0, 0)
self.assertEqual(p[0], 0.0)
v, x, y = im.maxpos()
self.assertEqual(x, 64)
self.assertEqual(y, 64)
im = pyvips.Image.mask_butterworth(128, 128, 2, 0.7, 0.1,
optical=True, uchar=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertAlmostEqual(im.min(), 0, places=2)
p = im(64, 64)
self.assertEqual(p[0], 255)
def test_mask_butterworth_ring(self):
im = pyvips.Image.mask_butterworth_ring(128, 128, 2, 0.7, 0.1, 0.5,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
p = im(45, 0)
self.assertAlmostEqual(p[0], 1.0, places=4)
v, x, y = im.minpos()
self.assertEqual(x, 64)
self.assertEqual(y, 64)
def test_mask_fractal(self):
im = pyvips.Image.mask_fractal(128, 128, 2.3)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
def test_mask_gaussian_band(self):
im = pyvips.Image.mask_gaussian_band(128, 128, 0.5, 0.5, 0.7, 0.1)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertAlmostEqual(im.max(), 1, places=2)
p = im(32, 32)
self.assertEqual(p[0], 1.0)
def test_mask_gaussian(self):
im = pyvips.Image.mask_gaussian(128, 128, 0.7, 0.1,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertAlmostEqual(im.min(), 0, places=2)
p = im(0, 0)
self.assertEqual(p[0], 0.0)
def test_mask_gaussian_ring(self):
im = pyvips.Image.mask_gaussian_ring(128, 128, 0.7, 0.1, 0.5,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
p = im(45, 0)
self.assertAlmostEqual(p[0], 1.0, places=3)
def test_mask_ideal_band(self):
im = pyvips.Image.mask_ideal_band(128, 128, 0.5, 0.5, 0.7)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertAlmostEqual(im.max(), 1, places=2)
p = im(32, 32)
self.assertEqual(p[0], 1.0)
def test_mask_ideal(self):
im = pyvips.Image.mask_ideal(128, 128, 0.7,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
self.assertAlmostEqual(im.min(), 0, places=2)
p = im(0, 0)
self.assertEqual(p[0], 0.0)
def test_mask_gaussian_ring_2(self):
im = pyvips.Image.mask_ideal_ring(128, 128, 0.7, 0.5,
nodc=True)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
p = im(45, 0)
self.assertAlmostEqual(p[0], 1.0, places=3)
def test_sines(self):
im = pyvips.Image.sines(128, 128)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
def test_text(self):
if pyvips.type_find("VipsOperation", "text") != 0:
im = pyvips.Image.text("Hello, world!")
self.assertTrue(im.width > 10)
self.assertTrue(im.height > 10)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im.max(), 255)
self.assertEqual(im.min(), 0)
def test_tonelut(self):
im = pyvips.Image.tonelut()
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.USHORT)
self.assertEqual(im.width, 32768)
self.assertEqual(im.height, 1)
self.assertTrue(im.hist_ismonotonic())
def test_xyz(self):
im = pyvips.Image.xyz(128, 128)
self.assertEqual(im.bands, 2)
self.assertEqual(im.format, pyvips.BandFormat.UINT)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
p = im(45, 35)
self.assertAlmostEqualObjects(p, [45, 35])
def test_zone(self):
im = pyvips.Image.zone(128, 128)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
def test_worley(self):
im = pyvips.Image.worley(512, 512)
self.assertEqual(im.width, 512)
self.assertEqual(im.height, 512)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
def test_perlin(self):
im = pyvips.Image.perlin(512, 512)
self.assertEqual(im.width, 512)
self.assertEqual(im.height, 512)
self.assertEqual(im.bands, 1)
self.assertEqual(im.format, pyvips.BandFormat.FLOAT)
if __name__ == '__main__':
unittest.main()
| 2.375
| 2
|
src/eeyore_nlp/pipelines/__init__.py
|
dpasse/eeyore
| 0
|
12774174
|
<reponame>dpasse/eeyore<filename>src/eeyore_nlp/pipelines/__init__.py
from .abs import AbsPipe, \
TextPipe, \
ContextPipe
from .context_pipes import ChunkerPipe, \
TokenAttributesPipe, \
TokenTaggerPipe, \
EmptyContextPipe, \
MergerPipe
from .text_pipes import EmptyTextPipe, \
ContractionsTextPipe, \
ExpandTextPipe, \
RapidRegexReplaceTextPipe
from .context_pipeline import ContextPipeline
from .text_pipeline import TextPipeline
from .context_factory import ContextFactory, \
PreTaggedContextFactory
from .tokenizers import ContextTokenizer, \
BlockContextTokenizer
from .pipe_wrappers import ContextPipeWrapper, \
TextPipeWrapper
| 0.960938
| 1
|
tests/test_dumpb_and_loadb.py
|
aecay/jsons
| 1
|
12774175
|
<filename>tests/test_dumpb_and_loadb.py
import json
from unittest import TestCase
import jsons
class TestDumpbAndLoadb(TestCase):
    """Round-trip tests for ``jsons.dumpb`` and ``jsons.loadb``."""

    def test_dumpb(self):
        """Serializing a nested object yields the expected JSON bytes."""
        class A:
            def __init__(self):
                self.name = 'A'

        class B:
            def __init__(self, a: A):
                self.a = a
                self.name = 'B'

        expected = json.dumps({'a': {'name': 'A'}, 'name': 'B'},
                              sort_keys=True).encode()
        actual = jsons.dumpb(B(A()), jdkwargs={'sort_keys': True})
        self.assertEqual(expected, actual)

    def test_loadb(self):
        """Deserializing JSON bytes works both untyped and typed."""
        class A:
            def __init__(self):
                self.name = 'A'

        class B:
            def __init__(self, a: A):
                self.a = a
                self.name = 'B'

        raw = json.dumps({'a': {'name': 'A'}, 'name': 'B'}).encode()
        self.assertDictEqual(jsons.loadb(raw),
                             {'a': {'name': 'A'}, 'name': 'B'})
        typed = jsons.loadb(raw, B)
        self.assertEqual('B', typed.name)
        self.assertEqual('A', typed.a.name)
| 2.84375
| 3
|
phileo/signals.py
|
dheeru0198/phileo
| 0
|
12774176
|
import django.dispatch

# Signal fired after a user likes an object; receivers get the Like
# instance and the originating request.
# NOTE(review): ``providing_args`` is purely documentary and was removed
# in Django 4.0 — confirm the project's Django version before upgrading.
object_liked = django.dispatch.Signal(providing_args=["like", "request"])

# Signal fired after a user removes a like from an object.
object_unliked = django.dispatch.Signal(providing_args=["object", "request"])
| 1.429688
| 1
|
test/praatio_test_case.py
|
timmahrt/praatIO
| 208
|
12774177
|
<filename>test/praatio_test_case.py
import unittest
import os
class PraatioTestCase(unittest.TestCase):
    """Base test case that resolves the shared test-data directories."""

    def __init__(self, *args, **kargs):
        super().__init__(*args, **kargs)
        # Test data lives next to this file under ./files, with a
        # writable scratch area at ./files/test_output.
        testDir = os.path.dirname(os.path.realpath(__file__))
        self.dataRoot = os.path.join(testDir, "files")
        self.outputRoot = os.path.join(self.dataRoot, "test_output")

    def setUp(self):
        """Ensure the output directory exists before each test."""
        if not os.path.exists(self.outputRoot):
            os.mkdir(self.outputRoot)

    def assertAllAlmostEqual(self, listA, listB):
        """Element-wise assertAlmostEqual over two sequences."""
        for expected, actual in zip(listA, listB):
            self.assertAlmostEqual(expected, actual)
| 2.8125
| 3
|
sparselandtools/applications/utils.py
|
mttk/sparselandtools
| 56
|
12774178
|
import numpy as np
import imageio
import os
AVAILABLE_IMAGES = ['barbara']
def _add_noise(img, sigma):
noise = np.random.normal(scale=sigma,
size=img.shape).astype(img.dtype)
return img + noise
def example_image(img_name, noise_std=0):
    """Load a bundled example image as float32, optionally adding noise.

    Args:
        img_name: basename (without extension) of a PNG under
            sparselandtools/applications/assets (see AVAILABLE_IMAGES).
        noise_std: standard deviation of additive Gaussian noise;
            0 adds nothing.

    Returns:
        2-D float32 numpy array (first channel only for RGB inputs).
    """
    imgf = os.path.join('sparselandtools', 'applications', 'assets', img_name + '.png')
    # read image
    try:
        # RGB(A) images: keep only the first channel.
        img = imageio.imread(imgf)[:, :, 0].astype('float32')
    except IndexError:
        # Grayscale images are already 2-D, so the channel index fails.
        img = imageio.imread(imgf).astype('float32')
    # add noise
    img = _add_noise(img, sigma=noise_std)
    return img
| 2.640625
| 3
|
project euler solutions/Problem_023b-with_Haskell.py
|
helq/old_code
| 0
|
12774179
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# NOTE: Python 2 source (uses ``xrange`` and the ``print`` statement).
# Project Euler 23: sum all positive integers that cannot be written as
# the sum of two abundant numbers.  The abundant-number list itself is
# produced by a companion Haskell program (Problem_023.hs).
# run Time: few seconds with runHaskell ~ 12 s
import subprocess as sub

# Run the Haskell helper and capture its stdout (a Python-style list
# literal of abundant numbers).
p = sub.Popen(["runhaskell", "Problem_023.hs"], stdout=sub.PIPE)
aNums, b = p.communicate()
# HACK: eval() parses the helper's output; acceptable only because the
# input comes from our own trusted program, never from external data.
aNums = eval(aNums)

# 28123 is the proven bound above which every integer is expressible as
# a sum of two abundant numbers.
lenNums = 28123
noAbundantNums = [True for i in range(lenNums+1)]
lenAbun = len( aNums )
# Sieve: mark every number that IS a sum of two abundant numbers.
for i in xrange(lenAbun):
    for j in xrange(i, lenAbun):
        noAbun = aNums[i] + aNums[j]
        if noAbun <= lenNums:
            noAbundantNums[noAbun] = False
        else: break
# Sum the survivors.
tot = 0
for i in xrange(lenNums+1):
    if noAbundantNums[i]:
        tot += i
print tot # 4179871
| 2.828125
| 3
|
apps/dbd/admin.py
|
TryNeo/sistema-gestion-pedidos
| 0
|
12774180
|
<reponame>TryNeo/sistema-gestion-pedidos<filename>apps/dbd/admin.py<gh_stars>0
from django.contrib import admin

# NOTE(review): the module name 'esctructura_model_cliente' is misspelled
# in the source tree ('esctructura' vs 'estructura'); kept as-is to match
# the actual file on disk.
from apps.dbd.modelos.estructura_model_producto import Producto
from apps.dbd.modelos.esctructura_model_cliente import Cliente
from apps.dbd.modelos.estructura_model_catalogo import Categoria
from apps.dbd.modelos.estructura_model_pedido import Pedido, PedidoItem

# Expose the order-management models in the Django admin site.
admin.site.register(Producto)
admin.site.register(Cliente)
admin.site.register(Categoria)
admin.site.register(Pedido)
admin.site.register(PedidoItem)
| 1.15625
| 1
|
run_mnist.py
|
jaredgorski/firstcnn
| 1
|
12774181
|
import mnist
import numpy as np
import pickle
import cnn

# Interactive script: load or build a small CNN, optionally train it on
# MNIST, evaluate it, and optionally pickle it to disk.

training_images = mnist.train_images()
training_labels = mnist.train_labels()

## uncomment below to train mnist images as RGB data
# import cv2
# training_images_rgb = []
# for i, image in enumerate(training_images):
#     training_images_rgb.append(cv2.cvtColor(image, cv2.COLOR_GRAY2RGB))
# training_images = np.array(training_images_rgb)

classes = [x for x in range(10)]

# initialize
net = None
answer = input("Would you like to load a model? (enter 'y' to load): ")
should_load = answer == 'y'
if should_load:
    filename = input("Enter a filename (without the extension): ")
    # Bug fix: the original f-string had no placeholder, so every load
    # used a literal filename regardless of what the user entered.  A
    # context manager also closes the handle the original leaked.
    with open(f'{filename}.pickle', 'rb') as pickle_in:
        net = pickle.load(pickle_in)
else:
    layers = [
        cnn.layers.Conv(num_kernels=16),
        cnn.layers.MaxPool(),
        cnn.layers.SoftMax(num_classes=10),
    ]
    net = cnn.CNN(layers)

# train
answer = input("Would you like to train? (enter 'y' to train): ")
should_train = answer == 'y'
if should_train:
    net.train(training_images, training_labels, classes, num_epochs=5, rate=0.005)

# predict
answer = input("Would you like to test the model? (enter 'y' to test): ")
should_test = answer == 'y'
if should_test:
    print('\n\n>>> Testing model...\n')
    test_images = mnist.test_images()[:1000]
    test_labels = mnist.test_labels()[:1000]
    num_correct = 0
    for image, label in zip(test_images, test_labels):
        prediction_index = net.predict(image)
        prediction = classes[prediction_index]
        # Count a hit when the predicted class matches the label.
        num_correct += 1 if prediction == label else 0
    num_tests = len(test_images)
    percent_accurate = round(((num_correct / num_tests) * 100), 3)
    print(f'Prediction accuracy ({num_tests} attempts): {percent_accurate}%\n')

# save model
answer = input("Would you like to save the model? (enter 'y' to save): ")
should_save = answer == 'y'
if should_save:
    filename = input("Enter a filename (without the extension): ")
    # Same placeholder fix as the load path above.
    with open(f'{filename}.pickle', 'wb') as f:
        pickle.dump(net, f)
| 3.40625
| 3
|
core/backend.py
|
MeRajat/MAX-Review-Text-Generator
| 0
|
12774182
|
<gh_stars>0
from keras.backend import clear_session
from keras import models
import numpy as np
import json
import logging
logger = logging.getLogger()
from config import DEFAULT_MODEL_PATH, DEFAULT_MODEL_FILE, SEED_TEXT_LEN
# (Fixed) length of seed text that can serve as input to the generative model
_SEED_TEXT_LEN = 256
class ModelWrapper(object):
"""Model wrapper for Keras models"""
def __init__(self, path=DEFAULT_MODEL_PATH, model_file=DEFAULT_MODEL_FILE):
logger.info('Loading model from: {}...'.format(path))
model_path = '{}/{}'.format(path, model_file)
clear_session()
self.model = models.load_model(model_path)
# this seems to be required to make Keras models play nicely with threads
self.model._make_predict_function()
logger.info('Loaded model: {}'.format(self.model.name))
self._load_assets(path)
def _load_assets(self, path):
with open('{}/char_indices.txt'.format(path)) as f:
self.char_indices = json.loads(f.read())
self.chars = sorted(self.char_indices.keys())
self.num_chars = len(self.chars)
with open('{}/indices_char.txt'.format(path)) as f:
self.indices_char = json.loads(f.read())
def _sample(self, preds, temperature=.6):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def predict(self, sentence, gen_chars=50):
'''
Generate text based on seed text.
Args:
sentence: Input seed text to kick off generation.
gen_chars: How many characters of text to generate.
Returns generated text
'''
# The model was trained on lowercase text only, and there is no
# provision in the model itself for handling characters that are
# out of vocabulary.
# To compensate, turn everything into lowercase, then check for
# out-of-vocab characters in the result.
sentence = sentence.lower()
for t, char in enumerate(sentence):
if char not in self.char_indices:
print("Bad char {} at position {}".format(char, t))
raise ValueError(
"Unexpected character '{}' at position {}. "
"Only lowercase ASCII characters, spaces, "
"and basic punctuation are supported.".format(char, t))
# The text passed into the model must be exactly SEED_TEXT_LEN
# characters long, or the model will crash. Pad or truncate.
if len(sentence) > SEED_TEXT_LEN:
sentence = sentence[:SEED_TEXT_LEN]
else:
sentence = sentence.rjust(SEED_TEXT_LEN)
generated = ''
for i in range(gen_chars):
x = np.zeros((1, SEED_TEXT_LEN, self.num_chars))
for t, char in enumerate(sentence):
x[0, t, self.char_indices[char]] = 1.
preds = self.model.predict(x, verbose=0)[0]
next_index = self._sample(preds)
next_char = self.indices_char[str(next_index)]
generated += next_char
sentence = sentence[1:] + next_char
return generated
| 2.234375
| 2
|
resume_builder/__init__.py
|
nilesh1168/Resume_Builder
| 0
|
12774183
|
from flask import Flask
from resume_builder.config import Configuration

# Create the Flask application and load its configuration object.
app = Flask(__name__)
app.config.from_object(Configuration)

# Imported last (and for side effects) so routes/models can import
# ``app`` from this module without a circular-import failure.
from resume_builder import routes,models
| 1.617188
| 2
|
examples/human_sar/net_latent.py
|
Cysu/dlearn
| 4
|
12774184
|
import os
import sys
import argparse
import numpy as np
import theano.tensor as T
homepath = os.path.join('..', '..')
if not homepath in sys.path:
sys.path.insert(0, homepath)
from dlearn.models.layer import FullConnLayer, ConvPoolLayer
from dlearn.models.nnet import NeuralNet
from dlearn.utils import actfuncs, costfuncs
from dlearn.utils.serialize import load_data, save_data
from dlearn.optimization import sgd
# Program arguments parser
desctxt = """
Train latent network. Use learned attribute and segmentation network.
"""
dataset_txt = """
The input dataset data_name.pkl.
"""
attr_txt = """
The attribute network model_name.pkl.
"""
seg_txt = """
The segmentation network model_name.pkl.
"""
output_txt = """
If not specified, the output model will be saved as model_latent.pkl.
Otherwise it will be saved as model_latent_name.pkl.
"""
parser = argparse.ArgumentParser(description=desctxt)
parser.add_argument('-d', '--dataset', nargs=1, required=True,
metavar='name', help=dataset_txt)
parser.add_argument('-a', '--attribute', nargs=1, required=True,
metavar='name', help=attr_txt)
parser.add_argument('-s', '--segmentation', nargs=1, required=True,
metavar='name', help=seg_txt)
parser.add_argument('-o', '--output', nargs='?', default=None,
metavar='name', help=output_txt)
args = parser.parse_args()
def train_model(dataset, attr_model, seg_model):
def shape_constrained_pooling(fmaps):
s = fmaps.sum(axis=[2, 3])
Z = abs(actfuncs.tanh(fmaps)).sum(axis=[2, 3])
return s / Z
X = T.tensor4()
A = T.matrix()
feature_layers = []
feature_layers.append(ConvPoolLayer(
input=X,
input_shape=(3, 160, 80),
filter_shape=(32, 3, 5, 5),
pool_shape=(2, 2),
active_func=actfuncs.tanh,
flatten=False,
W=attr_model.blocks[0]._W,
b=0.0
))
feature_layers.append(ConvPoolLayer(
input=feature_layers[-1].output,
input_shape=feature_layers[-1].output_shape,
filter_shape=(64, 32, 5, 5),
pool_shape=(2, 2),
active_func=actfuncs.tanh,
flatten=False,
W=attr_model.blocks[1]._W,
b=0.0
))
seg_layers = []
seg_layers.append(FullConnLayer(
input=feature_layers[-1].output.flatten(2),
input_shape=np.prod(feature_layers[-1].output_shape),
output_shape=1024,
dropout_ratio=0.1,
active_func=actfuncs.tanh,
W=seg_model.blocks[2]._W,
b=seg_model.blocks[2]._b
))
seg_layers.append(FullConnLayer(
input=seg_layers[-1].output,
input_shape=seg_layers[-1].output_shape,
output_shape=37 * 17,
dropout_input=seg_layers[-1].dropout_output,
active_func=actfuncs.sigmoid,
W=seg_model.blocks[3]._W,
b=seg_model.blocks[3]._b
))
S = seg_layers[-1].output
S = S * (S >= 0.1)
S = S.reshape((S.shape[0], 37, 17))
S = S.dimshuffle(0, 'x', 1, 2)
S_dropout = seg_layers[-1].dropout_output
S_dropout = S_dropout * (S_dropout >= 0.1)
S_dropout = S_dropout.reshape((S_dropout.shape[0], 37, 17))
S_dropout = S_dropout.dimshuffle(0, 'x', 1, 2)
attr_layers = []
'''
attr_layers.append(ConvPoolLayer(
input=feature_layers[-1].output * S,
input_shape=feature_layers[-1].output_shape,
filter_shape=(128, 64, 3, 3),
pool_shape=(2, 2),
dropout_input=feature_layers[-1].output * S_dropout,
active_func=actfuncs.tanh,
flatten=False,
W=attr_model.blocks[2]._W,
b=0.0
))
'''
attr_layers.append(FullConnLayer(
input=shape_constrained_pooling(feature_layers[-1].output * S),
input_shape=feature_layers[-1].output_shape,
output_shape=64,
dropout_input=shape_constrained_pooling(
feature_layers[-1].dropout_output * S_dropout),
dropout_ratio=0.1,
active_func=actfuncs.tanh,
W=attr_model.blocks[2]._W,
b=attr_model.blocks[2]._b
))
attr_layers.append(FullConnLayer(
input=attr_layers[-1].output,
input_shape=attr_layers[-1].output_shape,
output_shape=11,
dropout_input=attr_layers[-1].dropout_output,
active_func=actfuncs.sigmoid,
W=attr_model.blocks[3]._W,
b=attr_model.blocks[3]._b
))
model = NeuralNet(feature_layers + seg_layers + attr_layers,
X, attr_layers[-1].output)
model.target = A
model.cost = costfuncs.binxent(attr_layers[-1].dropout_output, A) + \
1e-3 * model.get_norm(2)
model.error = costfuncs.binerr(attr_layers[-1].output, A)
sgd.train(model, dataset, lr=1e-3, momentum=0.9,
batch_size=100, n_epochs=300,
epoch_waiting=10)
return model
if __name__ == '__main__':
dataset_file = 'data_{0}.pkl'.format(args.dataset[0])
attr_file = 'model_{0}.pkl'.format(args.attribute[0])
seg_file = 'model_{0}.pkl'.format(args.segmentation[0])
out_file = 'model_latent.pkl' if args.output is None else \
'model_latent_{0}.pkl'.format(args.output)
dataset = load_data(dataset_file)
attr_model = load_data(attr_file)
seg_model = load_data(seg_file)
model = train_model(dataset, attr_model, seg_model)
save_data(model, out_file)
| 2.375
| 2
|
tests/test_transport_websocket.py
|
zentropi/python-zentropi
| 14
|
12774185
|
import pytest
from zentropi import Agent
from zentropi import Frame
from zentropi import WebsocketTransport
from zentropi.transport import websocket
class MockWebsockets(object):
def __init__(self, login_ok=True, send_ok=True, recv_ok=True):
self._login_ok = login_ok
self._send_ok = send_ok
self._recv_ok = recv_ok
self.frame = None
async def connect(self, endpoint):
return self
async def close(self):
pass
async def send(self, data):
if not self._send_ok:
raise ConnectionAbortedError()
frame = Frame.from_json(data)
if frame.name == 'login':
if self._login_ok:
self.frame = frame.reply('login-ok').to_json()
else:
self.frame = frame.reply('login-failed').to_json()
return
self.frame = data
async def recv(self):
if not self._recv_ok:
raise ConnectionAbortedError()
return self.frame
@pytest.mark.asyncio
async def test_websocket_transport(monkeypatch):
monkeypatch.setattr(websocket, 'websockets', MockWebsockets())
wt = WebsocketTransport()
frame = Frame('test-frame')
await wt.connect('ws://localhost:6789/', 'test-token')
assert wt.connected is True
await wt.send(frame)
assert wt.connection.frame
frame_recv = await wt.recv()
assert frame_recv.name == 'test-frame'
await wt.close()
assert wt.connected is False
@pytest.mark.asyncio
@pytest.mark.xfail(raises=PermissionError)
async def test_websocket_transport_login_fail(monkeypatch):
monkeypatch.setattr(websocket, 'websockets', MockWebsockets(login_ok=False))
wt = WebsocketTransport()
frame = Frame('test-frame')
await wt.connect('ws://localhost:6789/', 'test-token')
@pytest.mark.asyncio
@pytest.mark.xfail(raises=ConnectionError)
async def test_websocket_transport_send_fail(monkeypatch):
monkeypatch.setattr(websocket, 'websockets', MockWebsockets(send_ok=False))
wt = WebsocketTransport()
frame = Frame('test-frame')
await wt.connect('ws://localhost:6789/', 'test-token')
@pytest.mark.asyncio
@pytest.mark.xfail(raises=ConnectionError)
async def test_websocket_transport_recv_fail(monkeypatch):
monkeypatch.setattr(websocket, 'websockets', MockWebsockets(recv_ok=False))
wt = WebsocketTransport()
frame = Frame('test-frame')
await wt.connect('ws://localhost:6789/', 'test-token')
# @pytest.mark.asyncio
# async def test_agent_with_websocket_endpoint():
# a = Agent('test-agent')
# test_event_handler_was_run = False
# @a.on_event('startup')
# async def startup(frame): # pragma: no cover
# await a.connect('ws://localhost:6789/', 'test-token')
# await a.event('test')
# @a.on_event('test')
# async def test(frame): # pragma: no cover
# nonlocal test_event_handler_was_run
# test_event_handler_was_run = True
# await a.close()
# a.stop()
# await a.start()
# assert test_event_handler_was_run is True
# @pytest.mark.asyncio
# @pytest.mark.xfail(raises=ConnectionError)
# async def test_agent_with_websocket_login_fail():
# a = Agent('test-agent')
# @a.on_event('startup')
# async def startup(frame): # pragma: no cover
# await a.connect('ws://localhost:6789/', 'fail-token')
# await a.start()
| 2.234375
| 2
|
test/test_edit_group.py
|
Oliebert/Testing_training
| 0
|
12774186
|
<gh_stars>0
from model.group import Group
from random import randrange
import random
def test_edit_group_name(app, db, check_ui):
    """Edit a random group's name and verify the DB (and optionally UI) state."""
    if app.group.count() == 0:  # if there is no group yet
        app.group.create(Group(name="New_group"))  # create one so there is something to edit
    old_groups = db.get_group_list()  # group list before the modification
    # index = randrange(len(old_groups))
    group = random.choice(old_groups)
    old_groups.remove(group)
    edit_group = Group(name="Edit_group")
    edit_group.id = group.id  # keep the id of the original group
    old_groups.append(edit_group)
    app.group.edit_group_by_id(group.id, edit_group)
    new_groups = db.get_group_list()
    #assert len(old_groups) == len(new_groups)
    assert sorted(new_groups, key=Group.id_or_max) == sorted(old_groups, key=Group.id_or_max)
    if check_ui:
        # Cross-check the UI's group list against the expected state.
        assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
'''
def test_edit_group_footer(app):
if app.group.count() == 0: # falls keine Gruppe gibt´s
app.group.create(Group(name="test")) # erstellen wir eine Gruppe
old_groups = app.group.get_group_list()
app.group.edit_first_group(Group(footer="header_name_changed"))
new_groups = app.group.get_group_list()
assert len(old_groups) == len(new_groups)
'''
| 2.40625
| 2
|
#13.py
|
dev-farmaan/Project-Euler
| 0
|
12774187
|
# Work out the first ten digits of the sum of N 50 digit numbers.
# N is read from the first stdin line; each of the next N lines holds one
# (50-digit) number.
total = 0
for x in range(int(input())):
    total += int(input().rstrip())
# Python ints are arbitrary precision, so the first ten digits of the exact
# sum are simply the first ten characters of its decimal representation.
print(str(total)[:10])
| 3.578125
| 4
|
app/views/contexts/submit_questionnaire_context.py
|
petechd/eq-questionnaire-runner
| 0
|
12774188
|
from typing import Generator, Mapping, Union
from flask_babel import lazy_gettext
from app.questionnaire.location import Location
from .context import Context
from .section_summary_context import SectionSummaryContext
class SubmitQuestionnaireContext(Context):
    """Builds the template context for the final 'check your answers and submit' page."""

    def __call__(
        self, answers_are_editable: bool = True
    ) -> dict[str, Union[str, dict]]:
        """Return the submit-page context.

        Title/guidance/button/warning texts come from the schema's submission
        block when present, otherwise fall back to translatable defaults.
        A summary of all sections is included only when the schema defines
        summary options.
        """
        summary_options = self._schema.get_summary_options()
        collapsible = summary_options.get("collapsible", False)
        submission_schema: Mapping = self._schema.get_submission() or {}
        title = submission_schema.get("title") or lazy_gettext(
            "Check your answers and submit"
        )
        submit_button = submission_schema.get("button") or lazy_gettext(
            "Submit answers"
        )
        guidance = submission_schema.get("guidance") or lazy_gettext(
            "Please submit this survey to complete it"
        )
        warning = submission_schema.get("warning") or None
        context = {
            "title": title,
            "guidance": guidance,
            "warning": warning,
            "submit_button": submit_button,
        }
        if summary_options:
            context["summary"] = self._get_summary_context(
                collapsible, answers_are_editable
            )
        return context

    def _get_summary_context(
        self, collapsible: bool, answers_are_editable: bool
    ) -> dict[str, Union[list, bool, str]]:
        """Assemble the summary sub-context from all enabled sections."""
        groups = list(self._build_all_groups())
        return {
            "groups": groups,
            "answers_are_editable": answers_are_editable,
            "collapsible": collapsible,
            "summary_type": "Summary",
        }

    def _build_all_groups(self) -> Generator[dict, None, None]:
        """ NB: Does not support repeating sections """
        # Yields every summary group of every enabled section, in routing order.
        for section_id in self._router.enabled_section_ids:
            location = Location(section_id=section_id)
            section_summary_context = SectionSummaryContext(
                language=self._language,
                schema=self._schema,
                answer_store=self._answer_store,
                list_store=self._list_store,
                progress_store=self._progress_store,
                metadata=self._metadata,
                current_location=location,
                return_to="final-summary",
                routing_path=self._router.routing_path(section_id),
            )
            section: Mapping = self._schema.get_section(section_id) or {}
            # NOTE(review): `break` aborts iteration of ALL remaining sections
            # as soon as one section defines custom summary "items"; if the
            # intent was to skip only that one section, this should probably
            # be `continue` — confirm against the summary requirements.
            if section.get("summary", {}).get("items"):
                break
            for group in section_summary_context()["summary"]["groups"]:
                yield group
| 2.40625
| 2
|
waliki/git/urls.py
|
luzik/waliki
| 324
|
12774189
|
<filename>waliki/git/urls.py
import django
try:
from django.conf.urls import patterns, url # django 1.8, 1.9
except ImportError:
from django.conf.urls import url
from waliki.settings import WALIKI_SLUG_PATTERN
from waliki.git.views import whatchanged, WhatchangedFeed, webhook_pull, history, version, diff
# URL patterns for the git-backed history/version/diff views.
# FIX: the regex literals are now raw strings so that sequences such as \d,
# \^ and \. reach the regex engine verbatim instead of relying on Python's
# deprecated "unrecognised escape" fallback (a SyntaxWarning on Python 3.12+).
_pattern_list = [
    url(r'^_whatchanged/(?P<pag>\d+)$', whatchanged, name='waliki_whatchanged'),  # noqa
    url(r'^_whatchanged$', whatchanged, {'pag': '1'}, name='waliki_whatchanged'),  # noqa
    url(r'^_whatchanged/rss$', WhatchangedFeed(), name='waliki_whatchanged_rss'),
    url(r'^_hooks/pull/(?P<remote>[a-zA-Z0-9]+)$',
        webhook_pull, name='waliki_webhook_pull'),
    url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + r')/history/(?P<pag>\d+)$',
        history, name='waliki_history'),
    url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + r')/history/$',
        history, {'pag': '1'}, name='waliki_history'),
    url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + r')/version/(?P<version>[0-9a-f\^]{4,40})/raw$',
        version, {'raw': True}, name='waliki_version_raw'),
    url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN +
        r')/version/(?P<version>[0-9a-f\^]{4,40})$', version, name='waliki_version'),
    url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN + r')/diff/(?P<old>[0-9a-f\^]{4,40})\.\.(?P<new>[0-9a-f\^]{4,40})/raw$',
        diff, {'raw': True}, name='waliki_diff_raw'),
    url(r'^(?P<slug>' + WALIKI_SLUG_PATTERN +
        r')/diff/(?P<old>[0-9a-f\^]{4,40})\.\.(?P<new>[0-9a-f\^]{4,40})$', diff, name='waliki_diff'),
]

# Django >= 1.10 accepts a plain list of patterns; older versions need the
# patterns() wrapper imported above.
if django.VERSION[:2] >= (1, 10):
    urlpatterns = _pattern_list
else:
    urlpatterns = patterns('waliki.git.views',
                           *_pattern_list
                           )
| 2.15625
| 2
|
encoder_ui/api/ffprobe.py
|
hidnoiz/encoder_ui
| 0
|
12774190
|
import subprocess
import json
class FFProbe:
    """Thin wrapper around the ``ffprobe`` command-line tool."""

    def __init__(self):
        # Name of the binary to invoke; assumed to be on PATH.
        self.bin = 'ffprobe'

    def exec(self, cmd: str) -> dict:
        """Run *cmd* and capture its stdout.

        :param cmd: full command line, split on single spaces (arguments
            themselves therefore must not contain spaces).
        :return: ``{'stdout': ..., 'is_error': bool}``. ``stdout`` is the
            UTF-8 decoded output, or the raw (empty) bytes when the process
            produced none; ``is_error`` is True for a non-zero exit code.
            (FIX: previously ``is_error`` was ``False`` or the int ``1``;
            it is now consistently a bool.)
        """
        p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        is_error = False
        stdout, _ = p.communicate()
        if stdout:
            stdout = stdout.decode('utf-8')
        if p.returncode != 0:
            is_error = True
        return {'stdout': stdout, 'is_error': is_error}

    def get_streams(self, addr: str) -> (dict, str):
        """Probe *addr* with ffprobe and parse the JSON stream description.

        :param addr: address of the stream to probe
        :return: ``(streams_dict, None)`` on success, or ``(None, error_message)``
            when ffprobe failed or its output was not valid JSON.
        """
        error_msg = 'Error when probe stream {}'.format(addr)
        cmd = '{} -loglevel quiet -hide_banner -print_format json -show_streams -show_programs {}'.format(self.bin, addr)
        output = self.exec(cmd)
        if output.get('is_error'):
            return None, '{}'.format(error_msg)
        try:
            return json.loads(output.get('stdout')), None
        except json.decoder.JSONDecodeError:
            return None, '{} json is not valid (cmd: {}, output: {})'.format(error_msg, cmd, output)
        except Exception as e:
            return None, '{} {} (cmd: {}, output: {})'.format(error_msg, str(e), cmd, output)
| 2.640625
| 3
|
Programs/ResourceEditor/Teamcity/getGitTime.py
|
stinvi/dava.engine
| 26
|
12774191
|
import os
import sys
# Optional first CLI argument is a branch name.
# NOTE(review): `branch` is never used below — dead assignment; confirm
# whether branch filtering was intended for the `git log` call.
if len(sys.argv) > 1:
    branch = sys.argv[1]
else:
    branch = ' '
# Walk up from Programs/ResourceEditor/Teamcity towards the repository root.
os.chdir('../')
os.system("git config --global log.date local")
os.chdir('../../')
# Write the commit date of HEAD (ISO-like "%ci" format) into gitTime.txt.
os.system("git log -1 --format=\"%ci\" > gitTime.txt")
| 2.25
| 2
|
blog/migrations/0001_initial.py
|
orangedigitallab/django-tinyblog
| 3
|
12774192
|
<reponame>orangedigitallab/django-tinyblog
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-29 12:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the blog ``Entry`` model.

    NOTE: generated by Django's ``makemigrations``; do not hand-edit an
    applied migration — create a follow-up migration instead.
    """

    initial = True

    dependencies = [
        ("taggit", "0002_auto_20150616_2121"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Entry",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("title", models.CharField(db_index=True, max_length=500, unique=True)),
                ("slug", models.SlugField(max_length=140, unique=True)),
                ("body", models.TextField()),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("modified_at", models.DateTimeField(auto_now=True)),
                (
                    "published_date",
                    models.DateTimeField(blank=True, editable=False, null=True),
                ),
                ("poster", models.ImageField(blank=True, null=True, upload_to="")),
                ("is_published", models.BooleanField(default=False)),
                (
                    "author",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="entries",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "tags",
                    taggit.managers.TaggableManager(
                        help_text="A comma-separated list of tags.",
                        through="taggit.TaggedItem",
                        to="taggit.Tag",
                        verbose_name="Tags",
                    ),
                ),
            ],
            options={
                "verbose_name": "Entry",
                "verbose_name_plural": "Entries",
                "db_table": "entries",
                "default_related_name": "entries",
            },
        )
    ]
| 1.703125
| 2
|
Luhn.py
|
Cesar-rso/CreditCardValidation
| 0
|
12774193
|
<gh_stars>0
#LuhnValid:
#Função que usa o algoritmo de Luhn para validar numeros (cartão de crédito, or exemplo)
#Parametro 'num' deve ser informado como string. Os caracteres '.' (ponto) e ' ' (espaço) são aceitos, porém nenhum outro caracter, somente numeros.
#Retorna 1 se o numero é válido,
# retorna 0 se o numero não é válido
# e retorna -1 em caso de erro (se for informado caracteres não numéricos, se o numero informado é menor que 2 caracteres ou se o parametro não for uma string)
import os
import json
def LuhnValid(num):
    """Validate *num* with the Luhn checksum.

    ``num`` must be a string of digits; '.' and ' ' characters are stripped
    before validation.

    :return: 1 when the number passes the Luhn check, 0 when it does not,
        and -1 on invalid input (non-string, non-numeric characters, or
        fewer than 3 digits, mirroring the original ``len(num) > 2`` rule).
    """
    if not isinstance(num, str):
        return -1
    num = num.replace(" ", "").replace(".", "")
    if not (num.isdecimal() and len(num) > 2):
        return -1
    total = 0
    # Luhn: walking from the RIGHTMOST digit, double every second digit
    # (offsets 1, 3, 5, ... from the right) and subtract 9 when the doubled
    # value exceeds 9. BUG FIX: the previous version doubled every second
    # digit from the LEFT, which coincides with Luhn only for even-length
    # numbers and mis-validated odd-length ones (including the check digit
    # itself in the doubling).
    for offset, ch in enumerate(reversed(num)):
        digit = int(ch)
        if offset % 2 == 1:
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return 1 if total % 10 == 0 else 0
#------------------------------------------------------------------------------------------------
#CardProvider:
#Função que verifica o numero do cartão e retorna o provedor do serviço de cartão em formato string
#Inclui apenas cartões de uso mais comum no Brasil
#Parametro 'num' deve ser informado como string. Os caracteres '.' (ponto) e ' ' (espaço) são aceitos, porém nenhum outro caracter, somente numeros.
# CardProvider:
# Looks up the card service provider for a card number, using a JSON file
# mapping provider names to accepted lengths and prefixes.
# `num` must be a string; '.' and ' ' are stripped before matching.
def CardProvider(num, arq):
    if not isinstance(num, str):
        return "Erro! Numero de cartão fornecido não esta em formato string"
    digits = num.replace(" ", "").replace(".", "")
    if not os.path.exists(arq):
        return "Erro! Arquivo JSON não encontrado"
    with open(arq, "r") as handle:
        providers = json.load(handle)
    found = "Provedor do serviço de cartão não encontrado"
    for provider, entries in providers.items():
        # Each JSON value is a one-element list holding a {length: [prefixes]} dict.
        rules = entries[0]
        for length, prefixes in rules.items():
            if len(digits) != int(length):
                continue
            for prefix in prefixes:
                if digits.startswith(prefix):
                    found = provider
    return found
| 3.640625
| 4
|
test/unit/command/test_status.py
|
alphagov/ghtools
| 3
|
12774194
|
from mock import patch
from ghtools.command.status import status, parser
class TestRepo(object):
    """Tests for the ``status`` command with the GitHub ``Repo`` patched out."""

    def setup(self):
        # Replace the Repo class the command uses, so no network call happens;
        # set_build_status(...).json() returns an empty payload.
        self.patcher = patch('ghtools.command.status.Repo')
        self.mock_repo = self.patcher.start()
        self.mock_repo.return_value.set_build_status.return_value.json.return_value = {}

    def teardown(self):
        self.patcher.stop()

    def test_status(self):
        """A full CLI invocation should forward all options to set_build_status."""
        args = parser.parse_args([
            'alphagov/foobar',
            'mybranch',
            'pending',
            '--description', 'Running on Jenkins',
            '--url', 'http://ci.alphagov.co.uk/foo',
            '--context', 'CI'
        ])
        status(args)
        self.mock_repo.assert_called_with('alphagov/foobar')
        self.mock_repo.return_value.set_build_status.assert_called_with(
            'mybranch',
            {
                'state': 'pending',
                'target_url': 'http://ci.alphagov.co.uk/foo',
                'description': 'Running on Jenkins',
                'context': 'CI'
            }
        )
| 2.5
| 2
|
actions/concat.py
|
Sylvaner/PyConverter
| 0
|
12774195
|
__author__ = "<NAME>"
__licence__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class concat():
    """Action that concatenates values from several columns into one cell."""

    # FIX: declared as a staticmethod — the function takes no ``self``;
    # without the decorator it could only ever be called on the class, and
    # calling it on an instance would mis-bind ``input_data`` to the instance.
    @staticmethod
    def action(input_data, params, current_row, current_index):
        """Concat multiple columns in the cell.

        :param input_data: Original cell value; returned unchanged when
            ``params`` is not a dict.
        :param params: Dict with following keys:
            - col_list: list of 1-based column indexes to concat in the cell.
            - separator (optional): separator to put between columns.
        :param current_row: Row the column values are read from.
        :param current_index: Not used.
        :return: Concatenated value to set (empty string when ``col_list``
            is missing), or ``input_data`` when ``params`` is not a dict.
        :rtype: str
        """
        if isinstance(params, dict):
            col_list = []
            if 'col_list' in params:
                for col in params['col_list']:
                    col_list.append(current_row[col - 1])  # 1-based indexes
            if 'separator' in params:
                return params['separator'].join(col_list)
            return ''.join(col_list)
        return input_data
| 3.140625
| 3
|
algorithms/151/changeBase.py
|
WebClub-NITK/Hacktoberfest-2k19
| 28
|
12774196
|
<reponame>WebClub-NITK/Hacktoberfest-2k19
#!/bin/python3
import sys
def symbol(num):
    """Map a numeric digit value (0-61) to its base-62 character (0-9, A-Z, a-z)."""
    if num < 0 or num > 61:
        return 'No Symbol'
    if num < 10:
        return str(num)
    if num < 36:
        return chr(num - 10 + ord('A'))
    return chr(num - 36 + ord('a'))
def value(symbol):
    """Inverse of ``symbol``: base-62 character -> numeric value.

    Falls through (returning None) for characters outside 0-9/A-Z/a-z,
    matching the original behaviour.
    """
    if '0' <= symbol <= '9':
        return int(symbol)
    if 'A' <= symbol <= 'Z':
        return ord(symbol) - ord('A') + 10
    if 'a' <= symbol <= 'z':
        return ord(symbol) - ord('a') + 36
# ---- interactive driver -------------------------------------------------
# Reads "<number> <original-base> <new-base>" from one stdin line and prints
# the number converted to the new base. Bad input exits via sys.exit().
print("USAGE : <number> <original-base> <new-base>")
numbers = input().split(' ')
if len(numbers) != 3:
    sys.exit("Invalid Usage!")
if numbers[0].isalnum():
    num = numbers[0]
else:
    sys.exit("Invalid Number!")
try:
    base = int(numbers[1])
    new_base = int(numbers[2])
except ValueError:
    sys.exit("Invalid Base!")
# Get Numeric Value of given Number
# Positional accumulation starting from the least significant digit.
# NOTE(review): value() returns None for alnum characters outside
# 0-9/A-Z/a-z (e.g. unicode digits), which would raise TypeError on the
# comparison below — confirm whether stricter input validation is wanted.
val = 0
i = 0
for digit in reversed(num):
    digit_val = value(digit)
    if digit_val >= base:
        sys.exit("Invalid base-%d Number!" %(base))
    val += (base**i) * digit_val
    i += 1
# Get representation of number in New Base
# Repeated division; digits come out least-significant first, so prepend.
rep = ""
while val > 0:
    remainder = val % new_base
    val = val // new_base
    rep = symbol(remainder) + rep
print("( %s )%d = ( %s )%d" %(num, base, rep, new_base))
| 3.625
| 4
|
modules/MemberList.py
|
PressureRX/OverwatchClanUnionBot
| 1
|
12774197
|
import json, time
from datetime import datetime
class MemberList:
    """Holds one clan's member roster parsed from a Discord message."""

    def __init__(self):
        self.Name = ""     # clan name, resolved from the staff list
        self.Update = ""   # last-update POSIX timestamp (float once stamped)
        self.Members = ""  # list of "Name#1234" strings once parsed

    def MessageFormVerify(self, content):
        """Return True when *content* looks like a member list (has '#' and newlines)."""
        # FIX: removed the no-op self-assignment ``content = content``.
        if '#' in content and '\n' in content:
            return True
        return False

    def MessageFormPharse(self, message):
        """Split the raw message into one member entry per line."""
        self.Members = message.split('\n')

    def GetClanName(self, staffList, message):
        """Resolve the clan name from the message author's full Discord tag."""
        author = message.author.name + "#" + message.author.discriminator
        self.Name = staffList[author]

    def GetUpdate(self):
        """Stamp the roster with the current POSIX time."""
        self.Update = time.time()

    def Save(self):
        """Rewrite this clan's entry inside datas/ClanList.json in place."""
        with open('datas/ClanList.json', 'r+', encoding='utf-8-sig') as clanListFile:
            clanLists = json.load(clanListFile)
            clanLists[self.Name]['Update'] = self.Update
            clanLists[self.Name]['Members'] = self.Members
            # Truncate AFTER reading so shorter JSON leaves no stale bytes.
            clanListFile.truncate(0)
            clanListFile.seek(0)
            clanListFile.write(json.dumps(clanLists, ensure_ascii=False, indent=4))

    def GetStrUpdateTime(self):
        """Format the stored timestamp as 'YYYY-MM-DD HH:MM:SS' (local time)."""
        return datetime.fromtimestamp(self.Update).strftime('%Y-%m-%d %H:%M:%S')

    def ToText(self):
        """Join the member entries back into one newline-separated string."""
        return '\n'.join(self.Members)
| 3
| 3
|
5_back_propagation.py
|
leegeunhyeok/deep-learning-study
| 0
|
12774198
|
import numpy as np
from common import numerical_gradient, softmax, cross_entropy_error
from collections import OrderedDict
class Relu:
    """ReLU activation layer: passes positive inputs through, zeroes the rest."""

    def __init__(self):
        # Boolean mask of the positions clipped during forward().
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        clipped = x.copy()
        clipped[self.mask] = 0
        return clipped

    def backward(self, dout):
        # The gradient is blocked (in place) wherever the input was <= 0.
        dout[self.mask] = 0
        return dout
class Affine:
    """Fully connected layer computing ``x @ W + b``."""

    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None   # input cached by forward() for use in backward()
        self.dW = None  # weight gradient, filled by backward()
        self.db = None  # bias gradient, filled by backward()

    def forward(self, x):
        self.x = x
        return np.dot(x, self.W) + self.b

    def backward(self, dout):
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        return np.dot(dout, self.W.T)
class SoftmaxWithLoss:
    """Softmax activation combined with cross-entropy loss (output layer)."""

    def __init__(self):
        self.loss = None  # last computed cross-entropy loss
        self.y = None     # softmax output of the last forward pass
        # FIX: declare self.t (targets), which backward() reads; the original
        # declared an unused ``self.x`` instead and never declared ``self.t``.
        self.t = None

    def forward(self, x, t):
        """Return the cross-entropy loss of softmax(x) against targets t."""
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, dout=1):
        """Gradient of the loss w.r.t. x, averaged over the batch."""
        batch_size = self.t.shape[0]
        dx = (self.y - self.t) / batch_size
        return dx
class TowLayerNet:
    """Two-layer network: Affine -> ReLU -> Affine -> SoftmaxWithLoss.

    NOTE(review): the class name is presumably a typo for ``TwoLayerNet``;
    it is kept as-is because the script below references it by this name.
    """

    def __init__(self, input_size, hidden_size, output_size,
                 weight_init_std=0.01):
        # Parameters: small random weights, zero biases.
        self.params = {}
        self.params['W1'] = weight_init_std * \
            np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * \
            np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)
        # Ordered layers so forward/backward can iterate them in sequence.
        self.layers = OrderedDict()
        self.layers['Affine1'] = \
            Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = \
            Affine(self.params['W2'], self.params['b2'])
        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):
        """Forward pass through all layers except the loss layer (returns logits)."""
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Cross-entropy loss of the network's prediction against targets t."""
        y = self.predict(x)
        return self.lastLayer.forward(y, t)

    def accuracy(self, x, t):
        """Fraction of samples whose argmax prediction matches the label."""
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1:
            t = np.argmax(t, axis=1)  # one-hot targets -> label indices
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    def numerical_gradient(self, x, t):
        """Slow finite-difference gradients; used only to verify backprop."""
        loss_W = lambda W: self.loss(x, t)
        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads

    def gradient(self, x, t):
        """Analytic gradients via backpropagation."""
        # Forward pass (populates each layer's cached values).
        self.loss(x, t)
        dout = 1
        dout = self.lastLayer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)
        # Collect the per-layer gradients computed during the backward pass.
        grads = {}
        grads['W1'] = self.layers['Affine1'].dW
        grads['b1'] = self.layers['Affine1'].db
        grads['W2'] = self.layers['Affine2'].dW
        grads['b2'] = self.layers['Affine2'].db
        return grads
# ========== ========== #
# Verify the backprop gradients against numerical (finite difference) ones.
from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = \
    load_mnist(normalize=True, one_hot_label=True)

network = TowLayerNet(input_size=784, hidden_size=50, output_size=10)

# A tiny batch keeps the numerical-gradient check fast.
x_batch = x_train[:3]
t_batch = t_train[:3]

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

# Mean absolute difference per parameter; should be vanishingly small.
for k in grad_numerical.keys():
    diff = np.average(np.abs(grad_backprop[k] - grad_numerical[k]))
    print(k + ' : ' + str(diff))

# ========== ========== #
# Training loop (mini-batch SGD).
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

# Iterations per epoch: accuracy is evaluated once per full data pass.
iter_per_epoch = max(train_size / batch_size, 1)

print('#========== ==========#')
print('iters_num: %s' % str(iters_num))
print('train_size: %s' % str(train_size))
print('batch_size: %s' % str(batch_size))
print('learning_rate: %s' % str(learning_rate))
print('iter_per_epoch: %s' % str(iter_per_epoch))
print('#========== ==========#')

for i in range(iters_num):
    # Sample a random mini-batch.
    batch_mask = np.random.choice(train_size, batch_size)
    # print(batch_mask)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute gradients by backpropagation.
    grad = network.gradient(x_batch, t_batch)

    # SGD update; works in place because the Affine layers hold references
    # to these same parameter arrays.
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Track train/test accuracy once per epoch.
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print(train_acc, test_acc)
| 2.671875
| 3
|
setup.py
|
jmschrei/yabn
| 2
|
12774199
|
<filename>setup.py<gh_stars>1-10
from distutils.core import setup
from distutils.extension import Extension
import numpy as np
# Build the Cython extension from source when Cython is available; otherwise
# fall back to the pre-generated C file shipped with the sdist.
try:
    from Cython.Distutils import build_ext
except ImportError:
    use_cython = False
else:
    use_cython = True

cmdclass = {}
if use_cython:
    ext_modules = [
        Extension("yabn.yabn", ["yabn/yabn.pyx"], include_dirs=[np.get_include()]),
    ]
    cmdclass.update({'build_ext': build_ext})
else:
    ext_modules = [
        Extension("yabn.yabn", ["yabn/yabn.c"], include_dirs=[np.get_include()]),
    ]

setup(
    name='yabn',
    version='0.1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    # BUG FIX: was ['yahbn'] — a typo that would package nothing, since the
    # package directory (and the extension path above) is named 'yabn'.
    packages=['yabn'],
    url='http://pypi.python.org/pypi/yabn/',
    license='LICENSE.txt',
    description='YABN is a Bayesian Network package for Python, implemented in Cython for speed.',
    cmdclass=cmdclass,
    ext_modules=ext_modules,
    install_requires=[
        "cython >= 0.20.1",
        "numpy >= 1.8.0",
        "scipy >= 0.13.3",
        "networkx >= 1.8.1"
    ],
)
| 1.710938
| 2
|
package/PartSegCore/interpolation/setup.py
|
monotropauniflora/PartSeg
| 0
|
12774200
|
<gh_stars>0
# Minimal build script for the Cython bilinear interpolation extension.
from distutils.core import setup
from distutils.extension import Extension

import numpy as np
from Cython.Build import cythonize

# NumPy's include dir is needed because the .pyx uses the NumPy C API.
extensions = [Extension("bilinear_interpolation", ["bilinear_interpolation.pyx"], include_dirs=[np.get_include()])]

setup(ext_modules=cythonize(extensions), name="coloring image")
| 1.414063
| 1
|
Python/stochastic.py
|
Hieuqng/Stochastic-Modelling
| 0
|
12774201
|
#!/usr/bin/env python
import numpy as np
from scipy.stats import norm
def bachelier(So, K, sigma, T, option_type):
    '''
    Calculate European option price using Bachelier model:
        dSt = sigma * S0 * dWt
        St = S0*(1 + sigma*Wt)

    Parameter
    ---------
    So: float
        price of underlying asset at time 0
    K: float
        strike price of option
    sigma: float
        variance of Brownian motion
    T: float
        length of time
    option_type: str
        type of European option.
        Including: van call/put (vanilla), con call/put (cash-or-nothing),
        aon call/put (asset-or-nothing)

    Return
    ------
    val: value of the option at time 0

    Raises
    ------
    ValueError
        for an unrecognised option_type
    '''
    # Standardised moneyness; well defined even at the money (So == K gives
    # xs == 0), so no ATM special case is needed.
    # BUG FIX: the previous early return for So == K applied the VANILLA
    # at-the-money value sigma*So*sqrt(T/(2*pi)) to every option type, which
    # is wrong for the digital variants (e.g. an ATM cash-or-nothing call is
    # worth N(0) = 0.5). For vanilla options the general formulas below
    # reduce to exactly the old ATM value, so their prices are unchanged.
    xs = (K-So) / (So * sigma * np.sqrt(T))
    if option_type == 'van call':
        val = (So - K) * norm.cdf(-xs) + So*sigma*np.sqrt(T)*norm.pdf(-xs)
    elif option_type == 'van put':
        val = (K - So) * norm.cdf(xs) + So*sigma*np.sqrt(T)*norm.pdf(xs)
    elif option_type == 'con call':
        val = norm.cdf(-xs)
    elif option_type == 'con put':
        val = norm.cdf(xs)
    elif option_type == 'aon call':
        val = So*norm.cdf(-xs) + So*sigma*np.sqrt(T)*norm.pdf(-xs)
    elif option_type == 'aon put':
        val = So*norm.cdf(xs) - So*sigma*np.sqrt(T)*norm.pdf(xs)
    else:
        raise(ValueError("Option type is invalid. " +
                         "Should be either 'van call', 'van put', 'con call', 'con put', 'aon call', or 'aon put'"))
    return val
def black_scholes(So, K, r, sigma, T, option_type):
    '''
    Calculate European option price using Black-Scholes (1973) model:
        dSt = r*dSt + sigma*St*dWt
        St = S0*exp{(r-sigma^2/2)t + sigma*Wt}

    Parameter
    ---------
    So: float
        price of underlying asset at time 0
    K: float
        strike price of option
    r: float
        drift of St
    sigma: float
        variance of Brownian motion
    T: float
        length of time
    option_type: str
        type of European option.
        Including: van call/put (vanilla), con call/put (cash-or-nothing),
        aon call/put (asset-or-nothing)

    Return
    ------
    val: value of the option at time 0

    Raises
    ------
    ValueError
        for an unrecognised option_type
    '''
    # d1/d2 are well defined at the money (log(So/K) == 0), so no ATM special
    # case is needed.
    # BUG FIX: the previous early return for So == K gave
    # sigma*So*sqrt(T/(2*pi)) for EVERY option type — that is only the
    # zero-rate vanilla ATM approximation; it ignored the interest rate r and
    # was simply wrong for the digital (con/aon) variants.
    d1 = (np.log(So/K) + (r+sigma**2/2)*T) / (sigma*np.sqrt(T))
    d2 = (np.log(So/K) + (r-sigma**2/2)*T) / (sigma*np.sqrt(T))
    if option_type == 'van call':
        val = So*norm.cdf(d1) - K*np.e**(-r*T)*norm.cdf(d2)
    elif option_type == 'van put':
        val = -So*norm.cdf(-d1) + K*np.e**(-r*T)*norm.cdf(-d2)
    elif option_type == 'con call':
        val = np.e**(-r*T) * norm.cdf(d2)
    elif option_type == 'con put':
        val = np.e**(-r*T) * norm.cdf(-d2)
    elif option_type == 'aon call':
        val = So*norm.cdf(d1)
    elif option_type == 'aon put':
        val = So*norm.cdf(-d1)
    else:
        raise(ValueError("Option type is invalid. " +
                         "Should be either 'van call', 'van put', 'con call', 'con put', 'aon call', or 'aon put'"))
    return val
| 2.984375
| 3
|
tests/builtins/test_dir.py
|
olgapinheiro/batavia
| 1
|
12774202
|
<filename>tests/builtins/test_dir.py
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class DirTests(TranspileTestCase):
    """Placeholder for dir()-specific transpile tests."""
    pass


class BuiltinDirFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    """Builtin-function coverage for dir(); the cases come from the base class."""
    function = "dir"

    # Cases from the base class that this implementation does not pass yet.
    not_implemented = [
        'test_class',
    ]
| 1.476563
| 1
|
api/resources/v1/logs.py
|
thegrowthapp/backend
| 1
|
12774203
|
<filename>api/resources/v1/logs.py
from flask import request
from flask_jwt_extended import get_jwt_identity, jwt_required
from flask_restful import Resource
from ...models import Log, Goal
from ...schemas import LogSchema
from .responses import respond
class LogListResource(Resource):
    """Collection endpoints for a goal's logs (JWT protected)."""

    @jwt_required
    def get(self):
        """Return the logs of the goal given by the ``goal_id`` query param."""
        # NOTE(review): ``Fields`` is never imported or defined in this module,
        # and ``DoesNotExist`` / ``ValidationError`` are not imported either —
        # as written these lines raise NameError. Confirm the missing imports.
        schema = LogSchema(many=True, only=Fields.Log.compact)
        try:
            goal = Goal.objects.get(id=request.args['goal_id'])
        except (DoesNotExist, ValidationError) as e:
            return respond(404, {}, ['Goal does not exist', str(e)])
        logs = goal.logs
        # Only collaborators of the goal's growthbook may read its logs.
        if get_jwt_identity() not in goal.growthbook.collaborating_identities():
            return respond(403, {}, ['Access forbidden'])
        return respond(200, {'logs': schema.dump(logs).data})

    @jwt_required
    def post(self):
        """Create a log under the goal given by ``goal_id`` and return it."""
        schema = LogSchema()
        log = Log(**schema.load(request.args).data)
        # NOTE(review): ``User`` is not imported in this module (NameError).
        log.user = User.objects.get(username=get_jwt_identity())
        try:
            goal = Goal.objects.get(id=request.args['goal_id'])
        except (DoesNotExist, ValidationError) as e:
            return respond(404, {}, ['Goal does not exist', str(e)])
        if get_jwt_identity() not in goal.growthbook.collaborating_identities():
            return respond(403, {}, ['Access forbidden'])
        try:
            goal.logs.append(log)
            goal.save()
        except (NotUniqueError, ValidationError) as e:
            # NOTE(review): ``NotUniqueError`` is also not imported here.
            return respond(400, {}, ['Validation error', str(e)])
        return respond(201, {'log': schema.dump(log).data})
class LogResource(Resource):
    """Single-log endpoints: fetch, update, delete (JWT protected)."""

    @jwt_required
    def get(self, id):
        """Return one log by id, if the caller collaborates on its parent."""
        try:
            log = Log.objects.get(id=id)
        except (DoesNotExist, ValidationError):
            # NOTE(review): these exception classes are not imported in this
            # module — NameError at runtime; confirm the missing imports.
            return respond(404, {}, ['Log does not exist'])
        if get_jwt_identity() in log._instance.collaborating_identities():
            schema = LogSchema()
        else:
            return respond(403, {}, ['Access forbidden'])
        return respond(200, {'log': schema.dump(log).data})

    @jwt_required
    def put(self, id):
        """Update one log by id."""
        try:
            log = Log.objects.get(id=id)
        except (DoesNotExist, ValidationError):
            return respond(404, {}, ['Log does not exist'])
        if get_jwt_identity() in log._instance.collaborating_identities():
            schema = LogSchema()
        else:
            return respond(403, {}, ['Access forbidden'])
        try:
            # NOTE(review): this updates the log with a dump of its OWN current
            # state, ignoring the request payload entirely — presumably it was
            # meant to be schema.load(request.args); confirm intended behaviour.
            log.update(**schema.dump(log).data)
            # Return updated document
            log = Log.objects.get(id=id)
        except (NotUniqueError, ValidationError) as e:
            return respond(400, {}, ['Validation error', str(e)])
        return respond(200, {'log': schema.dump(log).data})

    @jwt_required
    def delete(self, id):
        """Delete one log by id; 204 on success."""
        try:
            log = Log.objects.get(id=id)
        except (DoesNotExist, ValidationError):
            return respond(404, {}, ['Log does not exist'])
        if get_jwt_identity() not in log._instance.collaborating_identities():
            return respond(403, {}, ['Access forbidden'])
        log.delete()
        return respond(204)
| 2.265625
| 2
|
calico/openstack/test/test_plugin.py
|
fasaxc/felix
| 0
|
12774204
|
<reponame>fasaxc/felix
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
openstack.test.test_plugin
~~~~~~~~~~~
Unit test for the Calico/OpenStack Plugin.
"""
import mock
import sys
import unittest
import eventlet
import eventlet.queue
import traceback
import json
import inspect
from eventlet.support import greenlets as greenlet
# Start from a clean slate: drop any previously imported real zmq module.
if 'zmq' in sys.modules:
    del sys.modules['zmq']

# Stub out neutron and oslo BEFORE importing mech_calico, so the plugin can
# be imported without an OpenStack installation. Every submodule path the
# plugin touches must be registered in sys.modules explicitly.
sys.modules['neutron'] = m_neutron = mock.Mock()
sys.modules['neutron.common'] = m_neutron.common
sys.modules['neutron.openstack'] = m_neutron.openstack
sys.modules['neutron.openstack.common'] = m_neutron.openstack.common
sys.modules['neutron.plugins'] = m_neutron.plugins
sys.modules['neutron.plugins.ml2'] = m_neutron.plugins.ml2
sys.modules['neutron.plugins.ml2.drivers'] = m_neutron.plugins.ml2.drivers
sys.modules['oslo'] = m_oslo = mock.Mock()
sys.modules['oslo.config'] = m_oslo.config
# time is mocked so the tests can simulate the passage of time (see setUp_time).
sys.modules['time'] = m_time = mock.Mock()


# Define a stub class, that we will use as the base class for
# CalicoMechanismDriver.
class DriverBase(object):
    # Accept (and discard) the same constructor arguments as the real
    # SimpleAgentMechanismDriverBase.
    def __init__(self, agent_type, vif_type, vif_details):
        pass

# Replace Neutron's SimpleAgentMechanismDriverBase - which is the base class
# that CalicoMechanismDriver inherits from - with this stub class.
m_neutron.plugins.ml2.drivers.mech_agent.SimpleAgentMechanismDriverBase = \
    DriverBase

import calico.openstack.mech_calico as mech_calico

# Real-time pause used whenever a simulated sleep yields to other greenthreads.
REAL_EVENTLET_SLEEP_TIME = 0.2

# Test variation flags.
NO_HEARTBEAT_RESPONSE = 1
NO_ENDPOINT_RESPONSE = 2

# Value used to indicate 'timeout' in poll and sleep processing.
TIMEOUT_VALUE = object()
class TestPlugin(unittest.TestCase):
@classmethod
def setUpClass(cls):
global real_eventlet_sleep
global real_eventlet_spawn
# Replacement for eventlet.sleep: sleep for some simulated passage of
# time (as directed by simulated_time_advance), instead of for real
# elapsed time.
def simulated_time_sleep(secs):
# Create a new queue.
queue = eventlet.Queue(1)
queue.stack = inspect.stack()[1][3]
# Add it to the dict of sleepers, together with the waking up time.
sleepers[queue] = current_time + secs
print "T=%s: %s: Start sleep for %ss until T=%s" % (
current_time, queue.stack, secs, sleepers[queue]
)
# Do a zero time real sleep, to allow other threads to run.
real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
# Block until something is posted to the queue.
ignored = queue.get(True)
# Wake up.
return None
# Replacement for eventlet.spawn: track spawned threads so that we can
# kill them all when a test case ends.
def simulated_spawn(*args):
# Do the real spawn.
thread = real_eventlet_spawn(*args)
# Remember this thread.
threads.append(thread)
# Also return it.
return thread
# Hook sleeping. We must only do this once; hence it is in setUpClass
# rather than in setUp.
real_eventlet_sleep = eventlet.sleep
mech_calico.eventlet.sleep = simulated_time_sleep
# Similarly hook spawning.
real_eventlet_spawn = eventlet.spawn
mech_calico.eventlet.spawn = simulated_spawn
@classmethod
def tearDownClass(cls):
# Restore the real eventlet.sleep.
mech_calico.eventlet.sleep = real_eventlet_sleep
# Setup for explicit test code control of all operations on 0MQ sockets.
def setUp_sockets(self):
# Set of addresses that we have sockets bound to.
self.sockets = set()
# When a socket is created, print a message to say so, and hook its
# bind method.
def socket_created(tp):
print "New socket type %s" % tp
# Create a new mock socket.
socket = mock.Mock()
# Hook its bind and connect methods, so we can remember the address
# that it binds or connects to.
socket.bind.side_effect = make_socket_bound(socket)
socket.connect.side_effect = make_socket_connect(socket)
# Create a queue that we can use to deliver messages to be received
# on this socket.
socket.rcv_queue = eventlet.Queue(1)
# Hook the socket's recv_multipart and poll methods, to wait on
# this queue.
socket.recv_multipart.side_effect = make_recv('multipart', socket)
socket.recv_json.side_effect = make_recv('json', socket)
socket.poll.side_effect = make_poll(socket)
# Add this to the test code's list of known sockets.
self.sockets.add(socket)
return socket
# When a socket binds to an address, remember that address.
def make_socket_bound(socket):
def socket_bound(addr):
print "Socket %s bound to %s" % (socket, addr)
# Remember the address.
socket.bound_address = addr
return None
return socket_bound
# When a socket connects to an address, remember that address.
def make_socket_connect(socket):
def socket_connect(addr):
print "Socket %s connected to %s" % (socket, addr)
# Remember the address.
socket.connected_address = addr
return None
return socket_connect
# When socket calls recv_multipart or recv_json, block on the socket's
# receive queue.
def make_recv(name, socket):
def recv(flags=0, *args):
print "Socket %s recv_%s..." % (socket, name)
# Block until there's something to receive, and then get that.
try:
msg = socket.rcv_queue.get(not (flags &
mech_calico.zmq.NOBLOCK))
except eventlet.queue.Empty:
raise mech_calico.Again()
# Return that.
return msg
return recv
# When socket calls poll, block on the socket's receive queue.
def make_poll(socket):
def poll(ms):
print "Socket %s poll for %sms..." % (socket, ms)
# Add this socket's receive queue to the set of current
# sleepers.
socket.rcv_queue.stack = inspect.stack()[1][3]
sleepers[socket.rcv_queue] = current_time + ms / 1000
# Block until there's something added to the queue.
msg = socket.rcv_queue.get(True)
# If what was added was not the timeout indication, put it back
# on the queue, for a following receive call.
if msg is not TIMEOUT_VALUE:
socket.rcv_queue.put_nowait(msg)
real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
# Return nothing.
return None
return poll
# Intercept 0MQ socket creations, so that we can hook all of the
# operations on sockets, using the methods above.
mech_calico.zmq.Context = mock.Mock()
self.zmq_context = mech_calico.zmq.Context.return_value
self.zmq_context.socket.side_effect = socket_created
# Setup to intercept and display logging by the code under test.
def setUp_logging(self):
    """Replace mech_calico.LOG with a mock whose methods print to stdout.

    This keeps the test output self-describing without configuring any
    real logging handlers.
    """
    # Print logs to stdout.
    def log_info(msg):
        print " INFO %s" % msg
        return None
    def log_debug(msg):
        print " DEBUG %s" % msg
        return None
    def log_warn(msg):
        print " WARN %s" % msg
        return None
    def log_error(msg):
        print " ERROR %s" % msg
        return None
    def log_exception(msg):
        print " EXCEPTION %s" % msg
        # GreenletExit is how tearDown kills worker threads, so only dump
        # a traceback for genuinely unexpected exceptions.
        if sys.exc_type is not greenlet.GreenletExit:
            traceback.print_exc()
        return None
    # Hook logging.
    mech_calico.LOG = mock.Mock()
    mech_calico.LOG.info.side_effect = log_info
    mech_calico.LOG.debug.side_effect = log_debug
    mech_calico.LOG.warn.side_effect = log_warn
    mech_calico.LOG.error.side_effect = log_error
    mech_calico.LOG.exception.side_effect = log_exception
# Setup to intercept sleep calls made by the code under test, and hence to
# (i) control when those expire, and (ii) allow time to appear to pass (to
# the code under test) without actually having to wait for that time.
def setUp_time(self):
    """Reset the simulated clock, the sleeper registry and the thread list."""
    global current_time
    global sleepers
    global threads
    # Reset the simulated time (in seconds) that has passed since the
    # beginning of the test.
    current_time = 0
    # Make time.time() return current_time.
    m_time.time.side_effect = lambda: current_time
    # Reset the dict of current sleepers.  In each dict entry, the key is
    # an eventlet.Queue object and the value is the time at which the sleep
    # should complete.
    sleepers = {}
    # Green threads spawned during the test; killed in tearDown.
    threads = []
    print "\nTEST CASE: %s" % self.id()
# Method for the test code to call when it wants to advance the simulated
# time.
def simulated_time_advance(self, secs):
    """Advance the simulated clock by `secs` seconds.

    Advances in steps, waking each registered sleeper as its deadline is
    reached, and consumes/validates any periodic ACL HEARTBEAT
    publications that fall inside the advanced interval.
    """
    global current_time
    while (secs > 0):
        print "T=%s: Want to advance by %s" % (current_time, secs)
        # Determine the time to advance to in this iteration: either the
        # full time that we've been asked for, or the time at which the
        # next sleeper should wake up, whichever of those is earlier.
        wake_up_time = current_time + secs
        for queue in sleepers.keys():
            if sleepers[queue] < wake_up_time:
                # This sleeper will wake up before the time that we've been
                # asked to advance to.
                wake_up_time = sleepers[queue]
        # Check if we're about to advance past any exact multiples of
        # HEARTBEAT_SEND_INTERVAL_SECS.
        num_acl_pub_heartbeats = (
            int(wake_up_time / mech_calico.HEARTBEAT_SEND_INTERVAL_SECS) -
            int(current_time / mech_calico.HEARTBEAT_SEND_INTERVAL_SECS)
        )
        # Advance to the determined time.
        secs -= (wake_up_time - current_time)
        current_time = wake_up_time
        print "T=%s" % current_time
        # Wake up all sleepers that should now wake up.  (Python 2:
        # .keys() returns a list copy, so deleting inside the loop is safe.)
        for queue in sleepers.keys():
            if sleepers[queue] <= current_time:
                print "T=%s >= %s: %s: Wake up!" % (current_time,
                                                    sleepers[queue],
                                                    queue.stack)
                del sleepers[queue]
                queue.put_nowait(TIMEOUT_VALUE)
        # Allow woken (and possibly other) threads to run.
        real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
        # Handle any ACL HEARTBEAT publications.
        for i in range(num_acl_pub_heartbeats):
            print "Handle ACL HEARTBEAT publication"
            pub = {'type': 'HEARTBEAT',
                   'issued': current_time * 1000}
            self.acl_pub_socket.send_multipart.assert_called_once_with(
                ['networkheartbeat'.encode('utf-8'),
                 json.dumps(pub).encode('utf-8')])
            self.acl_pub_socket.send_multipart.reset_mock()
# Setup before each test case (= each method below whose name begins with
# "test").
def setUp(self):
    """Hook sockets, logging and time, then create the driver under test."""
    # Normally do not provide bind_host config.
    m_oslo.config.cfg.CONF.bind_host = None
    # Setup to control 0MQ socket operations.
    self.setUp_sockets()
    # Setup to control logging.
    self.setUp_logging()
    # Setup to control the passage of time.
    self.setUp_time()
    # Create an instance of CalicoMechanismDriver.
    self.driver = mech_calico.CalicoMechanismDriver()
# Tear down after each test case.
def tearDown(self):
    """Kill any green threads spawned during the test."""
    print "\nClean up remaining green threads..."
    for thread in threads:
        thread.kill()
# Check that a socket is now bound to a specified address and port, and
# return that socket.
def assert_get_bound_socket(self, addr, port):
    """Assert exactly one test socket is bound to tcp://addr:port; return it."""
    bound_sockets = set(socket for socket in self.sockets
                        if socket.bound_address == ("tcp://%s:%s" %
                                                    (addr, port)))
    self.assertEqual(len(bound_sockets), 1)
    return bound_sockets.pop()
# Test binding to a specific IP address.
def test_bind_host(self):
    """When bind_host is configured, all three sockets bind to that IP."""
    # Provide bind_host config.
    ip_addr = '192.168.1.1'
    m_oslo.config.cfg.CONF.bind_host = ip_addr
    # Tell the driver to initialize.
    self.driver.initialize()
    # Check that sockets are bound to the specific IP address.
    self.felix_router_socket = self.assert_get_bound_socket(ip_addr, 9901)
    self.acl_get_socket = self.assert_get_bound_socket(ip_addr, 9903)
    self.acl_pub_socket = self.assert_get_bound_socket(ip_addr, 9904)
# Mainline test.
def test_mainline(self):
    """Drive the full happy-path sequence from start-of-day to endpoint
    deletion.  Some later steps are still placeholders (see their bodies).
    """
    # Start of day processing: initialization and socket binding.
    self.start_of_day()
    # Connect a Felix instance.
    self.felix_connect()
    # Further mainline steps that we haven't actually implemented yet.
    self.acl_connect()
    self.call_noop_entry_points()
    self.new_endpoint()
    self.endpoint_update()
    self.sg_rule_update()
    self.endpoint_deletion()
# Test when plugin sends a HEARTBEAT request and Felix does not respond
# within HEARTBEAT_RESPONSE_TIMEOUT.
def test_no_heartbeat_response(self):
    """Felix misses a heartbeat; plugin cleans up; reconnect then works."""
    # Start of day processing: initialization and socket binding.
    self.start_of_day()
    # Connect a Felix instance.
    self.felix_connect(flags=set([NO_HEARTBEAT_RESPONSE]))
    # Forget the now-dead endpoint socket so the next connect is tracked.
    self.sockets.remove(self.felix_endpoint_socket)
    # Check that it works for Felix to connect again after the plugin has
    # cleaned up following that non-response.
    self.felix_connect()
# Test when plugin sends an ENDPOINT* request and Felix does not respond
# within ENDPOINT_RESPONSE_TIMEOUT.
def test_no_endpoint_response(self):
    """Felix misses an ENDPOINTCREATED response; plugin recovers on
    reconnect and the endpoint is then processed successfully.
    """
    # Start of day processing: initialization and socket binding.
    self.start_of_day()
    # Connect a Felix instance.
    self.felix_connect()
    # Process a new endpoint, but don't send in the ENDPOINTCREATED
    # response.
    self.new_endpoint(flags=set([NO_ENDPOINT_RESPONSE]))
    self.sockets.remove(self.felix_endpoint_socket)
    # Let time pass to allow the felix_heartbeat_thread for the old
    # connection to die.  It's a bug that we need to do this: Github issue
    # #224.
    self.simulated_time_advance(40)
    # Connect the Felix instance again.
    self.felix_connect()
    # Now process the new endpoint successfully.
    self.new_endpoint()
def start_of_day(self):
    """Initialize the driver and capture its three listening sockets
    (Felix ROUTER 9901, ACL GET 9903, ACL PUB 9904).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Check that there's a socket bound to port 9901, and get it.
    self.felix_router_socket = self.assert_get_bound_socket('*', 9901)
    print "Felix router socket is %s" % self.felix_router_socket
    # Check that there's a socket bound to port 9903, and get it.
    self.acl_get_socket = self.assert_get_bound_socket('*', 9903)
    print "ACL GET socket is %s" % self.acl_get_socket
    # Check that there's a socket bound to port 9904, and get it.
    self.acl_pub_socket = self.assert_get_bound_socket('*', 9904)
    print "ACL PUB socket is %s" % self.acl_pub_socket
def felix_connect(self, **kwargs):
    """Simulate a Felix agent connecting: RESYNCSTATE handshake, then
    heartbeats in both directions.

    kwargs may contain flags=set([...]); NO_HEARTBEAT_RESPONSE simulates
    Felix failing to answer the plugin's HEARTBEAT within the timeout, in
    which case this method returns after the plugin's cleanup.
    Leaves self.felix_endpoint_socket pointing at the plugin's REQ socket.
    """
    # Hook the Neutron database.
    self.db = mech_calico.manager.NeutronManager.get_plugin()
    self.db_context = mech_calico.ctx.get_admin_context()
    self.db.get_ports.return_value = []
    # Get test variation flags.
    flags = kwargs.get('flags', set())
    # Send a RESYNCSTATE.
    resync = {'type': 'RESYNCSTATE',
              'resync_id': 'resync#1',
              'issued': current_time * 1000,
              'hostname': 'felix-host-1'}
    self.felix_router_socket.rcv_queue.put_nowait(
        ['felix-1',
         '',
         json.dumps(resync).encode('utf-8')])
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    # Check DB got create_or_update_agent call.
    self.db.create_or_update_agent.assert_called_once_with(
        self.db_context,
        {'agent_type': mech_calico.AGENT_TYPE_FELIX,
         'binary': '',
         'host': 'felix-host-1',
         'topic': mech_calico.constants.L2_AGENT_TOPIC,
         'start_flag': True})
    self.db.create_or_update_agent.reset_mock()
    # Check RESYNCSTATE response was sent.
    self.felix_router_socket.send_multipart.assert_called_once_with(
        ['felix-1',
         '',
         json.dumps({'type': 'RESYNCSTATE',
                     'endpoint_count': 0,
                     'interface_prefix': 'tap',
                     'rc': 'SUCCESS',
                     'message': 'Здра́вствуйте!'}).encode('utf-8')])
    self.felix_router_socket.send_multipart.reset_mock()
    # Send HEARTBEAT from Felix and check for response.
    self.felix_router_socket.rcv_queue.put_nowait(
        ['felix-1',
         '',
         json.dumps({'type': 'HEARTBEAT'}).encode('utf-8')])
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    self.felix_router_socket.send_multipart.assert_called_once_with(
        ['felix-1',
         '',
         json.dumps({'type': 'HEARTBEAT'}).encode('utf-8')])
    self.felix_router_socket.send_multipart.reset_mock()
    # Get the socket that the plugin used to connect back to Felix.
    connected_sockets = set(socket for socket in self.sockets
                            if (socket.connected_address ==
                                "tcp://felix-host-1:9902"))
    self.assertEqual(len(connected_sockets), 1)
    self.felix_endpoint_socket = connected_sockets.pop()
    print "Felix endpoint socket is %s" % self.felix_endpoint_socket
    # Need another yield here, apparently, to allow felix_heartbeat_thread
    # to start running.
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    # Receive HEARTBEAT to Felix from the plugin, and send response.
    self.simulated_time_advance(30)
    self.felix_endpoint_socket.send_json.assert_called_once_with(
        {'type': 'HEARTBEAT'},
        mech_calico.zmq.NOBLOCK)
    self.felix_endpoint_socket.send_json.reset_mock()
    if NO_HEARTBEAT_RESPONSE in flags:
        # Advance time by more than HEARTBEAT_RESPONSE_TIMEOUT.
        self.simulated_time_advance((mech_calico.HEARTBEAT_RESPONSE_TIMEOUT
                                     / 1000) + 1)
        # The plugin now cleans up its Felix socket.
        return
    self.felix_endpoint_socket.rcv_queue.put_nowait(
        {'type': 'HEARTBEAT'})
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    # Check DB got create_or_update_agent call.
    self.db.create_or_update_agent.assert_called_once_with(
        self.db_context,
        {'agent_type': mech_calico.AGENT_TYPE_FELIX,
         'binary': '',
         'host': 'felix-host-1',
         'topic': mech_calico.constants.L2_AGENT_TOPIC})
    self.db.create_or_update_agent.reset_mock()
    # Yield to allow anything pending on other threads to come out.
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
def acl_connect(self):
    """Placeholder: simulate an ACL Manager connecting (not yet implemented)."""
    # ACL Manager connection.
    #
    # - sim-DB: Prep response to next get_security_groups query, returning
    #   the default SG.  Prep null response to next
    #   _get_port_security_group_bindings call.
    #
    # - sim-ACLM: Connect to PLUGIN_ACLGET_PORT, send GETGROUPS, check get
    #   GETGROUPS response.  Check get GROUPUPDATE publication describing
    #   default SG.
    #
    # - sim-ACLM: Send HEARTBEAT, check get HEARTBEAT response.
    #
    # - sim-ACLM: Wait for HEARTBEAT_SEND_INTERVAL_SECS, check get
    #   HEARTBEAT, send HEARTBEAT response.
    pass
def call_noop_entry_points(self):
    """Placeholder: exercise the driver's no-op entry points (not yet
    implemented).
    """
    # Mechanism driver entry points that are currently implemented as
    # no-ops (because Calico function does not need them).
    #
    # - sim-ML2: Call update_subnet_postcommit, update_network_postcommit,
    #   delete_subnet_postcommit, delete_network_postcommit,
    #   create_network_postcommit, create_subnet_postcommit,
    #   update_network_postcommit, update_subnet_postcommit.
    pass
# New endpoint processing.
def new_endpoint(self, **kwargs):
    """Simulate ML2 creating a port and verify the resulting Felix and
    ACL Manager traffic.

    kwargs may contain flags=set([...]); NO_ENDPOINT_RESPONSE simulates
    Felix not answering the ENDPOINTCREATED request within the timeout,
    in which case this method returns after the plugin's cleanup.
    """
    # Get test variation flags.
    flags = kwargs.get('flags', set())
    # Simulate ML2 asking the driver if it can handle a port.
    self.assertTrue(self.driver.check_segment_for_agent(
        {mech_calico.api.NETWORK_TYPE: 'flat'},
        mech_calico.constants.AGENT_TYPE_DHCP
    ))
    # Prep response to next get_subnet call.
    self.db.get_subnet.return_value = {'gateway_ip': '10.65.0.1'}
    # Simulate ML2 notifying creation of the new port.
    context = mock.Mock()
    context._port = {'binding:host_id': 'felix-host-1',
                     'id': 'DEADBEEF-1234-5678',
                     'device_owner': 'compute:nova',
                     'fixed_ips': [{'subnet_id': '10.65.0/24',
                                    'ip_address': '10.65.0.2'}],
                     'mac_address': '00:11:22:33:44:55',
                     'admin_state_up': True}
    # Spawned on a green thread because create_port_postcommit blocks
    # waiting for Felix's response.
    real_eventlet_spawn(
        lambda: self.driver.create_port_postcommit(context))
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    # Check ENDPOINTCREATED request is sent to Felix.  Simulate Felix
    # responding successfully.
    self.felix_endpoint_socket.send_json.assert_called_once_with(
        {'mac': '00:11:22:33:44:55',
         'addrs': [{'properties': {'gr': False},
                    'addr': '10.65.0.2',
                    'gateway': '10.65.0.1'}],
         'endpoint_id': 'DEADBEEF-1234-5678',
         'interface_name': 'tapDEADBEEF-12',
         'issued': mock.ANY,
         'resync_id': None,
         'type': 'ENDPOINTCREATED',
         'state': 'enabled'},
        mech_calico.zmq.NOBLOCK)
    self.felix_endpoint_socket.send_json.reset_mock()
    if NO_ENDPOINT_RESPONSE in flags:
        # Advance time by more than ENDPOINT_RESPONSE_TIMEOUT.
        self.simulated_time_advance((mech_calico.ENDPOINT_RESPONSE_TIMEOUT
                                     / 1000) + 1)
        # The plugin now cleans up its Felix socket.
        return
    self.felix_endpoint_socket.rcv_queue.put_nowait(
        {'type': 'ENDPOINTCREATED',
         'rc': 'SUCCESS',
         'message': ''})
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
    # Check get update_port_status call, indicating port active.
    self.db.update_port_status.assert_called_once_with(
        context._plugin_context,
        context._port['id'],
        mech_calico.constants.PORT_STATUS_ACTIVE)
    self.db.update_port_status.reset_mock()
    # Prep appropriate responses for next get_security_group,
    # _get_port_security_group_bindings and get_port calls.
    self.db.get_security_group.return_value = {
        'id': 'SG-1',
        'security_group_rules': []
    }
    self.db._get_port_security_group_bindings.return_value = [
        {'port_id': 'DEADBEEF-1234-5678'}
    ]
    self.db.get_port.return_value = context._port
    # Call security_groups_member_updated with default SG ID.
    self.db.notifier.security_groups_member_updated(context, ['SG-1'])
    # Check get GROUPUPDATE publication indicating port added to default SG
    # ID.
    pub = {'rules': {'inbound': [],
                     'outbound': [],
                     'outbound_default': 'deny',
                     'inbound_default': 'deny'},
           'group': 'SG-1',
           'type': 'GROUPUPDATE',
           'members': {'DEADBEEF-1234-5678': ['10.65.0.2']},
           'issued': current_time * 1000}
    # Unpack the last self.acl_pub_socket.send_multipart call, to check
    # that its args were as expected.  It doesn't work to check the
    # arguments directly using assert_called_once_with(...), because
    # variation is possible when a dict such as 'pub' is represented as a
    # string.
    kall = self.acl_pub_socket.send_multipart.call_args
    assert kall is not None
    # NOTE(review): this rebinds the method's 'kwargs' parameter; harmless
    # here because 'flags' was already extracted above.
    args, kwargs = kall
    assert len(args) == 1
    assert len(args[0]) == 2
    assert args[0][0].decode('utf-8') == 'groups'
    assert json.loads(args[0][1].decode('utf-8')) == pub
    self.acl_pub_socket.send_multipart.reset_mock()
def endpoint_update(self):
    """Placeholder: simulate an endpoint update (not yet implemented)."""
    # Endpoint update processing.
    #
    # - sim-DB: Prep response to next get_subnet call.
    #
    # - sim-ML2: Call update_port_postcommit for an endpoint port with
    #   host_id matching sim-Felix.
    #
    # - sim-Felix: Check get ENDPOINTUPDATED.  Send successful response.
    pass
def sg_rule_update(self):
    """Placeholder: simulate a security-group rules update (not yet
    implemented).
    """
    # SG rules update processing.
    #
    # - sim-DB: Prep appropriate responses for next get_security_group,
    #   _get_port_security_group_bindings and get_port calls.
    #
    # - sim-ML2: Call security_groups_rule_updated with default SG ID.
    #
    # - sim-ACLM: Check get GROUPUPDATE publication indicating updated
    #   rules.
    pass
def endpoint_deletion(self):
    """Placeholder: simulate an endpoint deletion (not yet implemented)."""
    # Endpoint deletion processing.
    #
    # - sim-ML2: Call delete_port_postcommit for an endpoint port with
    #   host_id matching sim-Felix.
    #
    # - sim-Felix: Check get ENDPOINTDESTROYED.  Send successful response.
    #
    # - sim-DB: Prep appropriate responses for next get_security_group,
    #   _get_port_security_group_bindings and get_port calls.
    #
    # - sim-ML2: Call security_groups_member_updated with default SG ID.
    #
    # - sim-ACLM: Check get GROUPUPDATE publication indicating port removed
    #   from default SG ID.
    pass
def test_timing_new_endpoint(self):
    """Placeholder: mainline variant with reordered ML2 calls (no
    assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Repeat mainline test with variation: for a new endpoint, sim-ML2
    # calls security_groups_member_updated before create_port_postcommit,
    # instead of after it.
def test_timing_endpoint_deletion(self):
    """Placeholder: mainline variant with reordered deletion calls (no
    assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Repeat mainline test with variation: for an endpoint being deleted,
    # sim-ML2 calls security_groups_member_updated before
    # delete_port_postcommit, instead of after it.
def test_multiple_2(self):
    """Placeholder: two concurrent Felix instances (no assertions yet
    beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Connect two Felix instances.  Create multiple endpoints, with host-id
    # selecting one of the available Felices.
    #
    # Check plugin sends HEARTBEATs to both instances and correctly
    # processes HEARTBEATs from both instances.
    #
    # Create lots of endpoints, spread across the two instances.  Then get
    # both instances to send RESYNCSTATE at the same time.
def test_multiple_10(self):
    """Placeholder: ten concurrent Felix instances (no assertions yet
    beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Connect 10 Felix instances.  Create 100 endpoints, 10 for each
    # instance.  Put each endpoint into one of 10 SGs, so that each Felix
    # has one endpoint in each of the 10 SGs.  Get all 10 instances to send
    # RESYNCSTATE in series (without any delay between them).  Send
    # GETGROUPS from ACL manager, check that all SGs are correctly resent
    # to ACL manager.
# Tests of partners disconnecting and/or connectivity trouble...
#
# Test the following possible errors to various socket operations.  These
# all represent different manifestations of networking connectivity
# trouble.
def test_felix_router_addr_in_use(self):
    """Placeholder: bind failure on the Felix ROUTER socket (not yet
    implemented).
    """
    # Operations on the PLUGIN_ENDPOINT_PORT ROUTER socket.
    #
    # : self.felix_router_socket = self.zmq_context.socket(zmq.ROUTER)
    #
    # - 'Address in use' error when binding to PLUGIN_ENDPOINT_PORT.
    pass
def test_acl_get_addr_in_use(self):
    """Placeholder: bind failure on the ACL GET socket (not yet
    implemented).
    """
    # Operations on the PLUGIN_ACLGET_PORT ROUTER socket.
    #
    # : self.acl_get_socket = self.zmq_context.socket(zmq.ROUTER)
    #
    # - 'Address in use' error when binding to PLUGIN_ACLGET_PORT.
    pass
def test_acl_pub_addr_in_use(self):
    """Placeholder: bind failure on the ACL PUB socket (not yet
    implemented).
    """
    # Operations on the PLUGIN_ACLPUB_PORT PUB socket.
    #
    # : self.acl_pub_socket = self.zmq_context.socket(zmq.PUB)
    #
    # - 'Address in use' error when binding to PLUGIN_ACLPUB_PORT.
    pass
def test_felix_eagain_snd_endpoint(self):
    """Placeholder: EWOULDBLOCK sending an ENDPOINT* request (not yet
    implemented).
    """
    # Operations on the FELIX_ENDPOINT_PORT REQ socket.
    #
    # : sock = self.zmq_context.socket(zmq.REQ)
    # : sock.setsockopt(zmq.LINGER, 0)
    # : sock.connect("tcp://%s:%s" % (hostname, FELIX_ENDPOINT_PORT))
    # : self.felix_peer_sockets[hostname] = sock
    #
    # - 'EWOULDBLOCK' error when sending ENDPOINT* request.
    pass
def test_felix_eagain_rcv_endpoint(self):
    """Placeholder: EWOULDBLOCK receiving an ENDPOINT* response (not yet
    implemented).
    """
    # - 'EWOULDBLOCK' error when receiving ENDPOINT* response.
    pass
def test_felix_eagain_snd_heartbeat(self):
    """Placeholder: EWOULDBLOCK sending a HEARTBEAT request (not yet
    implemented).
    """
    # - 'EWOULDBLOCK' error when sending HEARTBEAT request.
    pass
def test_felix_eagain_rcv_heartbeat(self):
    """Placeholder: EWOULDBLOCK receiving a HEARTBEAT response (not yet
    implemented).
    """
    # - 'EWOULDBLOCK' error when receiving HEARTBEAT response.
    pass
def test_connectivity_blips(self):
    """Placeholder: continuity across disconnect/reconnect blips (no
    assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Test the following scenarios, to check that plugin processing is
    # continuous and correct across connectivity blips.
    #
    # - Connect a Felix, and process a new endpoint for that Felix.
    #   Simulate disconnection and reconnection, in the form of a
    #   RESYNCSTATE on new connection but with same hostname.  Check that
    #   the existing endpoint is sent on the new connection.  Check that
    #   heartbeats occur as normal on the new connection.
    #
    # - Add another new endpoint for same hostname, and check it is
    #   processed normally and notified on the new connection.
    #
    # - Simulate disconnect and reconnect again, and check that both
    #   existing endpoints are notified on the new active connection (#3),
    #   after the new RESYNCSTATE.
def test_no_felix_new_endpoint(self):
    """Placeholder: new endpoint when the required Felix is unavailable
    (no assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # ** Error cases
    #
    # Do new endpoint processing when required Felix is not available.
    # Check that sim-ML2 sees a FelixUnavailable exception from its
    # create_port_postcommit call.
    #
    # Call create_port_postcommit again with host-id changed to match a
    # Felix that _is_ available.  Check that new endpoint processing then
    # proceeds normally.
def test_no_felix_endpoint_update(self):
    """Placeholder: endpoint update when the required Felix is unavailable
    (no assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Do endpoint update processing when required Felix is not available.
    # Check that sim-ML2 sees a FelixUnavailable exception from its
    # update_port_postcommit call.
def test_no_felix_endpoint_deleted(self):
    """Placeholder: endpoint deletion when the required Felix is
    unavailable (no assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # Do endpoint deletion processing when required Felix is not available.
    # Check that sim-ML2 sees a FelixUnavailable exception from its
    # delete_port_postcommit call.
def test_code_coverage(self):
    """Placeholder: reminder to fill remaining coverage gaps (no
    assertions yet beyond driver initialization).
    """
    # Tell the driver to initialize.
    self.driver.initialize()
    # ** Code coverage
    #
    # After implementing and executing all of the above, review code
    # coverage and add further tests for any mech_calico.py lines that have
    # not yet been covered.  (Or else persuade ourselves that we don't
    # actually need those lines, and delete them.)
| 1.640625
| 2
|
tests/userprofile/decorators_test.py
|
BMeu/Aerarium
| 0
|
12774205
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from flask import url_for
from flask_login import current_user
from flask_login import login_user
from werkzeug.exceptions import Forbidden
from werkzeug.wrappers import Response
from app import create_app
from app import db
from app.configuration import TestConfiguration
from app.userprofile import logout_required
from app.userprofile import Permission
from app.userprofile import permission_required
from app.userprofile import permission_required_all
from app.userprofile import permission_required_one_of
from app.userprofile import Role
from app.userprofile import User
class DecoratorsTest(TestCase):
    """Tests for the userprofile view decorators."""

    def setUp(self):
        """
        Initialize the test cases.

        Push an application and a request context, and create all DB tables.
        """
        self.app = create_app(TestConfiguration)
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.request_context = self.app.test_request_context()
        self.request_context.push()
        db.create_all()

    def tearDown(self):
        """
        Reset the test cases.

        Drop all tables and pop the contexts pushed in setUp().
        """
        db.session.remove()
        db.drop_all()
        self.request_context.pop()
        self.app_context.pop()

    @staticmethod
    def view_function() -> str:
        """
        A simple test "view" function.

        :return: 'Decorated View'.
        """
        return 'Decorated View'

    @staticmethod
    def _create_logged_in_user(permissions=None):
        """
        Create a user with an 'Administrator' role, persist them, and log them in.

        Extracted helper: this exact boilerplate used to be repeated in six of
        the permission tests below.

        :param permissions: Optional Permission flags to assign to the role.
        :return: The (now logged-in) user.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        if permissions is not None:
            user.role.permissions = permissions
        db.session.add(user)
        db.session.commit()
        user.login(email, password)
        return user

    def test_logout_required_logged_out(self):
        """
        Test the `logout_required` decorator with an anonymous user.

        Expected result: The decorated view function is returned.
        """
        view_function = logout_required(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)

    def test_logout_required_logged_in(self):
        """
        Test the `logout_required` decorator with a logged-in user.

        Expected result: The redirect response to the home page is returned.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        user = User(email, name)
        db.session.add(user)
        db.session.commit()
        login_user(user)
        redirect_function = logout_required(self.view_function)
        response = redirect_function()
        self.assertIsInstance(response, Response)
        self.assertEqual(302, response.status_code)
        self.assertEqual(url_for('main.index'), response.location)

    def test_permission_required_no_role(self):
        """
        Test the `permission_required` decorator if the user does not have a role.

        Expected result: The request is aborted with an error 403.
        """
        # Ensure the user has no role.
        self.assertFalse(hasattr(current_user, 'role'))
        with self.assertRaises(Forbidden):
            permission_required(Permission.EditRole)(self.view_function)()

    def test_permission_required_no_permission(self):
        """
        Test the `permission_required` decorator if the user does not have the requested permission.

        Expected result: The request is aborted with an error 403.
        """
        user = self._create_logged_in_user()
        permission = Permission.EditRole
        self.assertFalse(user.role.has_permission(permission))
        with self.assertRaises(Forbidden):
            permission_required(permission)(self.view_function)()

    def test_permission_required_has_permission(self):
        """
        Test the `permission_required` decorator if the user has the requested permission.

        Expected result: The decorated view function is returned.
        """
        permission = Permission.EditRole
        user = self._create_logged_in_user(permissions=permission)
        self.assertTrue(user.role.has_permission(permission))
        view_function = permission_required(permission)(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)

    def test_permission_required_all_not_all_permissions(self):
        """
        Test the `permission_required_all` decorator if the user does not have all the requested permissions.

        Expected result: The request is aborted with an error 403.
        """
        user = self._create_logged_in_user(permissions=Permission.EditRole)
        self.assertTrue(user.role.has_permission(Permission.EditRole))
        self.assertFalse(user.role.has_permission(Permission.EditUser))
        with self.assertRaises(Forbidden):
            permission_required_all(Permission.EditRole, Permission.EditUser)(self.view_function)()

    def test_permission_required_all_has_permissions(self):
        """
        Test the `permission_required_all` decorator if the user has all the requested permissions.

        Expected result: The decorated view function is returned.
        """
        user = self._create_logged_in_user(permissions=Permission.EditRole | Permission.EditUser)
        self.assertTrue(user.role.has_permissions_all(Permission.EditRole, Permission.EditUser))
        view_function = permission_required_all(Permission.EditRole, Permission.EditUser)(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)

    def test_permission_required_one_of_no_permission(self):
        """
        Test the `permission_required_one_of` decorator if the user does not have any of the requested permissions.

        Expected result: The request is aborted with an error 403.
        """
        user = self._create_logged_in_user()
        self.assertEqual(Permission(0), user.role.permissions)
        with self.assertRaises(Forbidden):
            permission_required_one_of(Permission.EditRole, Permission.EditUser)(self.view_function)()

    def test_permission_required_one_of_has_permission(self):
        """
        Test the `permission_required_one_of` decorator if the user has one of the requested permissions, but not all.

        Expected result: The decorated view function is returned.
        """
        user = self._create_logged_in_user(permissions=Permission.EditRole)
        self.assertTrue(user.role.has_permission(Permission.EditRole))
        self.assertFalse(user.role.has_permission(Permission.EditUser))
        view_function = permission_required_one_of(Permission.EditRole, Permission.EditUser)(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)
| 2.875
| 3
|
scripts/generate_image_per_word.py
|
nmningmei/uncon_semantic
| 0
|
12774206
|
<filename>scripts/generate_image_per_word.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate one stimulus image per word (Spanish and English variants).

Created on Tue Feb 18 16:11:23 2020

@author: nmei

Defect fixed: this file contained unresolved git merge conflict markers
(<<<<<<< HEAD / ======= / >>>>>>>), which made it a SyntaxError.  The
conflict is resolved by keeping the newer branch (both Spanish and English
images, plus the blank_dur and category columns); the two near-identical
render loops are deduplicated into a helper.
"""
import os

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

plt.style.use('dark_background')

df = pd.read_csv('../data/sampled.csv', encoding='latin-1')
figure_dir = '../stimuli_figure'
if not os.path.exists(figure_dir):
    os.mkdir(figure_dir)

# Rendering parameters: 512x512-pixel square images at 300 dpi.
dpi = 300
image_size = 512
fontsize = 20


def _save_word_image(word, path):
    """Render `word` centered on a square dark-background figure at `path`."""
    fig, ax = plt.subplots(figsize=(image_size / dpi, image_size / dpi))
    ax.text(0.45, 0.45, word,
            ha='center',
            va='center',
            fontsize=fontsize,
            )
    ax.axis('off')
    plt.gca().set_aspect('equal', adjustable='box')
    fig.savefig(path, dpi=dpi)
    plt.close('all')


# Spanish words: file names are the lower-cased English translation.
pathes = []
for ii, row in df.iterrows():
    path = os.path.join(figure_dir, f'{row["English"].lower()}.jpeg')
    _save_word_image(row['Word'].lower(), path)
    pathes.append(path)
df['PATH_spanish'] = pathes

# English words: file names are the UPPER-cased English translation, so the
# two variants do not overwrite each other.
pathes = []
for ii, row in df.iterrows():
    path = os.path.join(figure_dir, f'{row["English"].upper()}.jpeg')
    _save_word_image(row['English'].lower(), path)
    pathes.append(path)
df['PATH_english'] = pathes

# Column names must not contain newlines (they break downstream CSV use).
columns = []
for col in df.columns:
    if '\n' in col:
        col = col.replace('\n', '_')
    columns.append(col)
df.columns = columns

# Add a varying blank period per trial, and a binary living/nonliving label.
df['blank_dur'] = np.random.uniform(low=0.3, high=0.7, size=df.shape[0])
df['category'] = df['Category'].map({'animal': 'Living_Things',
                                     'object': 'Nonliving_Things'})
df.to_csv('../data/sampled_words.csv', encoding='latin-1', index=False)
| 2.609375
| 3
|
plugins/multi_neighbors/test_multi_neighbors.py
|
mohnjahoney/website_source
| 13
|
12774207
|
<reponame>mohnjahoney/website_source<filename>plugins/multi_neighbors/test_multi_neighbors.py<gh_stars>10-100
import unittest
import multi_neighbors
class PseudoArticlesGenerator:
    """Minimal stand-in for Pelican's ArticlesGenerator.

    Carries only the two attributes the plugin reads: `settings` and the
    list of `articles` (newest first).
    """

    def __init__(self, settings=None, articles=None):
        # Fall back to fresh empty containers for falsy/absent arguments.
        self.settings = settings if settings else {}
        self.articles = articles if articles else []
class PseudoArticle:
    """Minimal stand-in for a Pelican article; only the title matters here."""

    def __init__(self, title=""):
        self.title = title

    def __repr__(self):
        # e.g. PseudoArticle(article3) -- keeps test failure output readable.
        return "{}({})".format(type(self).__name__, self.title)
class NeighborsTestCase(unittest.TestCase):
    """Exercise multi_neighbors.neighbors() on a 7-article site with a
    neighbor window of 3.

    Improvements over the original: `assertEqual` replaces
    `assertTrue(a == b)` / `assertFalse(x)` so failures show the actual
    diff, and the 14 hand-unrolled assertions are replaced by a loop that
    computes the expected windows from the article list itself.
    """

    def setUp(self):
        """Build a pseudo generator (articles newest-first) and run the plugin."""
        self.generator = PseudoArticlesGenerator(
            settings={"MULTI_NEIGHBORS": 3},
            articles=[
                PseudoArticle("article6-newest"),  # List position 0.
                PseudoArticle("article5"),  # List position 1.
                PseudoArticle("article4"),  # List position 2.
                PseudoArticle("article3"),  # List position 3.
                PseudoArticle("article2"),  # List position 4.
                PseudoArticle("article1"),  # List position 5.
                PseudoArticle("article0-oldest"),  # List position 6.
            ],
        )
        multi_neighbors.neighbors(self.generator)

    def test_prev_next(self):
        """Each article links to up to 3 older (prev) and 3 newer (next) articles.

        With articles stored newest-first, the expected windows for the
        article at index i are:
          prev_articles: the next 3 (older) articles, same order;
          next_articles: the previous 3 (newer) articles, closest first.
        """
        articles = self.generator.articles
        for i, article in enumerate(articles):
            # Older neighbors: positions i+1 .. i+3.
            self.assertEqual(articles[i + 1:i + 4], article.prev_articles)
            # Newer neighbors: positions i-1 .. i-3 (clamped at 0), i.e.
            # the slice before i, reversed so the closest article is first.
            # NOTE(review): this expects empty *lists* at the edges; the
            # original only asserted falsiness there -- confirm the plugin
            # always assigns lists.
            self.assertEqual(articles[max(i - 3, 0):i][::-1],
                             article.next_articles)
# Allow running the suite directly: `python test_multi_neighbors.py`.
if __name__ == "__main__":
    unittest.main()
| 2.921875
| 3
|
python/kf_true.py
|
gcgibson/ssvgd
| 1
|
12774208
|
from pydlm import dlm, trend, seasonality
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
# A linear trend
linear_trend = trend(degree=1, discount=1, name='linear_trend', w=10)
# A seasonality
# Simulate a 10-step random walk (state noise sd=10) with observation
# noise sd=10 on top of the latent state.
time_series = []
for i in range(10):
    if i == 0:
        x_sim = np.random.normal(0,1,1)
    else:
        x_sim = np.random.normal(x_sim,10,1)
    time_series.append(np.random.normal(x_sim,10,1))
time_series = np.array(time_series)
# Fit a pydlm local-trend model and extract forward-filtered moments.
simple_dlm = dlm(time_series) + linear_trend
simple_dlm.fit()
filteredMean = simple_dlm.getMean(filterType='forwardFilter')
filteredVar = simple_dlm.getVar(filterType='forwardFilter')
ll = 0
# Monte-Carlo one-step-ahead predictive samples from the filtered posterior.
# NOTE(review): np.random.normal takes a *standard deviation* as its second
# argument, but filteredVar is a variance -- likely should be
# np.sqrt(filteredVar); confirm intent.
one_step_ahead_samples = []
for i in range(len(time_series)):
    tmp_samples = []
    for j in range(1000):
        tmp = np.random.normal(filteredMean[i],filteredVar[i], 1)
        tmp_samples.append(np.random.normal(tmp,1,1))
    one_step_ahead_samples.append(tmp_samples)
one_step_ahead_samples = np.array(one_step_ahead_samples)
# 90% predictive interval (5th-95th percentile) per time step.
upper_pi = []
lower_pi = []
for p in one_step_ahead_samples:
    upper_pi.append(np.percentile(p,95))
    lower_pi.append(np.percentile(p,5))
time_series_shifted = time_series
#plt.plot(range(len(time_series_shifted)),time_series_shifted,color='orange')
#plt.fill_between(range(len(time_series_shifted)),upper_pi,lower_pi,alpha=.3)
#plt.show()
# --- Repeat the exercise with pykalman's KalmanFilter (same ground truth). ---
from pykalman import KalmanFilter
random_state = np.random.RandomState(0)
# Scalar state-space parameters matching the simulation above
# (state noise variance 10, observation noise variance 1).
transition_matrix = 1
transition_offset = .1
observation_matrix = 1
observation_offset = 1
transition_covariance = 10
observation_covariance = 1
initial_state_mean = 0
initial_state_covariance = 1
# sample from model
kf = KalmanFilter(
    transition_matrix, observation_matrix, transition_covariance,
    observation_covariance, transition_offset, observation_offset,
    initial_state_mean, initial_state_covariance,
    random_state=random_state
)
filtered_state_means, filtered_state_variances = kf.filter(time_series)
filteredMean = filtered_state_means.reshape((-1))
filteredVar = filtered_state_variances.reshape((-1))
# One-step-ahead predictive samples: state draw -> state transition ->
# observation draw.  NOTE(review): same variance-vs-sd concern as above.
one_step_ahead_samples = []
for i in range(len(time_series)):
    tmp_samples = []
    for j in range(10000):
        tmp = np.random.normal(filteredMean[i],filteredVar[i], 1)
        tmp2 = np.random.normal(tmp,10,1)
        tmp_samples.append(np.random.normal(tmp2,10,1))
    one_step_ahead_samples.append(tmp_samples)
one_step_ahead_samples = np.array(one_step_ahead_samples)
upper_pi = []
lower_pi = []
for p in one_step_ahead_samples:
    upper_pi.append(np.percentile(p,95))
    lower_pi.append(np.percentile(p,5))
time_series = time_series.reshape((-1))
# Shift the series by one step for plotting against the one-step-ahead
# interval; the trailing 10 is a placeholder for the unknown next value.
time_series_shifted = time_series.tolist()[1:] + [10]
plt.plot(range(len(time_series_shifted)),time_series_shifted,color='orange')
plt.fill_between(range(len(time_series_shifted)),upper_pi,lower_pi,alpha=.3)
plt.show()
| 2.921875
| 3
|
app/res/view/json_editor/self.py
|
HsOjo/PyJSONEditor
| 0
|
12774209
|
<reponame>HsOjo/PyJSONEditor
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'self.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_JSONEditor(object):
    # pyuic5-generated UI class: a single three-column QTreeWidget
    # (key / type / value) in a horizontal layout.  Regenerate from
    # self.ui rather than editing by hand.
    def setupUi(self, JSONEditor):
        """Build the widget tree onto the given top-level widget."""
        JSONEditor.setObjectName("JSONEditor")
        JSONEditor.resize(400, 300)
        self.horizontalLayout = QtWidgets.QHBoxLayout(JSONEditor)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.tw_content = QtWidgets.QTreeWidget(JSONEditor)
        self.tw_content.setTabKeyNavigation(True)
        self.tw_content.setAlternatingRowColors(True)
        self.tw_content.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.tw_content.setAnimated(True)
        self.tw_content.setAllColumnsShowFocus(True)
        self.tw_content.setWordWrap(False)
        self.tw_content.setExpandsOnDoubleClick(False)
        self.tw_content.setObjectName("tw_content")
        self.horizontalLayout.addWidget(self.tw_content)
        self.retranslateUi(JSONEditor)
        QtCore.QMetaObject.connectSlotsByName(JSONEditor)

    def retranslateUi(self, JSONEditor):
        """Apply translatable strings (window title and column headers)."""
        _translate = QtCore.QCoreApplication.translate
        JSONEditor.setWindowTitle(_translate("JSONEditor", "JSONEditor"))
        self.tw_content.headerItem().setText(0, _translate("JSONEditor", "key"))
        self.tw_content.headerItem().setText(1, _translate("JSONEditor", "type"))
        self.tw_content.headerItem().setText(2, _translate("JSONEditor", "value"))
| 1.9375
| 2
|
torch_t2t/torch_t2t_train.py
|
radiodee1/awesome-chatbot
| 22
|
12774210
|
<reponame>radiodee1/awesome-chatbot
#!/usr/bin/python3
"""
Sequence-to-Sequence Modeling with nn.Transformer and TorchText
===============================================================
This is a tutorial on how to train a sequence-to-sequence model
that uses the
`nn.Transformer <https://pytorch.org/docs/master/nn.html?highlight=nn%20transformer#torch.nn.Transformer>`__ module.
PyTorch 1.2 release includes a standard transformer module based on the
paper `Attention is All You
Need <https://arxiv.org/pdf/1706.03762.pdf>`__. The transformer model
has been proved to be superior in quality for many sequence-to-sequence
problems while being more parallelizable. The ``nn.Transformer`` module
relies entirely on an attention mechanism (another module recently
implemented as `nn.MultiheadAttention <https://pytorch.org/docs/master/nn.html?highlight=multiheadattention#torch.nn.MultiheadAttention>`__) to draw global dependencies
between input and output. The ``nn.Transformer`` module is now highly
modularized such that a single component (like `nn.TransformerEncoder <https://pytorch.org/docs/master/nn.html?highlight=nn%20transformerencoder#torch.nn.TransformerEncoder>`__
in this tutorial) can be easily adapted/composed.
.. image:: ../_static/img/transformer_architecture.jpg
"""
######################################################################
# Define the model
# ----------------
#
######################################################################
# In this tutorial, we train ``nn.TransformerEncoder`` model on a
# language modeling task. The language modeling task is to assign a
# probability for the likelihood of a given word (or a sequence of words)
# to follow a sequence of words. A sequence of tokens are passed to the embedding
# layer first, followed by a positional encoding layer to account for the order
# of the word (see the next paragraph for more details). The
# ``nn.TransformerEncoder`` consists of multiple layers of
# `nn.TransformerEncoderLayer <https://pytorch.org/docs/master/nn.html?highlight=transformerencoderlayer#torch.nn.TransformerEncoderLayer>`__. Along with the input sequence, a square
# attention mask is required because the self-attention layers in
# ``nn.TransformerEncoder`` are only allowed to attend the earlier positions in
# the sequence. For the language modeling task, any tokens on the future
# positions should be masked. To have the actual words, the output
# of ``nn.TransformerEncoder`` model is sent to the final Linear
# layer, which is followed by a log-Softmax function.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
class TransformerModel(nn.Module):
    """Causal language model: token embedding (scaled by sqrt(ninp)) ->
    positional encoding -> masked nn.TransformerEncoder -> linear
    projection back to vocabulary logits.

    NOTE(review): forward() uses len(src) as the sequence length, which
    assumes (seq, batch)-shaped input -- confirm against callers.
    """

    def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
        super(TransformerModel, self).__init__()
        from torch.nn import TransformerEncoder, TransformerEncoderLayer
        self.model_type = 'Transformer'
        # Causal attention mask; built lazily in forward() once the
        # sequence length is known.
        self.src_mask = None
        self.pos_encoder = PositionalEncoding(ninp, dropout)
        encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.ninp = ninp
        self.decoder = nn.Linear(ninp, ntoken)
        self.init_weights()

    def _generate_square_subsequent_mask(self, sz):
        # Position i may attend only to positions <= i: allowed entries are
        # 0.0, disallowed entries -inf (added to attention scores).
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def init_weights(self):
        # Uniform init for the embedding and output projection weights;
        # zero the output bias.
        initrange = 0.1
        self.encoder.weight.data.uniform_(-initrange, initrange)
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src):
        # Rebuild the cached causal mask only when the input length changes.
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            device = src.device
            mask = self._generate_square_subsequent_mask(len(src)).to(device)
            self.src_mask = mask
        src = self.encoder(src) * math.sqrt(self.ninp)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
        output = self.decoder(output)
        return output
######################################################################
# ``PositionalEncoding`` module injects some information about the
# relative or absolute position of the tokens in the sequence. The
# positional encodings have the same dimension as the embeddings so that
# the two can be summed. Here, we use ``sine`` and ``cosine`` functions of
# different frequencies.
#
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to a (seq, batch, d_model)
    input, then apply dropout.

    Even embedding dimensions carry sine terms, odd dimensions cosine terms,
    at geometrically spaced frequencies (Vaswani et al. scheme).  The table
    is precomputed once for ``max_len`` positions and stored as a buffer so
    it moves with the module across devices without being a parameter.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Column vector of positions 0..max_len-1 and the per-frequency
        # scaling exp(-log(10000) * 2k / d_model).
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        frequencies = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        angles = positions * frequencies
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)
        table[:, 1::2] = torch.cos(angles)
        # Shape (max_len, 1, d_model) so it broadcasts over the batch dim.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        # Add the first seq_len rows of the table, then apply dropout.
        return self.dropout(x + self.pe[:x.size(0), :])
######################################################################
# Load and batch data
# -------------------
#
######################################################################
# The training process uses Wikitext-2 dataset from ``torchtext``. The
# vocab object is built based on the train dataset and is used to numericalize
# tokens into tensors. Starting from sequential data, the ``batchify()``
# function arranges the dataset into columns, trimming off any tokens remaining
# after the data has been divided into batches of size ``batch_size``.
# For instance, with the alphabet as the sequence (total length of 26)
# and a batch size of 4, we would divide the alphabet into 4 sequences of
# length 6:
#
# .. math::
# \begin{bmatrix}
# \text{A} & \text{B} & \text{C} & \ldots & \text{X} & \text{Y} & \text{Z}
# \end{bmatrix}
# \Rightarrow
# \begin{bmatrix}
# \begin{bmatrix}\text{A} \\ \text{B} \\ \text{C} \\ \text{D} \\ \text{E} \\ \text{F}\end{bmatrix} &
# \begin{bmatrix}\text{G} \\ \text{H} \\ \text{I} \\ \text{J} \\ \text{K} \\ \text{L}\end{bmatrix} &
# \begin{bmatrix}\text{M} \\ \text{N} \\ \text{O} \\ \text{P} \\ \text{Q} \\ \text{R}\end{bmatrix} &
# \begin{bmatrix}\text{S} \\ \text{T} \\ \text{U} \\ \text{V} \\ \text{W} \\ \text{X}\end{bmatrix}
# \end{bmatrix}
#
# These columns are treated as independent by the model, which means that
# the dependence of ``G`` and ``F`` can not be learned, but allows more
# efficient batch processing.
#
# --- Command-line options and bAbI dataset loading -------------------------
parser = argparse.ArgumentParser(
    description='Fine-tune a Transformer.',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--tenk', action='store_true', help='use ten-k dataset')
parser.add_argument('--task', default=1, help='use specific question-set/task', type=int)
parser.add_argument('--lr', default=0.1, help='learning rate', type=float)
parser.add_argument('--epochs', default=30, help='number of epochs', type=int)
parser.add_argument('--no_scheduler', action='store_true',help='cancel learning rate decay')
parser.add_argument('--small', action='store_true', help='use modest hparams')
args = parser.parse_args()
print('loading')
import torchtext
from torchtext.data.utils import get_tokenizer
# Shared text field: basic_english tokenizer, lower-cased, with explicit
# <sos>/<eos> markers.
TEXT = torchtext.data.Field(tokenize=get_tokenizer("basic_english"),
                            init_token='<sos>',
                            eos_token='<eos>',
                            lower=True)
#train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
#TEXT.build_vocab(train_txt)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ten_k = args.tenk
task = args.task #1 #9
# Download/load the requested bAbI task (1k or 10k variant) into three splits.
babi_train_txt_in, babi_val_txt_in, babi_test_txt_in = torchtext.datasets.BABI20.splits(TEXT, root='../raw/', tenK=ten_k, task=task)
def find_and_parse_story(data, period=False):
    """Flatten each example's story into a single word list, in place.

    Every story sentence is split on spaces and the words concatenated; if
    ``period`` is True a '.' token is appended after each sentence.  A '?'
    token is appended to every example's query.  Returns the mutated
    ``data`` object for convenience.
    """
    for example in data.examples:
        tokens = []
        for sentence in example.story:
            tokens.extend(sentence.split(' '))
            if period:
                tokens.append('.')
        example.story = tokens
        example.query.append('?')
    return data
# Flatten stories/queries into token lists (with sentence-final periods)
# and build the vocabulary from the training split only.
babi_train_txt = find_and_parse_story(babi_train_txt_in, period=True)
babi_val_txt = find_and_parse_story(babi_val_txt_in, period=True)
babi_test_txt = find_and_parse_story(babi_test_txt_in, period=True)
TEXT.build_vocab(babi_train_txt)
def batchify_babi(data, bsz, separate_ques=True, size_src=200, size_tgt=200, print_to_screen=False):
    """Turn parsed bAbI examples into padded (seq, example) LongTensor pairs.

    With ``separate_ques=True`` (the mode used in this script) each example
    becomes one zero-padded row: source = <sos> + story + query + <eos>,
    target = answer + <eos>; both are padded to the same width ``m`` (at
    least ``size_src``) and returned transposed, together with ``m``.

    With ``separate_ques=False`` all examples are concatenated into one long
    stream and reshaped WikiText-style into ``bsz`` columns.

    NOTE(review): ``size_tgt`` is accepted but never used, and ``n`` is
    computed from ``len(x[0])`` (the character length of the first answer
    word) before being overwritten by ``n = m`` -- confirm these are
    intentional leftovers.
    """
    new_data = []
    target_data = []
    for ii in range(len(data.examples)):
        z = data.examples[ii]
        target_data_tmp = [] #['<sos>']
        if not separate_ques:
            # Stream mode: story+query+'.' tokens, with targets shifted by 2.
            z.story.extend(z.query)
            z.story.extend('.')
            new_data.extend(z.story)
            new_data.append('<eos>')
            target_data_tmp.extend(z.story)
            target_data_tmp.extend(z.answer)
            target_data_tmp.append('<eos>')
            #print(z.answer, len(z.answer))
            ll = 2
            target_data_tmp = target_data_tmp[ll :len(z.story) + ll]
            #print(z.story,'\n',target_data_tmp)
            target_data.extend(target_data_tmp)
        else:
            # Per-example mode: one padded row per story/answer pair.
            z.story.insert(0, '<sos>')
            z.story.extend(z.query)
            z.story.extend([ '<eos>'])
            #z.story.extend('.')
            new_data.append(z.story)
            target_data_tmp.extend(z.answer)
            target_data_tmp.append('<eos>')
            target_data.append(target_data_tmp)
            pass
    if print_to_screen: print(new_data[0:5],'nd')
    # m = widest source row (floored at size_src); n ends up equal to m.
    m = max([len(x) for x in new_data])
    n = max([len(x[0]) for x in target_data])
    m = max(m, size_src)
    #n = max(n, size_tgt)
    n = m
    #print(m,'m', [len(x) for x in new_data])
    if not separate_ques:
        new_data = TEXT.numericalize([new_data])
        target_data = TEXT.numericalize([target_data])
        #new_n_data = new_data
        #target_n_data = target_data
        bsz = n
        nbatch_s = new_data.size(0) // bsz
        nbatch_t = target_data.size(0) // bsz
        nbatch = min(nbatch_s, nbatch_t)
        #print(nbatch_s, nbatch_t, len(new_data), len(target_data))
        # Trim off any extra elements that wouldn't cleanly fit (remainders).
        new_data = new_data.narrow(0, 0, nbatch * bsz)
        target_data = target_data.narrow(0, 0, nbatch * bsz)
        ###target_data = target_data.narrow(0, 0, nbatch * bsz)
        #print(new_data.size(), target_data.size())
        # Evenly divide the data across the bsz batches.
        new_n_data = new_data.view(bsz, -1).t().contiguous()
        target_n_data = target_data.view(bsz, -1).t().contiguous()
    else:
        #padded_data = torch.zeros(1, m, dtype=torch.long)
        #padded_target = torch.zeros(1, n, dtype=torch.long)
        # Zero (= pad index) initialised matrices, one row per example.
        new_n_data = torch.zeros( len(new_data), m, dtype=torch.long)
        target_n_data = torch.zeros( len(target_data), n, dtype=torch.long)
        for jj in range(len(new_data)):
            ## do source ##
            z = TEXT.numericalize([new_data[jj]])
            if z.size(0) > 1:
                z = z.t()
            p = torch.zeros(1, m, dtype=torch.long)
            p[0, :len(z[0])] = z
            new_n_data[jj, :] = p
            ## do target ##
            y = TEXT.numericalize([target_data[jj]])
            if y.size(0) > 1:
                y = y.t()
            q = torch.zeros(1, n, dtype=torch.long)
            q[0,:len(y[0])] = y
            target_n_data[jj, :] = q
        new_n_data = new_n_data.t().contiguous()
        #new_n_data = new_n_data.contiguous()
        target_n_data = target_n_data.t().contiguous()
        #print(new_n_data.t()[0:5], 't-nnd')
    return new_n_data.to(device), target_n_data.to(device), m
def batchify(data, bsz):
    """WikiText-style batching: numericalize one long token stream and fold
    it into ``bsz`` independent columns (unused in the bAbI path)."""
    data = TEXT.numericalize([data.examples[0].text])
    # Divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    return data.to(device)

# Batch/padding configuration and conversion of each split into padded
# (seq, example) tensors plus the common padded width m_*.
batch_size = 20
eval_batch_size = 10
size_tgt = 24 #40000
size_src = -1
print('load train')
babi_train_txt, babi_train_tgt, m_train = batchify_babi(
    babi_train_txt,
    batch_size,
    size_tgt=size_tgt,
    size_src=size_src,
    print_to_screen=False,
    separate_ques=True)
print('load val')
babi_val_txt, babi_val_tgt, m_val = batchify_babi(
    babi_val_txt,
    batch_size,
    size_tgt=size_tgt,
    size_src=size_src,
    separate_ques=True)
print('load tst')
babi_test_txt, babi_test_tgt, m_test = batchify_babi(
    babi_test_txt,
    batch_size,
    size_tgt=size_tgt,
    size_src=size_src,
    separate_ques=True)
######################################################################
# Functions to generate input and target sequence
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
######################################################################
# ``get_batch()`` function generates the input and target sequence for
# the transformer model. It subdivides the source data into chunks of
# length ``bptt``. For the language modeling task, the model needs the
# following words as ``Target``. For example, with a ``bptt`` value of 2,
# we’d get the following two Variables for ``i`` = 0:
#
# .. image:: ../_static/img/transformer_input_target.png
#
# It should be noted that the chunks are along dimension 0, consistent
# with the ``S`` dimension in the Transformer model. The batch dimension
# ``N`` is along dimension 1.
#
bptt = 35
def get_batch_babi(source, target, i, print_to_screen=False, bptt=35, flatten_target=True):
    """Slice a width-``bptt`` column window starting at ``i`` from both the
    source and target (batch, seq) tensors.

    If ``flatten_target`` is True the target window is flattened with
    ``view(-1)`` for the cross-entropy loss.  Returns ``(data, labels)``.
    """
    window = slice(i, i + bptt)
    data = source[:, window]
    labels = target[:, window]
    if flatten_target:
        labels = labels.view(-1)
    if print_to_screen:
        print(data, labels, i, 'dti')
    return data, labels
#bptt = 35
def get_batch(source, i):
    """Return an (input, target) pair of at most ``bptt`` rows starting at
    row ``i``; the target is the input shifted one step ahead, flattened.

    Relies on the module-level ``bptt`` window size.
    """
    window = min(bptt, len(source) - 1 - i)
    chunk = source[i:i + window]
    shifted = source[i + 1:i + 1 + window].view(-1)
    return chunk, shifted
def show_strings(source):
    """Debug helper: print the vocabulary words for the non-zero (non-pad)
    token ids in a 1-D tensor (a single-row 2-D tensor is squeezed first),
    separated by ' | '."""
    if len(source.size()) > 1:
        source = source.squeeze(0)
    for i in source:
        if i != 0:
            print(TEXT.vocab.itos[i], end=' | ')
    print()
# Dead debugging branch, kept for reference; never runs (`if False`) and
# would exit() after printing a few batches if re-enabled.
if False:
    label = 'pre'
    #print(babi_train_txt_in.examples[0:5])
    #for i in babi_train_txt_in.examples[0:5]:
    #    print(i.story)
    exit()
    for i in range(5):
        tt1, tt2 = get_batch_babi(babi_train_txt, babi_train_tgt, i, bptt=1,flatten_target=False)
        print(tt1.t(),'\n',tt2.t()[0])
        print(tt1.size(), tt2.size(),'t,t')
        #show_strings(babi_train_txt[0])
        #show_strings(babi_train_tgt[0])
        exit()
        for i in range(tt1.size(0)):
            print(i, tt1.size(0))
            show_strings(tt1.t())
            print()
            show_strings(tt2.t())
            print('-')
    exit()
def show_tensor_vals(source):
    """Debug helper: print each non-zero token in a 2-D id tensor.

    Non-zero entries are printed as "id|word|"; zero (pad) entries are
    counted and the total reported on the final line.
    """
    zero = 0
    for i in range(source.size(0)):
        for ii in range(len(source[i])):
            z = source[i][ii]
            # Compare by value: the original `not z is 0` identity test is
            # always True for a 0-d tensor, so padding was printed too and
            # the zero counter never advanced.
            if z.item() != 0:
                print(z, end='|')
                print(TEXT.vocab.itos[z], end='|')
            else:
                zero += 1
    print('\n', zero, 'zeros')
######################################################################
# Initiate an instance
# --------------------
#
######################################################################
# The model is set up with the hyperparameter below. The vocab size is
# equal to the length of the vocab object.
#
# Model hyper-parameters; --small switches to a lighter configuration.
ntokens = len(TEXT.vocab.stoi) # the size of vocabulary
emsize = 40 #384# 200 # embedding dimension
nhid = 384#200 # the dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 4 #2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 4 #2 # the number of heads in the multiheadattention models
dropout = 0.2 # the dropout value
if args.small:
    print('modest hparams')
    emsize = 40 #200
    nhid = 200
    nlayers = 2
    nhead = 2
model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device)
#print(m_train, 'mtrain')
######################################################################
# Run the model
# -------------
#
######################################################################
# `CrossEntropyLoss <https://pytorch.org/docs/master/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__
# is applied to track the loss and
# `SGD <https://pytorch.org/docs/master/optim.html?highlight=sgd#torch.optim.SGD>`__
# implements stochastic gradient descent method as the optimizer. The initial
# learning rate is set to 5.0. `StepLR <https://pytorch.org/docs/master/optim.html?highlight=steplr#torch.optim.lr_scheduler.StepLR>`__ is
# applied to adjust the learn rate through epochs. During the
# training, we use
# `nn.utils.clip_grad_norm\_ <https://pytorch.org/docs/master/nn.html?highlight=nn%20utils%20clip_grad_norm#torch.nn.utils.clip_grad_norm_>`__
# function to scale all the gradient together to prevent exploding.
#
criterion = nn.CrossEntropyLoss()
lr = args.lr # 1.0 #5.0 # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# Multiply the learning rate by 0.95 each epoch (unless --no_scheduler).
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
# Tag printed by evaluate(); mutated to 'val'/'tst' by the training loop.
label = 'val'
import time
def train():
    """Run one training epoch over the bAbI training tensors.

    Iterates the padded (seq, example) tensors one column window at a time
    (bptt is forced to 1 here), computes cross-entropy against the padded
    answer targets, clips gradients, and logs every 200 batches.  Uses the
    module-level model/optimizer/criterion/epoch/acc_val globals.
    """
    model.train()  # Turn on the train mode
    total_loss = 0.
    start_time = time.time()
    ntokens = len(TEXT.vocab.stoi)
    bptt = 1#m_train #1
    babi_train_txt_size = babi_train_txt.size(1) -1
    for batch, i in enumerate(range(0, babi_train_txt_size, bptt)):
        #print(i, 'progress', babi_train_txt.size())
        #babi_train_txt_t = babi_train_txt.t()
        data, targets = get_batch_babi(babi_train_txt, babi_train_tgt, i, bptt=bptt, flatten_target=False)
        #print(data.size(), targets.size(),'d,t')
        #bsz = data.size(0)
        optimizer.zero_grad()
        output = model(data)
        #output = output.transpose(1,0)
        #print(output.size(),'os')
        targets_t = targets.t()
        #print(targets_t.size(), targets.size())
        prediction_text = torch.argmax(output.view(-1,ntokens), dim=1)
        index = 0
        # Disabled prediction dump (`and False`); kept for debugging.
        if (not ten_k or i % 100 == 0) and False:
            print(
                'i',
                i,
                bptt,
                TEXT.vocab.itos[prediction_text[index].item()],
                TEXT.vocab.itos[prediction_text[index + 1].item()],
                TEXT.vocab.itos[prediction_text[index + 2].item()],
                '['+TEXT.vocab.itos[targets_t[index,0].item()]+']')
            print(prediction_text.size(), targets_t.size(), targets_t[0,0].item(),prediction_text[index].item(), 'p,tt')
        targets = targets.contiguous().view(-1)
        loss = criterion(output.view( -1, ntokens), targets) ### <---
        #loss = criterion(output.view(-1, ntokens), targets) ### <---
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()
        total_loss += loss.item()
        log_interval = 200
        if batch % log_interval == 0 and batch > 0:
            cur_loss = total_loss / log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | '
                  'lr {:02.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f} | last acc val {:5.2f}'.format(
                    epoch, batch, len(babi_train_txt) // bptt, scheduler.get_lr()[0],
                    elapsed * 1000 / log_interval,
                    cur_loss, math.exp(cur_loss), acc_val))
            total_loss = 0
            start_time = time.time()
def evaluate(eval_model, data_source, data_tgt, m_data=1, show_accuracy=False):
    """Score ``eval_model`` on the given padded source/target tensors.

    Accumulates cross-entropy loss under torch.no_grad() and, over the first
    ten argmax predictions per window, a rough answer-token accuracy (zeros
    are treated as padding and stop the scan).  Returns
    ``(mean_loss, accuracy_percent)``.  Uses the module-level
    criterion/scheduler/label globals for scoring and logging.
    """
    eval_model.eval()  # Turn on the evaluation mode
    total_loss = 0.
    ntokens = len(TEXT.vocab.stoi)
    acc = 0
    acc_tot = 0
    acc_count = 0
    saved_dim = -1
    out_dim = -1
    bptt = m_data #1
    data_source_size = data_source.size(0) - 1
    pr_to_screen = False
    with torch.no_grad():
        for i in range(0, data_source_size, bptt):
            data, targets = get_batch_babi(data_source, data_tgt, i,bptt=bptt, flatten_target=False)
            output = eval_model(data)
            #output = output.squeeze(1)
            #print(output.size(), 'out', label)
            #output = output.t().contiguous()
            output_flat = output.view(-1, ntokens)
            output_flat_t = output.transpose(1,0).contiguous().view(-1, ntokens)
            #output_flat_t = output.contiguous().view(-1, ntokens)
            #print(output_flat_t.size(),'of')
            #output_argmax = torch.argmax(output_flat_t, dim=-1)
            targets_t = targets.t()
            targets = targets.contiguous().view(-1)
            total_loss += len(data) * criterion(output_flat, targets).item()
            targets_text = targets_t
            out_dim = output.size(0)
            #print(targets_text.size(), i, 'eval')
            if saved_dim == -1 or saved_dim == out_dim:
                saved_dim = out_dim
                if i == 0 and pr_to_screen: print(bptt, out_dim, 'dim ', end='|')
            #print(targets_text.size(0), targets_text.size(1),'tt')
            #print(model.encoder.weight.data.size(),'enc')
            if targets_text.size(0) > i or bptt == 1:
                index_i = i
                if bptt == 1: index_i = 0
                # Compare up to the first 10 predicted tokens against the
                # padded answer row; a zero prediction ends the scan.
                for ii in range(0, 10): #output_flat.size(0)):
                    text = torch.argmax(output_flat_t, dim=-1)[ii].item()
                    text_max = torch.argmax(output_flat_t, dim=-1)
                    if text != 0:
                        acc_count += 1
                        pass
                    else:
                        break
                        pass
                    if text == targets_text[index_i,ii].item() and text != 0:
                        acc += 1
                        print(
                            text_max[:5],
                            ii,
                            TEXT.vocab.itos[text],':score acc')
                        #break
                    else:
                        print(text_max[:5])
                        pass
            if i == 0 and pr_to_screen: print()
    if show_accuracy:
        if acc_count > 0:
            acc_tot = acc / acc_count * 100.0
        else:
            acc_tot = 0.0
        print('acc:', acc_tot, 'lr:', scheduler.get_lr()[0],'loss:', total_loss/(len(data_source) -1) , ':'+label+':')
    return total_loss / (len(data_source) - 1), acc_tot
######################################################################
# Loop over epochs. Save the model if the validation loss is the best
# we've seen so far. Adjust the learning rate after each epoch.
best_val_loss = float("inf")
epochs = args.epochs # 30 # The number of epochs
best_model = None
if False:
for i in range(babi_train_txt.size(0) -1):
t, _, _ = mult_word(model, babi_train_txt, babi_train_tgt,index=0, m_data=m_train, show_accuracy=True, append=i)
print(t,'t')
exit()
#print('train')
acc_val = 0.0
for epoch in range(1, epochs + 1):
epoch_start_time = time.time()
train()
label = 'val'
val_loss, acc_val = evaluate(model, babi_val_txt, babi_val_tgt, m_data=1, show_accuracy=True)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f} | acc {:5.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss), acc_val))
print('-' * 89)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = model
label = 'tst'
_, acc_tst = evaluate(model, babi_test_txt, babi_test_tgt, m_data=1, show_accuracy=True)
if not bool(args.no_scheduler):
scheduler.step()
######################################################################
# Evaluate the model with the test dataset
# -------------------------------------
#
# Apply the best model to check the result with the test dataset.
test_loss, acc = evaluate(best_model, babi_test_txt, babi_test_tgt, m_data=1, show_accuracy=True)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | acc tst {:5.2f} '.format(
test_loss, math.exp(test_loss), acc))
print('=' * 89)
| 2.984375
| 3
|
apps/forms-flow-ai/forms-flow-api/src/api/services/application.py
|
saravanpa-aot/SBC_DivApps
| 2
|
12774211
|
"""This exposes application service."""
from http import HTTPStatus
from ..exceptions import BusinessException
from ..models import Application, FormProcessMapper
from ..schemas import AggregatedApplicationSchema, ApplicationSchema
from .external import BPMService
from ..schemas import FormProcessMapperSchema
import logging
class ApplicationService():
    """Service layer for Application records: creation (with BPM process
    start), paged queries, counts, aggregation, and mapper lookup."""

    @staticmethod
    def create_application(data, token):
        """Create a new application and start its BPM process instance.

        Looks up the FormProcessMapper for ``data['form_id']``, persists the
        application, starts the mapped process via BPMService (passing the
        bearer token through), and stores the returned process instance id.
        """
        data['application_status'] = 'new'
        mapper = FormProcessMapper.find_by_form_id(data['form_id'])
        # temporary until the frontend can provide form_process_mapper_id
        data['form_process_mapper_id'] = mapper.id
        data['application_name'] = mapper.form_name
        application = Application.create_from_dict(data)
        payload = {'variables': {"applicationId": {'value': application.id}, "formUrl": {'value': application.form_url},"formName": {'value': application.application_name},"submitterName": {'value': application.created_by},"submissionDate": {'value': application.created.__str__()}}}
        response = BPMService.post_process_start(mapper.process_key, payload, token)
        application.update({'process_instance_id': response['id']})
        return application

    @staticmethod
    def get_all_applications(page_no, limit):
        """Get all applications, optionally paged (page_no/limit may be None)."""
        if page_no:
            page_no = int(page_no)
        if limit:
            limit = int(limit)
        applications = Application.find_all(page_no, limit)
        application_schema = ApplicationSchema()
        return application_schema.dump(applications, many=True)

    @staticmethod
    def get_all_applications_by_user(user_id, page_no, limit):
        """Get all applications created by ``user_id``, optionally paged."""
        if page_no:
            page_no = int(page_no)
        if limit:
            limit = int(limit)
        applications = Application.find_all_by_user(user_id, page_no, limit)
        application_schema = ApplicationSchema()
        return application_schema.dump(applications, many=True)

    @staticmethod
    def get_all_applications_ids(application_ids):
        """Get the applications matching the given list of ids."""
        applications = Application.find_by_ids(application_ids)
        application_schema = ApplicationSchema()
        return application_schema.dump(applications, many=True)

    @staticmethod
    def get_all_application_count():
        """Get total application count."""
        return Application.query.count()

    @staticmethod
    def get_all_application_by_user_count(user_id):
        """Get application count for the given user."""
        return Application.find_all_by_user_count(user_id)

    @staticmethod
    def get_all_applications_form_id(form_id,page_no, limit):
        """Get all applications for a form, optionally paged."""
        if page_no:
            page_no = int(page_no)
        if limit:
            limit = int(limit)
        applications = Application.find_by_form_id(form_id, page_no, limit)
        application_schema = ApplicationSchema()
        return application_schema.dump(applications, many=True)

    @staticmethod
    def get_all_applications_form_id_user(form_id,user_id,page_no, limit):
        """Get all applications for a form created by a user, optionally paged."""
        if page_no:
            page_no = int(page_no)
        if limit:
            limit = int(limit)
        applications = Application.find_by_form_id_user(form_id,user_id, page_no, limit)
        application_schema = ApplicationSchema()
        return application_schema.dump(applications, many=True)

    @staticmethod
    def get_all_applications_form_id_count(form_id):
        """Get application count for a form."""
        return Application.find_all_by_form_id_count(form_id)

    @staticmethod
    def get_all_applications_form_id_user_count(form_id, user_id):
        """Get application count for a form and user."""
        return Application.find_all_by_form_id_user_count(form_id,user_id)

    @staticmethod
    def get_application(application_id):
        """Get application by id."""
        return ApplicationSchema().dump(Application.find_by_id(application_id))

    @staticmethod
    def update_application(application_id, data):
        """Update an application; raises BusinessException if it does not exist."""
        application = Application.find_by_id(application_id)
        if application:
            application.update(data)
        else:
            raise BusinessException('Invalid application', HTTPStatus.BAD_REQUEST)

    @staticmethod
    def get_aggregated_applications(from_date: str, to_date: str):
        """Get applications aggregated over the given date range."""
        applications = Application.find_aggregated_applications(from_date, to_date)
        schema = AggregatedApplicationSchema(exclude=('application_status',))
        return schema.dump(applications, many=True)

    @staticmethod
    def get_aggregated_application_status(mapper_id: int, from_date: str, to_date: str):
        """Get status aggregation for one mapper over the given date range."""
        application_status = Application.find_aggregated_application_status(mapper_id, from_date, to_date)
        schema = AggregatedApplicationSchema(exclude=('form_process_mapper_id',))
        return schema.dump(application_status, many=True)

    @staticmethod
    def get_application_form_mapper_by_id(application_id):
        """Get the form process mapper for an application; raises if missing."""
        mapper = FormProcessMapper.find_by_application_id(application_id)
        if mapper:
            mapper_schema = FormProcessMapperSchema()
            return mapper_schema.dump(mapper)
        raise BusinessException('Invalid application', HTTPStatus.BAD_REQUEST)

    @staticmethod
    def apply_custom_attributes(application_schema):
        """Add derived formId/submissionId attributes to one or many
        serialized applications (mutates and returns the input)."""
        if isinstance(application_schema, list):
            for entry in application_schema:
                ApplicationSchemaWrapper.apply_attributes(entry)
        else:
            ApplicationSchemaWrapper.apply_attributes(application_schema)
        return application_schema
class ApplicationSchemaWrapper:
@staticmethod
def apply_attributes(application):
formurl = application['formUrl']
application['formId'] = formurl[formurl.find("/form/")+6:formurl.find("/submission/")]
application['submissionId'] = formurl[formurl.find("/submission/")+12:len(formurl)]
return application
| 2.140625
| 2
|
segmentation_rt/rs2mask/rs2mask.py
|
BrouBoni/segmentation_RT
| 6
|
12774212
|
""" Implementation of :py:class:`Dataset` object. A folder containing a set of subjects with CT and RS in dicom format
is converted into nii format. A new folder is created keeping the same organization.
"""
import os
import numpy as np
from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs
class Dataset:
"""
From dicom to dataset class. Convert CT and RTSTRUCT into nii, readable by deep learning frameworks.
All subfolders representing subject must contain the CT and the RS associated.
Example:
>>> from segmentation_rt.rs2mask import Dataset
>>> structures = ['Heart', 'Breast L', 'Breast R']
>>> dataset = Dataset('data/dicom_dataset', 'data/nii_dataset', structures)
>>> dataset.make()
:param string path:
Root directory.
:param string export_path:
Export path.
:param list[string] structures:
List of desired structure(s).
:param bool force:
Force export even if one structure is missing.
"""
def __init__(self, path, export_path, structures, force=True):
self.path = path
self.export_path = export_path
self.structures = structures
self.dataset_name = os.path.basename(export_path)
self.force = force
self.root_path = os.path.dirname(self.path)
self.patients = [folder for folder in os.listdir(self.path) if
os.path.isdir(os.path.join(self.path, folder))]
self.patient_paths = [os.path.join(self.path, patient) for patient in self.patients]
self.rs_paths = self.get_rs()
def __str__(self):
return self.dataset_name
def get_rs(self):
"""
List RTSTRUCT for each patient.
:rtype: list[str]
"""
rs_paths = []
for path in self.patient_paths:
files = [filename for filename in os.listdir(path) if filename.startswith("RS")]
assert len(files) > 0, 'at least one RS is required'
rs = files[0]
rs_paths.append(os.path.join(path, rs))
return rs_paths
def find_structures(self, index):
"""
List missing and not missing structures in a RTSTRUCT.
:param index: index of the patient.
:type index: int
:return: List missing and not missing structures.
:rtype: (list[str],list[str])
"""
structures = list_rt_structs(self.rs_paths[index])
ref_structures = np.array(self.structures)
maks = np.in1d(ref_structures, structures)
not_missing = ref_structures[maks]
missing = ref_structures[~maks]
if len(missing):
print(f"WARNING ! Some structures are missing: {missing}\n")
return missing, not_missing
def make(self):
"""Create structures and convert the CT in nii format for each subject."""
print(f"Structure(s) to export: {self.structures}")
print(f"Patient(s) identification : {self.patients}")
for index, path_patient in enumerate(self.patient_paths):
patient_id = self.patients[index]
print(f"Exporting {index + 1} ({patient_id}) on {len(self.patients)}")
nii_output = os.path.join(self.export_path, patient_id)
missing, not_missing = self.find_structures(index)
if len(missing) == 0 or self.force:
dcmrtstruct2nii(self.rs_paths[index], path_patient, nii_output, not_missing, False,
mask_foreground_value=1)
nii_maks = [nii_mask for nii_mask in os.listdir(nii_output) if nii_mask.startswith('mask')]
for nii in nii_maks:
name = os.path.splitext(nii)[0].split("_")[1].replace("-", " ")
os.rename(os.path.join(nii_output, nii), os.path.join(nii_output, name + '.nii'))
os.rename(os.path.join(nii_output, "image.nii"), os.path.join(nii_output, "ct.nii"))
else:
print(f"Skip {patient_id} because of missing structure(s)")
print("Export done")
| 3.078125
| 3
|
generating_markov_chains/markov_chain_two.py
|
AlexanderEllis/presidential-tweets
| 2
|
12774213
|
<gh_stars>1-10
"""
This module is for generating a Markov chain order two from a text.
"""
def generate_markov_chain(tweet_array):
"""
Input assumes text is an array of tweets, each tweet with words separated by spaces with no new lines.
This requires some changes to most transcripts ahead of time. Output will be a dictionary
with each pair of words that exists in the text as a key and a list of words that follow
that pair as the value.
"""
tweets_copy = tweet_array[:]
markov_chain = {}
for tweet in tweets_copy:
word_array = tweet.split()
for i in range(len(word_array) - 2): # - 2 to not analyze the last word as first in pair
first_word = strip_word(word_array[i])
second_word = strip_word(word_array[i + 1])
third_word = strip_word(word_array[i + 2])
pair = first_word + ' ' + second_word
if pair not in markov_chain:
markov_chain[pair] = [third_word]
else:
markov_chain[pair].append(third_word)
return markov_chain
def strip_word(string):
"""
This function strips the words of any surrounding spaces or quotation marks from an input string,
but does not replace any single quotes inside words (e.g. "isn't")
"""
return string.strip().strip("'").strip('"')
| 3.640625
| 4
|
tools/telemetry/telemetry/util/exception_formatter.py
|
shaochangbin/chromium-crosswalk
| 2
|
12774214
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print prettier and more detailed exceptions."""
import math
import os
import sys
import traceback
from telemetry.core import util
def PrintFormattedException(exception_class, exception, tb):
"""Prints an Exception in a more useful format than the default.
TODO(tonyg): Consider further enhancements. For instance:
- Report stacks to maintainers like depot_tools does.
- Add a debug flag to automatically start pdb upon exception.
"""
def _GetFinalFrame(frame):
final_frame = None
while frame is not None:
final_frame = frame
frame = frame.tb_next
return final_frame
def _AbbreviateMiddle(target, middle, length):
assert length >= 0, 'Must provide positive length'
assert len(middle) <= length, 'middle must not be greater than length'
if len(target) <= length:
return target
half_length = (length - len(middle)) / 2.
return '%s%s%s' % (target[:int(math.floor(half_length))],
middle,
target[-int(math.ceil(half_length)):])
base_dir = os.path.abspath(util.GetChromiumSrcDir())
formatted_exception = traceback.format_exception(
exception_class, exception, tb)
extracted_tb = traceback.extract_tb(tb)
traceback_header = formatted_exception[0].strip()
exception = ''.join([l[2:] if l[:2] == ' ' else l for l in
traceback.format_exception_only(exception_class,
exception)])
local_variables = [(variable, value) for variable, value in
_GetFinalFrame(tb).tb_frame.f_locals.iteritems()
if variable != 'self']
# Format the traceback.
print >> sys.stderr
print >> sys.stderr, traceback_header
for filename, line, function, text in extracted_tb:
filename = os.path.abspath(filename)
if filename.startswith(base_dir):
filename = filename[len(base_dir)+1:]
print >> sys.stderr, ' %s at %s:%d' % (function, filename, line)
print >> sys.stderr, ' %s' % text
# Format the locals.
if local_variables:
print >> sys.stderr
print >> sys.stderr, 'Locals:'
longest_variable = max([len(v) for v, _ in local_variables])
for variable, value in sorted(local_variables):
value = repr(value)
possibly_truncated_value = _AbbreviateMiddle(value, ' ... ', 1024)
truncation_indication = ''
if len(possibly_truncated_value) != len(value):
truncation_indication = ' (truncated)'
print >> sys.stderr, ' %s: %s%s' % (variable.ljust(longest_variable + 1),
possibly_truncated_value,
truncation_indication)
# Format the exception.
print >> sys.stderr
print >> sys.stderr, exception
| 2.578125
| 3
|
easyai/base_name/task_name.py
|
lpj0822/image_point_cloud_det
| 1
|
12774215
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:
class TaskName():
Classify_Task = "classify"
Detect2d_Task = "detect2d"
Segment_Task = "segment"
PC_Classify_Task = "pc_classify"
| 1.226563
| 1
|
scr/target/TargetScan/geTarmir.py
|
chunjie-sam-liu/miRNASNP-v3
| 1
|
12774216
|
<gh_stars>1-10
import os,re
with open("mir_seed_2_8.txt","a") as mirseed:
RULE1={"A":"U","C":"G","G":"C","U":"A"}
with open("wild_mir.fa") as mirseq:
line = mirseq.readline().strip()
while(line):
if line.startswith('>'):
mirid = re.split(r'\s',line)[0][1:]
cmd1 = "grep -w "+ mirid + " mature.gff3"
# print(cmd1 +'\n')
output = os.popen(cmd1)
mirinfo = output.read().strip().split()
# print(mirinfo)
# print("\n")
curseq = mirseq.readline().strip()
if mirinfo[6] == '-':
new_seq = "".join(map(lambda x:RULE1[x],curseq))[::-1]
seed_seq = new_seq[1:8] #from 1 to 7
else:
seed_seq = curseq[1:8]
mirseed.write(mirid[4:]+"\t"+seed_seq+"\t9606"+"\n")
line = mirseq.readline().strip()
| 2.328125
| 2
|
data_loader.py
|
eXascaleInfolab/Wiki2Prop
| 2
|
12774217
|
<reponame>eXascaleInfolab/Wiki2Prop<filename>data_loader.py
import argparse
import pickle
import logging
import os
import pandas as pd
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
import numpy as np
np.set_printoptions(suppress=True)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
ROOT_PATH = os.path.join("//net","dataset")
ROOT_PATH = os.path.join("//var","cache","fscache","audiffre")
PATH_TO_CLEANED_DATA = os.path.join(ROOT_PATH, "pp","data","cleaned_data")
PATH_TO_CLEANED_DATA = os.path.join(ROOT_PATH, "cleaned_data")
def get_all_indexes(self, year = "2018", embedding = "300LG"):
ID_train = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_train.npy"))
ID_valid = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_valid.npy"))
ID_test = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_test.npy"))
all_ids = np.concatenate([ID_train, ID_valid, ID_test])
return all_ids
class DataLoader() :
def __init__(self, year = "2018", embedding = "300LG",with_image = True):
if year == "2018":
if embedding =="300LG":
if with_image :
path_X = os.path.join(PATH_TO_CLEANED_DATA,"X_full_2018.npy")
else :
path_X = os.path.join(PATH_TO_CLEANED_DATA,"X_cleaned_20180813.npy")
path_Y = os.path.join(PATH_TO_CLEANED_DATA,"Y_cleaned_20180813.npy")
elif year == "2019":
if embedding == "300LG":
if with_image :
path_X = os.path.join(PATH_TO_CLEANED_DATA,"X_full_2019.npy")
else :
path_X = os.path.join(PATH_TO_CLEANED_DATA,"X_cleaned_20190916.npy")
path_Y = os.path.join(PATH_TO_CLEANED_DATA,"Y_cleaned_20190916.npy")
if with_image:
X =pd.read_feather(path_X)
indexes = X.C_index
X.index = indexes
self.X = X.drop(columns = ["C_index"])
else :
self.X = pd.read_pickle(path_X)
self.Y = pd.read_pickle(path_Y)
def get_train(self):
ID = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_train.npy"))
X = self.X.loc[ID].values
Y = self.Y.loc[ID].values
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
Y=Y[is_valid]
return X, Y
def get_valid(self):
ID = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_valid.npy"))
X = self.X.loc[ID].values
Y = self.Y.loc[ID].values
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
Y=Y[is_valid]
return X, Y
def get_test(self):
ID = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_test.npy"))
X = self.X.loc[ID].values
Y = self.Y.loc[ID].values
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
Y=Y[is_valid]
return X, Y
def get_all(self):
X = self.X
Y = self.Y
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
Y=Y[is_valid]
return X.values, Y.values, X.index
def get_test_index(self):
ID = np.load(os.path.join(PATH_TO_CLEANED_DATA, "indexes_test.npy"))
X = self.X.loc[ID]
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
return X.index
def get_topclass(self):
ID = np.load(os.path.join("/net","dataset","pp","data",'TopClass_20180813_indexes_test.npy'))
X = self.X.loc[ID].values
Y = self.Y.loc[ID].values
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
Y=Y[is_valid]
return X, Y
def get_q(self, Q):
X = self.X.loc[[Q]].values
Y = self.Y.loc[[Q]].values
is_valid = (np.sum(np.isnan(X),axis=1)==0)
X=X[is_valid]
Y=Y[is_valid]
return X, Y
def get_prediction_transformation(self):
prop_transform = np.load(os.path.join(PATH_TO_CLEANED_DATA,"properties_2019_to_2018.npy"))
return prop_transform
def get_properties(self):
properties = np.load(os.path.join(PATH_TO_CLEANED_DATA,"properties_2018.npy"), allow_pickle=True)
return properties
def get_nlabels(self):
return self.Y.shape[1]
def get_ndims(self):
return self.X.shape[1]
def clean(self):
self.X = None
self.Y = None
if __name__ == "__main__":
#data = DataLoader(year = "2019")
#X_train, Y_train = data.get_train()
s = os.path.relpath((os.path.join("..","data","classes.pickle")),start=os.path.curdir)
print(s)
Z= pickle.load( open(s,"rb" ) )
| 2.171875
| 2
|
huxley/api/tests/test_user.py
|
bmun/huxley
| 18
|
12774218
|
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
from django.urls import reverse
from django.test import TestCase
from django.test.client import Client
from rest_framework import exceptions
from huxley.accounts.models import User
from huxley.core.models import Conference
from huxley.api import tests
from huxley.api.tests import auto
from huxley.utils.test import models
class UserDetailGetTestCase(tests.RetrieveAPITestCase):
url_name = 'api:user_detail'
def test_anonymous_user(self):
'''It should reject request from an anonymous user.'''
user = models.new_user()
response = self.get_response(user.id)
self.assertNotAuthenticated(response)
def test_other_user(self):
'''It should reject request from another user.'''
user1 = models.new_user(username='user1')
user2 = models.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(user1.id)
self.assertPermissionDenied(response)
def test_superuser(self):
'''It should return the correct fields for a superuser.'''
user1 = models.new_user(username='user1')
user2 = models.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(user1.id)
self.assertEqual(response.data, {
'id': user1.id,
'username': user1.username,
'first_name': user1.first_name,
'last_name': user1.last_name,
'user_type': user1.user_type,
'school': user1.school_id,
'committee': user1.committee_id,
'delegate': user1.delegate_id
})
def test_self(self):
'''It should return the correct fields for a single user.'''
school = models.new_school()
user = school.advisor
self.client.login(username=user.username, password='<PASSWORD>')
response = self.get_response(user.id)
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': user.user_type,
'school': {
'id': school.id,
'name': school.name,
'address': school.address,
'city': school.city,
'state': school.state,
'zip_code': school.zip_code,
'country': school.country,
'primary_name': school.primary_name,
'primary_gender': school.primary_gender,
'primary_email': school.primary_email,
'primary_phone': school.primary_phone,
'primary_type': school.primary_type,
'secondary_name': school.secondary_name,
'secondary_gender': school.secondary_gender,
'secondary_email': school.secondary_email,
'secondary_phone': school.secondary_phone,
'secondary_type': school.secondary_type,
'program_type': school.program_type,
'times_attended': school.times_attended,
'international': school.international,
},
'committee': user.committee_id,
'delegate': user.delegate_id
})
def test_chair(self):
'''It should have the correct fields for chairs.'''
committee = models.new_committee()
user = models.new_user(
username='testuser',
password='<PASSWORD>',
user_type=User.TYPE_CHAIR,
committee_id=committee.id)
self.client.login(username='testuser', password='<PASSWORD>')
response = self.get_response(user.id)
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': user.user_type,
'school': user.school_id,
'committee': user.committee_id,
'delegate': user.delegate_id
})
class UserDetailDeleteTestCase(auto.DestroyAPIAutoTestCase):
url_name = 'api:user_detail'
@classmethod
def get_test_object(cls):
return models.new_user()
def test_anonymous_user(self):
'''It should reject the request from an anonymous user.'''
self.do_test(expected_error=auto.EXP_NOT_AUTHENTICATED)
def test_other_user(self):
'''It should reject the request from another user.'''
models.new_school(user=self.default_user)
self.as_default_user().do_test(
expected_error=auto.EXP_PERMISSION_DENIED)
def test_self(self):
'''It should allow a user to delete themself.'''
self.as_user(self.object).do_test()
def test_superuser(self):
'''It should allow a superuser to delete a user.'''
self.as_superuser().do_test()
class UserDetailPatchTestCase(tests.PartialUpdateAPITestCase):
url_name = 'api:user_detail'
params = {'first_name': 'first', 'last_name': 'last'}
def setUp(self):
self.user = models.new_user(username='user1', password='<PASSWORD>')
def test_anonymous_user(self):
'''An anonymous user should not be able to change information.'''
response = self.get_response(self.user.id, params=self.params)
self.assertNotAuthenticated(response)
user = User.objects.get(id=self.user.id)
self.assertEqual(user.first_name, 'Test')
self.assertEqual(user.last_name, 'User')
def test_other_user(self):
'''Another user should not be able to change information about any other user.'''
models.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
self.assertPermissionDenied(response)
user = User.objects.get(id=self.user.id)
self.assertEqual(user.first_name, 'Test')
self.assertEqual(user.last_name, 'User')
def test_self(self):
'''A User should be allowed to change information about himself.'''
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
user = User.objects.get(id=self.user.id)
self.assertEqual(response.data['first_name'], user.first_name)
self.assertEqual(response.data['last_name'], user.last_name)
def test_superuser(self):
'''A superuser should be allowed to change information about a user.'''
models.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
user = User.objects.get(id=self.user.id)
self.assertEqual(response.data['first_name'], user.first_name)
self.assertEqual(response.data['last_name'], user.last_name)
class UserListGetTestCase(tests.ListAPITestCase):
url_name = 'api:user_list'
def test_anonymous_user(self):
'''It should reject the request from an anonymous user.'''
models.new_user(username='user1')
models.new_user(username='user2')
response = self.get_response()
self.assertNotAuthenticated(response)
def test_user(self):
'''It should reject the request from a regular user.'''
models.new_user(username='user1', password='<PASSWORD>')
models.new_user(username='user2')
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response()
self.assertPermissionDenied(response)
def test_superuser(self):
'''It should allow a superuser to list all users.'''
user1 = models.new_superuser(username='user1', password='<PASSWORD>')
user2 = models.new_user(username='user2')
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response()
self.assertEqual(response.data, [
{'id': user1.id,
'username': user1.username,
'first_name': user1.first_name,
'last_name': user1.last_name,
'user_type': user1.user_type,
'school': user1.school_id,
'committee': user1.committee_id,
'delegate': user1.delegate_id},
{'id': user2.id,
'username': user2.username,
'first_name': user2.first_name,
'last_name': user2.last_name,
'user_type': user2.user_type,
'school': user2.school_id,
'committee': user2.committee_id,
'delegate': user2.delegate_id},
])
class UserListPostTestCase(tests.CreateAPITestCase):
url_name = 'api:user_list'
params = {'username': 'Kunal',
'password': 'password',
'first_name': 'Kunal',
'last_name': 'Mehta'}
def test_valid(self):
params = self.get_params()
response = self.get_response(params)
user_query = User.objects.filter(id=response.data['id'])
self.assertTrue(user_query.exists())
user = User.objects.get(id=response.data['id'])
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': User.TYPE_ADVISOR,
'school': user.school_id,
'email': user.email
})
def test_empty_username(self):
response = self.get_response(params=self.get_params(username=''))
self.assertEqual(response.data, {
'username': [u'This field may not be blank.']
})
def test_taken_username(self):
models.new_user(username='_Kunal', password='<PASSWORD>')
response = self.get_response(params=self.get_params(username='_Kunal'))
self.assertEqual(response.data, {
'username': [u'A user with that username already exists.']
})
def test_invalid_username(self):
response = self.get_response(params=self.get_params(username='>Kunal'))
self.assertEqual(response.data['username'], [
exceptions.ErrorDetail(u'Enter a valid username. This value may contain only '
u'letters, numbers, and @/./+/-/_ characters.', code='invalid')
]
)
def test_empty_password(self):
response = self.get_response(params=self.get_params(password=''))
self.assertEqual(response.data, {
'password': [u'This field may not be blank.']
})
def test_invalid_password(self):
response = self.get_response(params=self.get_params(password='><PASSWORD>'))
self.assertEqual(response.data, {
'password': ['<PASSWORD>.']
})
def test_empty_first_name(self):
response = self.get_response(params=self.get_params(first_name=''))
self.assertEqual(response.data, {
'first_name': ['This field is required.']
})
def test_empty_last_name(self):
response = self.get_response(params=self.get_params(last_name=''))
self.assertEqual(response.data, {
'last_name': ['This field is required.']
})
def test_username_length(self):
response = self.get_response(params=self.get_params(username='user'))
self.assertEqual(response.data, {
'username': ['Username must be at least 5 characters.']
})
def test_password_length(self):
response = self.get_response(params=self.get_params(password='<PASSWORD>'))
self.assertEqual(response.data, {
'password': ['Password must be at least 6 characters.']
})
def test_invalid(self):
conf = Conference.get_current()
conf.open_reg = False
conf.save()
params = self.get_params()
response = self.get_response(params)
self.assertEqual(response.data, {
'detail': 'Conference registration is closed.'
})
conf.open_reg = True
conf.save()
class CurrentUserTestCase(TestCase):
fixtures = ['conference']
def setUp(self):
self.client = Client()
self.url = reverse('api:current_user')
self.maxDiff = None
def get_data(self, url):
return json.loads(self.client.get(url).content)
def test_login(self):
user = models.new_user(username='lol', password='<PASSWORD>')
user2 = models.new_user(username='bunny', password='<PASSWORD>')
credentials = {'username': 'lol', 'password': '<PASSWORD>'}
response = self.client.post(
self.url,
data=json.dumps(credentials),
content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(int(self.client.session['_auth_user_id']), user.id)
credentials = {'username': 'bunny', 'password': '<PASSWORD>'}
response = self.client.post(
self.url,
data=json.dumps(credentials),
content_type='application/json')
self.assertEqual(int(self.client.session['_auth_user_id']), user.id)
data = json.loads(response.content)
self.assertEqual(data['detail'],
'Another user is currently logged in.')
def test_logout(self):
user = models.new_user(username='lol', password='<PASSWORD>')
self.client.login(username='lol', password='<PASSWORD>')
self.assertEqual(int(self.client.session['_auth_user_id']), user.id)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 200)
self.assertTrue('_auth_user_id' not in self.client.session)
def test_get(self):
data = self.get_data(self.url)
self.assertEqual(len(data.keys()), 1)
self.assertEqual(data['detail'], u'Not found.')
school = models.new_school()
user = school.advisor
self.client.login(username=user.username, password='<PASSWORD>')
data = self.get_data(self.url)
self.assertEqual(len(data.keys()), 8)
self.assertEqual(data['id'], user.id)
self.assertEqual(data['username'], user.username)
self.assertEqual(data['first_name'], user.first_name)
self.assertEqual(data['last_name'], user.last_name)
self.assertEqual(data['user_type'], User.TYPE_ADVISOR)
self.assertEqual(data['school'], {
u'id': school.id,
u'name': str(school.name),
u'address': str(school.address),
u'city': str(school.city),
u'state': str(school.state),
u'zip_code': str(school.zip_code),
u'country': str(school.country),
u'primary_name': str(school.primary_name),
u'primary_gender': school.primary_gender,
u'primary_email': str(school.primary_email),
u'primary_phone': str(school.primary_phone),
u'primary_type': school.primary_type,
u'secondary_name': str(school.secondary_name),
u'secondary_gender': school.secondary_gender,
u'secondary_email': str(school.secondary_email),
u'secondary_phone': str(school.secondary_phone),
u'secondary_type': school.secondary_type,
u'program_type': school.program_type,
u'times_attended': school.times_attended,
u'international': school.international,
})
class DelegateUserCreateTestCase(tests.PartialUpdateAPITestCase):
url_name = 'api:delegate_detail'
def setUp(self):
self.advisor = models.new_user(username='advisor', password='<PASSWORD>')
self.school = models.new_school(user=self.advisor)
self.registration = models.new_registration(school=self.school)
self.assignment = models.new_assignment(registration=self.registration)
self.delegate = models.new_delegate(
school=self.school, assignment=self.assignment)
self.delegate.assignment = None
self.delegate.save()
self.superuser = models.new_user(is_superuser=True)
self.params = {'email': '<EMAIL>'}
self.assign_params = {'assignment': self.assignment.id}
self.unassign_params = {'assignment': None}
def test_delegate_no_user(self):
self.client.login(username='advisor', password='<PASSWORD>')
response = self.get_response(self.delegate.id, params=self.params)
self.assertFalse(
User.objects.filter(delegate__id=self.delegate.id).exists())
def test_delegate_user_create(self):
self.client.login(username='advisor', password='<PASSWORD>')
response = self.get_response(
self.delegate.id, params=self.assign_params)
self.assertTrue(
User.objects.filter(delegate__id=self.delegate.id).exists())
def test_delegate_user_unassign(self):
self.client.login(username='advisor', password='<PASSWORD>')
response1 = self.get_response(
self.delegate.id, params=self.assign_params)
response2 = self.get_response(
self.delegate.id, params=self.unassign_params)
self.assertTrue(
User.objects.filter(delegate__id=self.delegate.id).exists())
class DelegateUserDestroyTestCase(tests.DestroyAPITestCase):
url_name = 'api:delegate_detail'
def setUp(self):
self.advisor = models.new_user(username='advisor', password='<PASSWORD>')
self.school = models.new_school(user=self.advisor)
self.registration = models.new_registration(school=self.school)
self.assignment = models.new_assignment(registration=self.registration)
self.delegate = models.new_delegate(
school=self.school, assignment=self.assignment)
self.delegate.assignment = None
self.delegate.save()
self.superuser = models.new_user(is_superuser=True)
self.delegate_user = models.new_user(
username='delegate',
delegate=self.delegate,
user_type=User.TYPE_DELEGATE)
def test_delegate_user_destroy(self):
self.client.login(username='advisor', password='<PASSWORD>')
response = self.get_response(self.delegate.id)
self.assertFalse(
User.objects.filter(delegate__id=self.delegate.id).exists())
| 2.234375
| 2
|
scripts/v2-checks.py
|
coltekin/gk-treebank
| 3
|
12774219
|
<filename>scripts/v2-checks.py
#!/usr/bin/env python3
import sys, argparse, re
from udtools.conllu import conllu_sentences
def is_predicate(node, sent):
if node.deprel in {'root', 'parataxis'}:
# This may not always be correct
return True
if node.upos == 'VERB':
return True
elif node.deprel in {'xcomp', 'ccomp', 'advcl', 'acl'}:
return True
else:
for child in sent.children_of(node):
if child.deprel == 'cop':
return True
ap = argparse.ArgumentParser()
ap.add_argument('input_file')
args = ap.parse_args()
tb = conllu_sentences(args.input_file)
for sent_num, sent in enumerate(tb):
for i, node in enumerate(sent.nodes[1:]):
head = sent.nodes[node.head]
case = node.get_feat('Case')
if node.deprel in {'obj', 'xcomp', 'ccomp'}\
and case not in {'Acc', 'Nom'}\
and not (node.upos == 'VERB' and node.get_feat('VerbType') is None):
match = True
for child in sent.children_of(node):
if child.deprel == 'cop' and child.get_feat('Case') in {'Acc', 'Nom'}:
match = False
if node.upos == 'ADJ':
match = False
if match:
print("{} {:04d}-{} {} is not nominatinve or accusative ({})".format(
args.input_file, sent_num+1, node.index, node.deprel, case))
if node.deprel == 'obl' and case in {'Acc', 'Nom'}:
match = True
for child in sent.children_of(node):
if child.deprel == 'case':
match = False
if match:
print("{} {:04d}-{} {} with {} dependent.".format(
args.input_file, sent_num+1, node.index, node.deprel, case))
if node.deprel in {'iobj'}:
print("{} {:04d}-{} The deprel {} should not be used.".format(
args.input_file, sent_num+1, node.index, node.deprel))
if node.deprel in {'acl', 'case'} and \
head.upos not in {'NOUN', 'PRON', 'PROPN', 'NUM'}:
print("{} {:04d}-{} The deprel {} should modify a nominal. ".format(
args.input_file, sent_num+1, node.index, node.deprel))
if node.deprel in {'advcl', 'mark'} and \
not is_predicate(head, sent):
print("{} {:04d}-{} The deprel {} should modify a predicate. ".format(
args.input_file, sent_num+1, node.index, node.deprel))
if node.deprel in {'obj'} and \
head.deprel in {'NOUN', 'ADJ', 'PRON', 'PROPN'}:
print("{} {04d}-{} The deprel '{}' should not modify nouns ".format(
args.input_file, sent_num+1, node.index, node.deprel))
| 2.765625
| 3
|
roulette.py
|
Finegorko/roulette_game
| 0
|
12774220
|
# random number generation for the wheel
import random

# starting bankroll for every strategy
startmoney = 1000000
# bet size as a fraction of the starting bankroll
c1 = 0.001


def play_roulette(num_sectors, max_games=None):
    """Repeatedly bet on black (sectors 1..18) on a wheel of num_sectors sectors.

    num_sectors sets the expectation of the game:
      37 -> negative (18 black, 18 red, 1 zero; the casino setup),
      36 -> zero     (18 black, 18 red),
      35 -> positive (18 black, 17 red).
    Each bet is a fixed fraction (c1) of the starting bankroll, capped at the
    remaining money so the balance never goes negative. Play stops when the
    money runs out or, if max_games is given, after that many games.

    Returns (wins, losses, balance_history, game_numbers); the two lists are
    index-aligned with one entry per game (the balance is recorded after the
    bet is placed but before a win is paid out, as in the original script).
    """
    money = startmoney
    wins = 0
    losses = 0
    balance_history = []
    game_numbers = []
    while money > 0 and (max_games is None or wins + losses < max_games):
        bet = startmoney * c1
        if bet > money:
            bet = money  # never bet more than what is left
        money -= bet
        balance_history.append(money)
        game_numbers.append(len(game_numbers) + 1)
        # spin the wheel; the first 18 sectors count as "black"
        ball = random.randint(1, num_sectors)
        if ball in range(1, 19):
            money += bet * 2  # the stake comes back doubled
            wins += 1
        else:
            losses += 1
    return wins, losses, balance_history, game_numbers


def print_stats(wins, losses, total_games):
    """Print won/lost bet counts with percentages of total_games (Russian UI text)."""
    print(
        "Выиграно ставок: "
        + str(wins)
        + " ("
        + str(wins / total_games * 100)
        + "%). "
        + " Проиграно ставок: "
        + str(losses)
        + " ("
        + str(losses / total_games * 100)
        + "%). "
    )


def main():
    """Simulate the three betting strategies and plot their balance curves."""
    # plotly is only needed for the final chart, so import it lazily;
    # this also keeps the module importable without plotly installed
    import plotly.graph_objs as go

    # strategy 1: negative expectation -- play until the money runs out
    win, loose, balance1, games1 = play_roulette(37)
    # strategy 1's game count is the reference length for the other two
    games = win + loose
    print_stats(win, loose, games)

    # strategy 2: zero expectation, at most `games` games
    # (percentages are deliberately reported against strategy 1's game count,
    # exactly as the original script did)
    win, loose, balance2, games2 = play_roulette(36, games)
    print_stats(win, loose, games)

    # strategy 3: positive expectation, at most `games` games
    win, loose, balance3, games3 = play_roulette(35, games)
    print_stats(win, loose, games)

    # draw all three balance curves in the browser
    fig = go.Figure()
    fig.add_trace(
        go.Scatter(x=games1, y=balance1, name="Отрицательное матожидание")
    )
    fig.add_trace(go.Scatter(x=games2, y=balance2, name="Нулевое матожидание"))
    fig.add_trace(
        go.Scatter(x=games3, y=balance3, name="Положительное матожидание")
    )
    fig.show()


if __name__ == "__main__":
    main()
| 3.203125
| 3
|
mutadi/private_messages/tests/test_urls.py
|
etiennody/mutadi
| 1
|
12774221
|
"""Unit tests for posts app urls
"""
import pytest
from django.contrib.auth import get_user_model
from django.urls import resolve, reverse
from model_bakery import baker
from mutadi.private_messages.models import PrivateMessage
# give every test in this module database access
pytestmark = pytest.mark.django_db
# resolve the project's active user model once for all tests below
User = get_user_model()
class TestPrivateMessageUrls:
    """URL-routing tests (reverse and resolve) for the private messages app."""
    @pytest.fixture
    def proto_user(self):
        """Bake three User instances with sequentially numbered usernames."""
        return baker.make(
            User,
            username=baker.seq("User-"),
            _quantity=3,
        )
    @pytest.fixture
    def proto_private_message(self, proto_user):
        """Bake a PrivateMessage exchanged between the first two baked users."""
        return baker.make(
            PrivateMessage,
            sender=proto_user[0],
            recipient=proto_user[1],
            content=(
                "Proident nisi cillum sit tempor "
                "reprehenderit proident in non fugiat ex id."
            ),
        )
    def test_inbox_reverse(self):
        """Reversing 'inbox' yields the inbox path."""
        url = reverse("inbox")
        assert url == "/messages/inbox/"
    def test_inbox_resolve(self):
        """Resolving the inbox path names the inbox view."""
        match = resolve("/messages/inbox/")
        assert match.view_name == "inbox"
    def test_outbox_reverse(self):
        """Reversing 'outbox' yields the outbox path."""
        url = reverse("outbox")
        assert url == "/messages/outbox/"
    def test_outbox_resolve(self):
        """Resolving the outbox path names the outbox view."""
        match = resolve("/messages/outbox/")
        assert match.view_name == "outbox"
    def test_delete_message_reverse(self, proto_private_message):
        """Reversing 'delete_message' yields the message's delete path."""
        pk = proto_private_message.pk
        url = reverse("delete_message", args=[f"{pk}"])
        assert url == f"/messages/message_detail/{pk}/delete"
    def test_delete_message_resolve(self, proto_private_message):
        """Resolving a delete path names the delete_message view."""
        pk = proto_private_message.pk
        match = resolve(f"/messages/message_detail/{pk}/delete")
        assert match.view_name == "delete_message"
    def test_message_detail_reverse(self, proto_private_message):
        """Reversing 'message_detail' yields the message's detail path."""
        pk = proto_private_message.pk
        url = reverse("message_detail", args=[f"{pk}"])
        assert url == f"/messages/message_detail/{pk}"
    def test_message_detail_resolve(self, proto_private_message):
        """Resolving a detail path names the message_detail view."""
        pk = proto_private_message.pk
        match = resolve(f"/messages/message_detail/{pk}")
        assert match.view_name == "message_detail"
    def test_compose_message_reverse(self):
        """Reversing 'compose_message' yields the compose path."""
        url = reverse("compose_message")
        assert url == "/messages/compose_message/"
    def test_compose_message_resolve(self):
        """Resolving the compose path names the compose_message view."""
        match = resolve("/messages/compose_message/")
        assert match.view_name == "compose_message"
| 2.484375
| 2
|
exercises_generation.py
|
Valadis-Mastoras/SemiGramEx
| 1
|
12774222
|
<reponame>Valadis-Mastoras/SemiGramEx
import pandas as pd
import random
import spacy
from sentences_retrieval import retrieve_verb_tense
from exercise_types_creation import generate_grammar_mistake, generate_multiple_choice, generate_fib_verb
from helper_functions import PDF, extra_preprocessing, profanity_check
# Generate mistakes for the Find-mistakes and Multiple-choice exercises
def generate_mistakes_verb(retrieved_tuple, verb_tense, spacy_model, exercise_type):
    """Build Find-the-mistake or Multiple-choice items from retrieved sentences.

    retrieved_tuple: iterable of per-sentence tuples produced by
        retrieve_verb_tense; fields used here are elem[0] original sentence,
        elem[1] token list, elem[3] auxiliary index, elem[4] target verb index,
        elem[6][0] grammatical person, elem[7][0] grammatical number
        (layout inferred from usage below -- TODO confirm against
        sentences_retrieval.retrieve_verb_tense).
    verb_tense: tense label forwarded to the item generators.
    spacy_model: loaded spaCy pipeline used to lemmatize the target verb.
    exercise_type: 'mistakes' or 'multiple' (any other value leaves
        error_exercise unbound and raises NameError).

    Returns (original_sentences, generated_items), index-aligned.
    """
    mistakes_generations = []
    original_sentences = []
    for elem in retrieved_tuple:
        original_sentence = elem[0]
        sentence_tokens = elem[1]
        target_aux_index = elem[3]
        target_verb_index = elem[4]
        person = elem[6][0]
        number = str(elem[7][0])
        # sometimes the library use a string number instead of a number
        if person == 'One':
            person = 1
        elif person == 'Two':
            person = 2
        elif person == 'Three':
            person = 3
        else:
            person = int(person)
        # verb to be ill-injected
        target_sentence_verb = sentence_tokens[target_verb_index]
        # we take the lemma form of the target verb for better results of the conversion library
        verb_token_model = spacy_model(target_sentence_verb)
        target_verb_lemma = "".join([token.lemma_ for token in verb_token_model])
        if exercise_type == 'mistakes':
            error_exercise = generate_grammar_mistake(sentence_tokens, target_verb_index, target_aux_index, verb_tense, target_verb_lemma, person, number)
        elif exercise_type == 'multiple':
            error_exercise = generate_multiple_choice(sentence_tokens, target_verb_index, target_aux_index, verb_tense, target_verb_lemma, person, number)
        mistakes_generations.append(error_exercise)
        original_sentences.append(original_sentence)
    return (original_sentences, mistakes_generations)
# In this function the selection of the appropriate verb tense, along with the necessary parameters takes place
def verb_tense_generation(corpus, learner_level, sentence_number, verb_tense_couple, exercise_type, shuffle_instances):
    """Retrieve sentences for a couple of verb tenses and build exercise items.

    corpus: the sentence corpus (DataFrame) to draw examples from.
    learner_level: difficulty level passed through to the retrieval step.
    sentence_number: total number of sentences requested; split evenly
        between the two tenses of the couple.
    verb_tense_couple: 1 = Simple/Progressive Present,
        2 = Simple/Progressive Past, 3 = Present/Past Perfect.
    exercise_type: 'fib', 'mistakes' or 'multiple'.
    shuffle_instances: shuffle the generated items when True.

    Returns (original_sentences, generated_exercises), index-aligned.
    Prints a warning when the corpus holds fewer examples than requested.
    """
    # we aim on retrieving an equal number of sentences for each verb tense
    sentence_number = int(sentence_number / 2)
    # (tense name, retrieval id, has-auxiliary flag) for the two tenses of
    # each couple; this table replaces three near-identical copy-pasted
    # branches from the original implementation
    tense_couples = {
        1: (('simple_present', 1, False), ('present_progressive', 3, True)),
        2: (('simple_past', 2, False), ('past_progressive', 4, True)),
        3: (('present_perfect', 5, True), ('past_perfect', 6, True)),
    }
    if verb_tense_couple not in tense_couples:
        # previously an unknown couple raised an obscure NameError further down
        raise ValueError("Unknown verb tense couple: {}".format(verb_tense_couple))
    # the spaCy model is only needed for the mistake-based exercise types
    spacy_model = spacy.load('en_core_web_sm') if exercise_type != 'fib' else None
    generated_sentences = []
    original_generated_sentences = []
    for tense_name, tense_id, has_aux in tense_couples[verb_tense_couple]:
        retrieved = retrieve_verb_tense(corpus, learner_level, sentence_number,
                                        tense_id, has_aux, exercise_type, spacy_model)
        if exercise_type == 'fib':
            originals, generations = generate_fib_verb(retrieved)
        else:
            # 'mistakes' and 'multiple' share the same generation entry point
            originals, generations = generate_mistakes_verb(retrieved, tense_name, spacy_model, exercise_type)
        original_generated_sentences += originals
        generated_sentences += generations
    # shuffle for better results (generation and original stay paired)
    packed_tuples = list(zip(generated_sentences, original_generated_sentences))
    if shuffle_instances:
        random.shuffle(packed_tuples)
    gener_sentences = [generation for generation, _ in packed_tuples]
    original_sentences = [original for _, original in packed_tuples]
    original_fib_tuple = (original_sentences, gener_sentences)
    # check whether enough instances exist in the corpus
    if len(list(original_fib_tuple[1])) < sentence_number:
        if len(list(original_fib_tuple[1])) == 0:
            print("== NOTE: We are very sorry, but it seems that there is no sentence example of that Verb Tense in our corpus. ==")
            print("== Perhaps a different difficulty level might help. == \n")
        else:
            print("== NOTE: We are very sorry, but it seems that there are only {} examples of the given exercise type in our corpus. == \n".format(len(list(original_fib_tuple[1]))))
            print("== Perhaps a different difficulty level might help. == \n")
    return original_fib_tuple
# Generate the pdf exercises file for fill-in-the-blank and find-mistakes exercise
def generate_files_fib_finderror(corpus, learner_level, sentence_number, teach_goal, extra_options):
    """Render fill-in-the-blank or find-the-mistake exercises into a PDF.

    teach_goal is a 4-tuple (couple id, parity tag, title tag, exercise type);
    extra_options is checked by substring for 'shuffle', 'profanity' and
    'solution'. Writes ./static/files/generations/generated_exercises.pdf and
    returns (solutions_or_'without_solutions', exercises).
    """
    pdf = PDF()
    pdf.alias_nb_pages()
    pdf.add_page()
    pdf.set_font('Arial', 'B', 13)
    # map the internal exercise-type tag to a human-readable title
    if teach_goal[3] == 'fib':
        exercise_type = 'Fill-in-the-blank'
    elif teach_goal[3] == 'mistakes':
        exercise_type = 'Find-the-mistake'
    elif teach_goal[3] == 'multiple':
        exercise_type = 'Multiple-choice'
    # if the teaching goal concerns Verb Tenses
    if teach_goal[1] == 'verb_tense_even':
        if teach_goal[2] == 'Simple_Progressive_Present':
            pdf.cell(190, 0, 'Simple and Progressive Present, {} exercises for {} level'.format(exercise_type, learner_level), 0,0, 'C')
        elif teach_goal[2] == 'Simple_Progressive_Past':
            pdf.cell(190, 0, 'Simple and Progressive Past, {} exercises for {} level'.format(exercise_type, learner_level), 0,0, 'C')
        pdf.ln(15)
        pdf.cell(30, 10, 'Exercises', 0, 0, 'L')
        pdf.ln(15)
        pdf.set_font('Times', '', 10)
        # in case of a shuffling option
        if 'shuffle' in extra_options:
            generated_fib = list(verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], True))
        else:
            generated_fib = list(verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], False))
        clean_results = list(extra_preprocessing(generated_fib[1], generated_fib[0]))
    elif teach_goal[1] == 'verb_tense_odd':
        if teach_goal[2] == 'Perfect_Present_Past':
            pdf.cell(190, 0, 'Present and Past Perfect, {} exercises for {} level'.format(exercise_type, learner_level), 0, 0, 'C')
        pdf.ln(15)
        pdf.cell(30, 10, 'Exercises', 0, 0, 'L')
        pdf.ln(10)
        pdf.set_font('Times', '', 10)
        # in case of a shuffling option
        if 'shuffle' in extra_options:
            generated_fib = list(verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], True))
        else:
            generated_fib = list(verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], False))
        clean_results = list(extra_preprocessing(generated_fib[1], generated_fib[0]))
    # clean_results[1] holds the exercises; each item is transcoded through
    # latin-1 because FPDF's core fonts cannot encode arbitrary Unicode
    for i, item in enumerate(clean_results[1]):
        try:
            pdf.multi_cell(0, 5, "{}. {} \n".format(i + 1, item.encode('latin-1', 'ignore').decode('latin-1')), 0, 1)
            # in case the profanity option is checked
            if 'profanity' in extra_options:
                bad_words = profanity_check(item)
                if bad_words:
                    string_bad_words = ', '.join(bad_words)
                    pdf.set_fill_color(220, 50, 50)
                    pdf.set_text_color(255, 255, 255)
                    pdf.cell(0, 5, "The sentence {}, contains these possibly topic-sensitive words: {}".format(i+1, string_bad_words.encode('latin-1', 'ignore').decode('latin-1')), 0, 0, 'L', 1)
                    pdf.ln(6)
                    pdf.set_text_color(0, 0, 0)
        except UnicodeEncodeError:
            print("An encoding error has occured for the item: {} \n".format(item))
            print("That item has been skipped.\n")
    # if solutions are asked
    if 'solution' in extra_options:
        pdf.ln(5)
        pdf.set_font('Arial', 'B', 13)
        pdf.cell(30, 10, 'Solutions', 0, 0, 'L')
        pdf.ln(10)
        pdf.set_font('Times', '', 10)
        # clean_results[0] holds the untouched original sentences
        for i, item in enumerate(clean_results[0]):
            try:
                pdf.multi_cell(0, 5, "{}. {} \n".format(i + 1, item.encode('latin-1', 'ignore').decode('latin-1')), 0, 1)
            except UnicodeEncodeError:
                print("An encoding error has occured for the item: {} \n".format(item))
                print("That item has been skipped.\n")
    pdf.output('./static/files/generations/generated_exercises.pdf','F')
    if 'solution' in extra_options:
        return (clean_results[0], clean_results[1])
    else:
        return ('without_solutions', clean_results[1])
# Generate the pdf exercises file for multiple-choice exercise
def generate_files_multiple(corpus, learner_level, sentence_number, teach_goal, extra_options):
    """Render multiple-choice exercises (gap + two options) into a PDF.

    teach_goal is a 4-tuple (couple id, parity tag, title tag, exercise type);
    extra_options is checked by substring for 'shuffle', 'profanity' and
    'solution'. Writes ./static/files/generations/generated_exercises.pdf and
    returns (solutions_or_'without_solutions', (fibs, correct, wrong)).
    """
    pdf = PDF()
    pdf.alias_nb_pages()
    pdf.add_page()
    pdf.set_font('Arial', 'B', 13)
    # map the internal exercise-type tag to a human-readable title
    if teach_goal[3] == 'fib':
        exercise_type = 'Fill-in-the-blank'
    elif teach_goal[3] == 'mistakes':
        exercise_type = 'Find-the-mistake'
    elif teach_goal[3] == 'multiple':
        exercise_type = 'Multiple-choice'
    # if the teaching goal concerns Verb Tenses
    if teach_goal[1] == 'verb_tense_even':
        if teach_goal[2] == 'Simple_Progressive_Present':
            pdf.cell(190, 0, 'Simple and Progressive Present, {} exercises for {} level'.format(exercise_type, learner_level), 0,0, 'C')
        elif teach_goal[2] == 'Simple_Progressive_Past':
            pdf.cell(190, 0, 'Simple and Progressive Past, {} exercises for {} level'.format(exercise_type, learner_level), 0,0, 'C')
        pdf.ln(15)
        pdf.cell(30, 10, 'Exercises', 0, 0, 'L')
        pdf.ln(15)
        pdf.set_font('Times', '', 10)
        # in case of a shuffle option
        if 'shuffle' in extra_options:
            generated_fib = verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], True)
        else:
            generated_fib = verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], False)
    elif teach_goal[1] == 'verb_tense_odd':
        if teach_goal[2] == 'Perfect_Present_Past':
            pdf.cell(190, 0, 'Present and Past Perfect, {} exercises for {} level'.format(exercise_type, learner_level), 0,0, 'C')
        pdf.ln(15)
        pdf.cell(30, 10, 'Exercises', 0, 0, 'L')
        pdf.ln(10)
        pdf.set_font('Times', '', 10)
        if 'shuffle' in extra_options:
            generated_fib = verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], True)
        else:
            generated_fib = verb_tense_generation(corpus, learner_level, sentence_number, teach_goal[0], teach_goal[3], False)
    # the exercises solutions
    generated_multiple_solutions = generated_fib[0]
    # the fib, correct choice, wrong choice pack
    generated_multiple_pack = generated_fib[1]
    generated_multiple_fib = []
    generated_multiple_correct = []
    generated_multiple_wrong = []
    # unpack the packed generations for better processing
    # (each elem appears to be nested one level deep: elem[0] holds the
    # (fib, correct, wrong) triple -- TODO confirm against generate_multiple_choice)
    for elem in generated_multiple_pack:
        generated_multiple_fib.append(elem[0][0])
        generated_multiple_correct.append(elem[0][1])
        generated_multiple_wrong.append(elem[0][2])
    # latin-1 transcoding because FPDF's core fonts cannot encode arbitrary Unicode
    for i, item in enumerate(generated_multiple_solutions):
        try:
            pdf.multi_cell(0, 5, "{}. {} \n".format(i + 1, generated_multiple_fib[i].encode('latin-1', 'ignore').decode('latin-1')), 0, 1)
            # in case the profanity option is checked
            if 'profanity' in extra_options:
                bad_words = profanity_check(item)
                if bad_words:
                    string_bad_words = ', '.join(bad_words)
                    pdf.set_fill_color(220, 50, 50)
                    pdf.set_text_color(255, 255, 255)
                    pdf.cell(0, 5, "The sentence {}, contains these possibly topic-sensitive words: {}".format(i+1, string_bad_words.encode('latin-1', 'ignore').decode('latin-1')), 0, 0, 'L', 1)
                    pdf.ln(6)
                    pdf.set_text_color(0, 0, 0)
            pdf.multi_cell(0, 5, "A. {} \n".format(generated_multiple_correct[i].encode('latin-1', 'ignore').decode('latin-1')), 0, 1)
            pdf.multi_cell(0, 5, "B. {} \n".format(generated_multiple_wrong[i].encode('latin-1', 'ignore').decode('latin-1')), 0, 1)
            pdf.ln(6)
        except UnicodeEncodeError:
            print("An encoding error has occured for the item: {} \n".format(item))
            print("That item has been skipped.\n")
    # if solutions are asked
    if 'solution' in extra_options:
        pdf.ln(5)
        pdf.set_font('Arial', 'B', 13)
        pdf.cell(30, 10, 'Solutions', 0, 0, 'L')
        pdf.ln(10)
        pdf.set_font('Times', '', 10)
        for i, item in enumerate(generated_multiple_solutions):
            try:
                pdf.multi_cell(0, 5, "{}. {} \n".format(i + 1, item.encode('latin-1', 'ignore').decode('latin-1')), 0, 1)
            except UnicodeEncodeError:
                print("An encoding error has occured for the item: {} \n".format(item))
                print("That item has been skipped.\n")
    pdf.output('./static/files/generations/generated_exercises.pdf','F')
    pack_multiple = (generated_multiple_fib, generated_multiple_correct, generated_multiple_wrong)
    if 'solution' in extra_options:
        return (generated_multiple_solutions, pack_multiple)
    else:
        return ('without_solutions', pack_multiple)
def main(retrieved_parameters):
    """Entry point: parse the '_'-separated parameter string and generate exercises.

    retrieved_parameters has the shape
    '<resource>_<instances>_<difficulty>_<teaching-goal>_<extra-options>'.
    Returns whatever the selected generation function returns:
    (solutions_or_'without_solutions', exercises).

    Raises ValueError for an unknown resource or teaching goal (previously
    these fell through and crashed later with a NameError).
    """
    # unpack the retrieved generation parameters
    resource, instances, difficulty, teaching_goal, extra_options = str(retrieved_parameters).split("_")
    # the input resource; shuffled so repeated runs sample different sentences
    if resource == 'wikipedia':
        corpus = pd.read_pickle('./data/wikipedia_corpus_complete.pkl')
    elif resource == 'bnc':
        corpus = pd.read_pickle('./data/bnc_corpus_complete.pkl')
    else:
        raise ValueError("Unknown corpus resource: {}".format(resource))
    corpus = corpus.sample(frac=1).reset_index(drop=True)
    # the number of sentences
    sentence_number = int(instances)
    # the difficulty level of the exercises
    learner_level = difficulty
    # teaching goal -> (couple id, parity tag, title tag, exercise type);
    # a lookup table replaces the original nine-branch elif chain
    goal_table = {
        'fib-present-simple-progr': (1, 'verb_tense_even', 'Simple_Progressive_Present', 'fib'),
        'fib-past-simple-progr': (2, 'verb_tense_even', 'Simple_Progressive_Past', 'fib'),
        'fib-present-past-perfect': (3, 'verb_tense_odd', 'Perfect_Present_Past', 'fib'),
        'mistakes-present-simple-progr': (1, 'verb_tense_even', 'Simple_Progressive_Present', 'mistakes'),
        'mistakes-past-simple-progr': (2, 'verb_tense_even', 'Simple_Progressive_Past', 'mistakes'),
        'mistakes-present-past-perfect': (3, 'verb_tense_odd', 'Perfect_Present_Past', 'mistakes'),
        'multiple-present-simple-progr': (1, 'verb_tense_even', 'Simple_Progressive_Present', 'multiple'),
        'multiple-past-simple-progr': (2, 'verb_tense_even', 'Simple_Progressive_Past', 'multiple'),
        'multiple-present-past-perfect': (3, 'verb_tense_odd', 'Perfect_Present_Past', 'multiple'),
    }
    try:
        teach_goal = goal_table[teaching_goal]
    except KeyError:
        raise ValueError("Unknown teaching goal: {}".format(teaching_goal))
    # the generation process is similar for the fib and find-mistakes type and different for the multiple-choice
    if teach_goal[3] == 'multiple':
        return generate_files_multiple(corpus, learner_level, sentence_number, teach_goal, extra_options)
    else:
        return generate_files_fib_finderror(corpus, learner_level, sentence_number, teach_goal, extra_options)
| 3.40625
| 3
|
chapter09/urlopen_auth3.py
|
NetworkRanger/python-core
| 1
|
12774223
|
<reponame>NetworkRanger/python-core
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/8/10 11:05 PM
import urllib.request, urllib.error, urllib.parse
# credentials and target for the demo HTTP basic-auth requests
LOGIN = 'wesley'
PASSWD = "<PASSWORD>"
URL = 'http://localhost'
REALM = 'Secure Archive'


def handler_version(url):
    """Install a global opener that answers basic-auth challenges for url's host.

    Returns url unchanged so the caller can pass it straight to urlopen.
    """
    hdlr = urllib.request.HTTPBasicAuthHandler()
    # register the credentials for this realm on the URL's network location
    hdlr.add_password(REALM, urllib.parse.urlparse(url)[1], LOGIN, PASSWD)
    opener = urllib.request.build_opener(hdlr)
    urllib.request.install_opener(opener)
    return url


def request_version(url):
    """Return a Request that carries a pre-built Basic Authorization header."""
    from base64 import b64encode
    req = urllib.request.Request(url)
    # bug fixes: base64.encodestring was removed in Python 3.9 (b64encode is
    # the replacement), and the value must be decoded to str -- interpolating
    # the raw bytes produced an invalid header like "Basic b'...'"
    b64str = b64encode(bytes('%s:%s' % (LOGIN, PASSWD), 'utf-8')).decode('ascii')
    req.add_header('Authorization', 'Basic %s' % b64str)
    return req
# Exercise both authentication approaches against the local server.
for funcType in ('handler', 'request'):
    print('*** Using %s:' % funcType.upper())
    # eval() only resolves 'handler_version'/'request_version' from the fixed
    # tuple above; note 'url' is a str for the handler path but a Request
    # object for the request path (urlopen accepts both). Avoid this eval
    # pattern with any external input.
    url = eval('%s_version' % funcType)(URL)
    f = urllib.request.urlopen(url)  # performs a real network request
    print(str(f.readline(), 'utf-8'))
    f.close()
| 2.125
| 2
|
evscaperoom/states/state_009_fertilizer.py
|
Griatch/evscaperoom
| 8
|
12774224
|
"""
Time to make the super-fertilizer from our ingredients.
"""
from random import random
from evennia.utils import interactive
from ..state import BaseState
from .. import objects
GREETING = """
This is the situation, {name}:
The |rJester|n wants to win your village's yearly |wpie-eating contest|n.
As it turns out, you are one of her most dangerous opponents.
Today, the day of the contest, she invited you to her small cabin for a
'strategy chat'. But she tricked you and now you are |wlocked in|n! If you
don't get out before the contest starts she'll get to eat all those pies on
her own and surely win!
When you get into the cabin, the *chest was just opened, revealing its
secrets to the world ...
"""
INTRO1 = """
Over by the door, Vale claps its hands.
"""
# ------------------------------------------------------------
# chest (kept open)
# ------------------------------------------------------------
CHEST_DESC = """
The chest stands open and prop it up so it won't close again accidentally. It's
pretty small, most of its size was clearly taken up by the intricate locking
mechanism.
Inside it you can see a brass *monocular and a partly burnt *letter.
"""
class ChestOpen(objects.EvscaperoomObject):
    """The chest in its propped-open state (see CHEST_DESC); no extra behavior."""
    pass
# ------------------------------------------------------------
# looking glass
# ------------------------------------------------------------
LOOKINGGLASS_DESC = """
This is a bronze monocular - a 'looking glass' of the type used by sea captains.
It seems to have pretty strong lenses. Better not look at the sun!
"""
LOOKINGGLASS_APPLY = """
You see only a blur. Things in here is way too close.
"""
LOOKINGGLASS_APPLY2 = """
You could maybe direct light through the monocular to put this on fire, but
you doubt it'd burn very well.
"""
LOOKINGGLASS_APPLY_WINDOW = """
The leaves of a distant tree fills your vision. A bird jumps around on a branch, unaware
of your spying. Interesting, but not very useful right now.
However, as you hold up the tube to the window in the right angle you notice
that it causes a very focused spot of intense light around the
location of the fireplace.
"""
LOOKINGGLASS_BOOK = """
You consider burning the Jester's book with the Looking glass. But what if you
need something in there for a later puzzle? There must be something else of
similar dryness that you could burn!
"""
LOOKINGGLASS_BED = """
While the mattress might burn if it was packed with dry straw. But instead it's
packed with fresh grass from the meadow, no way that'll burn any time soon. The
whole thing is even a bit damp ... Guess now you know why! Clearly the Jester
didn't want you to torch her bed.
"""
LOOKINGGLASS_APPLY_TO_ROOM = """
~You ~focus the *monocular at {target}.
"""
LOOKINGGLASS_THINK = """
If you use the looking glass on something that is easily flammable you might be
able to direct enough sunlight onto it to put it on fire!
"""
class LookingGlass(objects.Usable):
    """The brass monocular; can focus sunlight to set flammable objects on fire."""
    # flag an object must carry to be a valid target (presumably checked by
    # the Usable base class -- TODO confirm in objects.Usable)
    target_flag = "looking_glass_sun"
    def at_apply(self, caller, action, obj):
        """Use the monocular on obj: burn it if burnable, else give a themed hint."""
        self.msg_room(caller, LOOKINGGLASS_APPLY_TO_ROOM.format(target=obj.key))
        if obj.check_flag("burnable"):
            # actually burnable item
            self.room.score(2, "burn with monocular")
            obj.handle_burn(caller, self)
        elif obj.key == "book":
            self.room.score(1, "try to burn book")
            self.msg_char(caller, LOOKINGGLASS_BOOK.strip())
        elif obj.key == "bed":
            self.room.achievement(caller, "Fool planning", "Figured out why the bed is damp")
            self.msg_char(caller, LOOKINGGLASS_BED.strip())
        else:
            self.room.score(1, "used monocular with window")
            self.msg_char(caller, LOOKINGGLASS_APPLY_WINDOW.strip())
    def get_callsigns(self):
        """Return (extra callsigns, help text) advertised for this object."""
        txt = "Actions that make sense: *use on <thing> and *think"
        return [], txt
    def at_focus_think(self, caller, **kwargs):
        """Hint at the sunlight-burning mechanic."""
        self.msg_char(caller, LOOKINGGLASS_THINK.strip())
    def at_cannot_apply(self, caller, action, obj):
        """Used on an invalid target: show one of two messages at random."""
        self.msg_room(caller, LOOKINGGLASS_APPLY_TO_ROOM.format(target=obj.key))
        # random() is the module-level `from random import random` import
        if random() < 0.5:
            self.msg_char(caller, LOOKINGGLASS_APPLY)
        else:
            self.room.score(1, "Getting random insight from monocular")
            self.msg_char(caller, LOOKINGGLASS_APPLY2)
# ------------------------------------------------------------
# letter
# ------------------------------------------------------------
LETTER_DESC = """
This letter sits in a crumpled and partly burned envelope; this looks like
something that was never delivered.
On the front of the envelope it simply says:
To Agda
"""
LETTER_READ = """
The letter is written in a compact and tight handwriting.
My beloved Agda,
I have written so many letters that I've then torn up. Know that I've
always loved you in secret and that I never aimed to hurt you. But that
doesn't change the fact that I am the reason you are the way you are today.
You will know how happy I was when you confided in me. But after that kid
got hurt, I just couldn't live with the secret. I laced the champion's pies
with my potion. I am a coward, I could not think of any other way to stop
him. How was I to know he'd lose his appetite that day of all days?
He has been so cold to you since, but at least you are apart now. I did not
know about your other circumstance until much later. But I understand why
your parents had to do what they did in secret. This guilt too is on my
shoulders, for I was the one that made you incapable of such a
responsibility in the first place. You used to hit me out of sport, now you
can't even raise your hand to defend yourself. It's maddening!
I experiment with my hintberries daily to find a way to reverse your
condition. Even if you won't love me, but continue to ridicule and mock me,
I have no bigger wish than to see the old you return. Because then one day,
maybe I'll forgive myself.
Yours forever,
Vale
"""
class Letter(objects.Readable):
    """The burned letter; reading it unlocks the mixable potted plant."""

    def at_read(self, caller):
        """Show the letter text; on first read, spawn the PlantMixable object."""
        self.room.score(2, "read letter")
        self.msg_char(caller, LETTER_READ.strip())
        if not self.check_flag("read_already"):
            # reading the letter makes the plant mixable so
            # we can create the fertilizer
            plant = self.room.state.create_object(
                PlantMixable, key="plant")
            plant.db.desc = PLANT_DESC.strip()
# ------------------------------------------------------------
# potted plant (on table)
# ------------------------------------------------------------
PLANT_DESC = """
On the table, on the side nearest to the window, stands a potted plant - it's a
rose cutling, no more than a little green stem and a few leaves.
"""
PLANT_DIG_ROOM = """
~You digs around in the dirt of the *plant, to no avail.
"""
PLANT_DIG = """
Carefully you probe around in the small pot with your fingers, but even after
circling the cutling and fully probed the bottom of the pot you don't find
anything hidden in the dirt.
You shuffle the displaced dirt back into place around the fledging little plant.
"""
PLANT_FEEL_ROOM = """
~You ~prick a finger on *plant, letting a drop of blood fall into the dirt.
"""
PLANT_FEEL = """
Ouch! It may be small, but this thing already has thorns! You draw a drop of
blood and let it fall into the dirt.
"""
PLANT_MIX_RESET = """
The mix does not seem to work. ~you ~wipe off the top layer of soil from the
*plant and ~start again.
"""
PLANT_MIX_SUCCESS = """
As ~you ~drop {ingredient} into the soil of the *plant, the rose stickling
suddently starts to shift and writhe. ~You quickly ~back away.
"""
class PlantMixable(objects.Feelable, objects.Mixable):
    """The potted rose cutling; acts as the mixing vessel for the fertilizer."""

    # Flag marking this object as the fertilizer mixing vessel.
    mixer_flag = "fertilizer_mixer"
    # Ingredients must be added in exactly this order for the mix to succeed.
    ingredient_recipe = [
        "childmaker",
        "ashes",
        "ashes",
        "ashes",
        "hintberries",
        "blood",
    ]

    def at_object_creation(self):
        super().at_object_creation()
        # The plant itself counts as the 'blood' ingredient (via at_focus_feel).
        self.set_flag("blood")
        self.set_flag("fertilizer_mixer")

    def at_focus_dig(self, caller, **kwargs):
        """Digging finds nothing but resets any mixing progress."""
        self.msg_room(caller, PLANT_DIG_ROOM.strip(), True)
        self.msg_char(caller, PLANT_DIG.strip())
        # reset any mixture now
        self.db.ingredients = []

    def at_focus_feel(self, caller, **kwargs):
        """Pricking a finger adds this object itself (the 'blood') to the mix."""
        # add ourselves!
        self.msg_room(caller, PLANT_FEEL_ROOM.strip(), True)
        self.handle_mix(caller, self, txt=PLANT_FEEL.strip())

    def at_mix(self, caller, ingredient, txt=None):
        self.msg_char(caller, txt)

    def at_mix_failure(self, caller, ingredient, **kwargs):
        # A wrong ingredient (or wrong order) wipes the mix and starts over.
        self.msg_room(caller, PLANT_MIX_RESET.strip())

    @interactive
    def at_mix_success(self, caller, ingredient, **kwargs):
        """Completed recipe: award score, show the effect, advance the room state."""
        self.room.score(2, "Made fertilizer")
        self.msg_room(caller, PLANT_MIX_SUCCESS.format(ingredient=ingredient.key).lstrip())
        yield(2)
        self.next_state()
# ------------------------------------------------------------
# state
# ------------------------------------------------------------
STATE_HINT_LVL1 = """
It's time to grow to the occation. The *locket that fell down into the *ashes
looks important.
"""
STATE_HINT_LVL2 = """
The *letter may be useful for figuring out how to open the *locket. Inside
you'll find the end of a recipe. See if you can find the beginning of it
somewhere.
"""
STATE_HINT_LVL3 = """
Say 'Agda' to the locket to open it. Read about the FERTILIZER in *book to get
the first part of the fertilizer recipe. To make the fertilizer, put the
ingredients in the potted *plant. You need a drop of blood as the last
ingredient. Maybe if you pricked your finger?
"""
STATE_HINT_LVL4 = """
Use the following ingredients with *plant:
- *childmaker potion
- *ashes
- *ashes
- *ashes
- *pie (the hintberry pie)
Finally, examine and feel the potted *plant to prick yourself on its thorns and
get a drop of blood.
"""
class State(BaseState):
    """Room state: the player must assemble the fertilizer and feed the plant."""

    next_state = "state_010_burn_firewood"
    # Hints in increasing order of explicitness.
    hints = [STATE_HINT_LVL1,
             STATE_HINT_LVL2,
             STATE_HINT_LVL3,
             STATE_HINT_LVL4]

    def character_enters(self, character):
        # Greet each newly arriving character with the room cinematic.
        self.cinematic(GREETING.format(name=character.key),
                       target=character)

    @interactive
    def init(self):
        """Create/clean up this state's objects; @interactive allows yield pauses."""
        # we don't need the lever anymore
        lever = self.get_object("lever")
        if lever:
            lever.delete()
        # chest needs no further interaction
        chest = self.create_object(
            ChestOpen, key="chest")
        chest.db.desc = CHEST_DESC.strip()
        lookingglass = self.create_object(
            LookingGlass, key="looking glass", aliases=["monocular", "lookingglass", "glass"])
        lookingglass.db.desc = LOOKINGGLASS_DESC.strip()
        letter = self.create_object(
            Letter, key="letter")
        letter.db.desc = LETTER_DESC.strip()
        yield(3)
        self.msg(INTRO1.rstrip())

    def clean(self):
        # Advance overall game progress once this state is finished.
        super().clean()
        self.room.progress(84)
| 3.078125
| 3
|
genstar.py
|
PoHuit/starlight
| 1
|
12774225
|
# Copyright (c) 2017 <NAME>
# [This program is licensed under the "MIT License"]
# Please see the file COPYING in the source
# distribution of this software for license terms.
"""Generate a simulated EVE Online™ Project Discovery star
luminance trace."""
from random import *
from math import *
samples_per_day = 144
days_per_trace = 30
instrument_noise_amplitude = 0.1


def gen_segment(nsegment,
                cycle_amplitude=None,
                cycle_period=None,
                cycle_phase=None,
                walk_amplitude=None):
    """Produce `nsegment` luminance samples for one trace segment.

    Each sample is the sum of three components: Gaussian instrument
    noise, an accumulating random walk, and a cosine oscillation.
    """
    samples = []
    drift = 0.0
    for step in range(nsegment):
        # The order of the two gauss() draws matters for reproducibility
        # under a fixed RNG seed, so instrument noise comes first.
        value = gauss(0.0, 0.5) * instrument_noise_amplitude
        drift += gauss(0.0, 0.5) * walk_amplitude
        value += drift
        value += cos(2 * pi * (step + cycle_phase) / cycle_period) * cycle_amplitude
        samples.append(value)
    return samples
def gen_star():
    """Generate a simulated star luminance trace.  The trace will
    have a mean of 0 and consist of samples in the range
    -1..1 or less.
    """
    # Oscillation period between ~0.1 and ~4.1 days, with a random phase.
    cycle_period = int((0.1 + random() * 4.0) * samples_per_day)
    cycle_phase = randrange(int(cycle_period))
    cycle_amplitude = 0.0
    walk_amplitude = 0.0
    # ~30% of traces oscillate; otherwise ~60% get random-walk noise,
    # and the rest have instrument noise only.
    if random() < 0.3:
        cycle_amplitude = 0.01 + 0.1 * random()
    elif random() < 0.6:
        walk_amplitude = random() * 0.025
    # Calculate a trace as a series of segments.
    # Yes, this is left-biased. Meh.
    nsegments = max(1, int(gauss(0.5, 4.0)))
    nsamples = samples_per_day * days_per_trace
    samples = []
    while nsegments > 0:
        segment_max = nsamples - len(samples)
        if nsegments > 1 and segment_max >= samples_per_day:
            # Intermediate segments get a random brightness offset.
            segment_bias = random() * 0.4 - 0.2
            nsegment = randrange(samples_per_day, segment_max)
        else:
            # Final (or too-short remainder) segment fills all remaining samples.
            segment_bias = 0.0
            nsegment = segment_max
        segment = gen_segment(nsegment,
                              cycle_amplitude=cycle_amplitude,
                              cycle_period=cycle_period,
                              cycle_phase=cycle_phase,
                              walk_amplitude = walk_amplitude)
        samples += [s + segment_bias for s in segment]
        nsegments -= 1
    # Normalize: subtract the mean, and rescale so the full span does not
    # exceed 1 (already-small traces are left unscaled).
    min_sample = samples[0]
    max_sample = samples[0]
    sum_samples = samples[0]
    for s in samples[1:]:
        min_sample = min(min_sample, s)
        max_sample = max(max_sample, s)
        sum_samples += s
    assert max_sample > min_sample
    scale = min(1.0, 1.0 / (max_sample - min_sample))
    bias = sum_samples / len(samples)
    return [(s - bias) * scale for s in samples]


if __name__ == '__main__':
    # Emit one sample per line to stdout.
    for s in gen_star():
        print(s)
| 2.921875
| 3
|
geocoder/here_reverse.py
|
termim/geocoder
| 1,506
|
12774226
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.location import Location
from geocoder.here import HereResult, HereQuery
class HereReverseResult(HereResult):
    """Result wrapper for HERE reverse-geocoding responses."""

    @property
    def ok(self):
        # A reverse lookup is considered successful if an address was resolved.
        return bool(self.address)
class HereReverse(HereQuery):
    """
    HERE Geocoding REST API
    =======================

    Send a request to the geocode endpoint to find an address
    using a combination of country, state, county, city,
    postal code, district, street and house number.

    API Reference
    -------------
    https://developer.here.com/rest-apis/documentation/geocoder
    """
    provider = 'here'
    method = 'reverse'

    _RESULT_CLASS = HereReverseResult
    _URL = 'http://reverse.geocoder.cit.api.here.com/6.2/reversegeocode.json'

    def _build_params(self, location, provider_key, **kwargs):
        """Build query parameters for a reverse-geocode request."""
        params = super(HereReverse, self)._build_params(location, provider_key, **kwargs)
        # Forward geocoding uses 'searchtext'; reverse lookups use 'prox' instead.
        del params['searchtext']

        location = str(Location(location))
        params.update({
            'prox': location,
            'mode': 'retrieveAddresses',
            'gen': 8,
        })
        return params


if __name__ == '__main__':
    g = HereReverse([45.4049053, -75.7077965])
    g.debug()
| 3.03125
| 3
|
legiscrape/video.py
|
notpeter/legiscrape
| 3
|
12774227
|
import json
import logging
import os
import shutil
import subprocess
from urllib.parse import parse_qsl, urlsplit
import requests
def es_search(index, clip_id):
    """Query the Granicus Elasticsearch index for a clip's video metadata.

    Args:
        index: name of the Elasticsearch index to search.
        clip_id: Granicus video/clip identifier to match.

    Returns:
        The video URL string from the first hit, or None when the response
        does not contain the expected structure or has no hits.
    """
    search_url = 'http://search.granicus.com/api/%s/_search' % index
    query = {'query': {'match': {'video_id': {'query': clip_id}}}, 'size': 1}
    query = json.dumps(query)
    # Bound the request so a stalled server cannot hang the caller forever.
    result = requests.post(search_url, data=query, timeout=30)
    result = json.loads(result.content)
    # FIXME: This response-shape probing is fragile.
    try:
        video = result['hits']['hits'][0]['_source']['http']
    except (KeyError, IndexError):
        # BUG FIX: an empty 'hits' list raised IndexError, which the
        # original `except KeyError` did not catch.
        video = None
    return video
def get(url, filename=None):
    """Save contents of a URL to a file using the best available downloader.

    Prefers aria2c (multi-connection, 3-4x faster on slow Granicus servers),
    then wget, then curl — all invoked so partial downloads can resume.

    Args:
        url: URL to download.
        filename: target file name; defaults to the last path segment of url.

    Returns:
        The file name the content was saved to.

    Raises:
        EnvironmentError: when no supported download tool is installed.
        subprocess.CalledProcessError: when the downloader exits non-zero.
    """
    # TODO: Make this not require curl/wget/aria2c
    # Sadly resumable downloads with status updates is non-trivial with requests.
    if not filename:
        filename = urlsplit(url).path.split('/')[-1]
    if shutil.which('aria2c'):
        conns = '4'
        cmd = ['aria2c', '--summary-interval', '0', '--auto-file-renaming=false',
               '-x', conns, '-s', conns, '-o', filename, url]
    elif shutil.which('wget'):
        cmd = ['wget', '-c', '-O', filename, url]
    elif shutil.which('curl'):
        cmd = ['curl', '-L', '-o', filename, '-O', '-C', '-', url]
    else:
        raise EnvironmentError("No curl or wget...what is this place?")
    logging.info('Running %s', " ".join(cmd))
    # check=True surfaces failed downloads instead of silently returning a
    # filename whose content may be missing or truncated.
    subprocess.run(cmd, check=True)
    return filename
def remux(filename, output_filename, srt_filename=None, chapter_filename=None):
    """Demux an MP4 into raw h264 video + aac audio, then remux with MP4Box.

    Optionally embeds an SRT subtitle track and a chapter file.

    Args:
        filename: source MP4 path.
        output_filename: path of the remuxed MP4 to create.
        srt_filename: optional subtitle file to embed.
        chapter_filename: optional chapter file to embed.

    Raises:
        EnvironmentError: when ffmpeg or MP4Box is not installed.
    """
    if not shutil.which('ffmpeg'):
        raise EnvironmentError("Couldn't find ffmpeg.")
    if not shutil.which('MP4Box'):
        raise EnvironmentError("Couldn't find MP4Box.")
    cmd = ['ffmpeg', '-y', '-i', filename, '-vcodec', 'copy',
           '-an', '-bsf:v', 'h264_mp4toannexb', "%s.h264" % filename,
           '-vn', '-acodec', 'copy', "%s.aac" % filename]
    proc = subprocess.Popen(cmd)
    proc.communicate()
    logging.info('Demuxing: %s', " ".join(cmd))
    cmd = ["MP4Box", "-add", "%s.h264#video" % filename,
           "-add", "%s.aac#audio" % filename]
    if srt_filename and os.path.exists(srt_filename):
        cmd.extend(['-add', "%s#lang=eng" % srt_filename])
    # BUG FIX: this condition previously guarded on srt_filename, so passing
    # a subtitle file with chapter_filename=None crashed in os.path.exists(None).
    if chapter_filename and os.path.exists(chapter_filename):
        cmd.extend(['-chap', chapter_filename])
    cmd.extend(['-new', output_filename])
    logging.info('Remuxing: %s', " ".join(cmd))
    proc = subprocess.Popen(cmd)
    proc.communicate()
| 2.625
| 3
|
Chapter04/4_1_download_data.py
|
shamir456/Python-Network-Programming-Cookbook-Second-Edition
| 125
|
12774228
|
#!/usr/bin/env python
# Python Network Programming Cookbook -- Chapter - 4
# This program requires Python 3.5.2 or any later version
# It may run on any other version with/without modifications.
#
# Follow the comments inline to make it run on Python 2.7.x.
import argparse
import urllib.request
# Comment out the above line and uncomment the below for Python 2.7.x.
#import urllib2
REMOTE_SERVER_HOST = 'http://www.cnn.com'
class HTTPClient:
    """Minimal HTTP client that fetches a URL and decodes the body as UTF-8."""

    def __init__(self, host):
        # Full URL (including scheme) of the server to fetch.
        self.host = host

    def fetch(self):
        """GET self.host and return the response body as decoded text."""
        response = urllib.request.urlopen(self.host)
        # Comment out the above line and uncomment the below for Python 2.7.x.
        #response = urllib2.urlopen(self.host)
        data = response.read()
        text = data.decode('utf-8')
        return text


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='HTTP Client Example')
    parser.add_argument('--host', action="store", dest="host", default=REMOTE_SERVER_HOST)
    given_args = parser.parse_args()
    host = given_args.host
    client = HTTPClient(host)
    print (client.fetch())
| 3
| 3
|
dash_app/app.py
|
dancasey-ie/Grid2LatLon
| 0
|
12774229
|
import json
import requests
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_leaflet as dl
import pandas as pd
from pyproj import Transformer
import plotly.graph_objects as go
from dash.dependencies import Output, Input, State
import index
MAP_ID = "map-id"
BASE_LAYER_ID = "base-layer-id"
BASE_LAYER_DROPDOWN_ID = "base-layer-drop-down-id"
COORDINATE_CLICK_ID = "coordinate-click-id"
def irishgrid2xy(grid_ref):
    """
    Converts irish grid reference as string i.e. "N 15904 34671"
    to xy (easting northing) with an origin at the bottem
    left of grid "V"

    Returns the (easting, northing) digit strings; references whose
    numeric parts are not exactly 5 digits are returned unmodified.
    """
    # 5x5 grid letters, missing I
    grid = [("V", "W", "X", "Y", "Z"),
            ("Q", "R", "S", "T", "U"),
            ("L", "M", "N", "O", "P"),
            ("F", "G", "H", "J", "K"),
            ("A", "B", "C", "D", "E")]
    grid_ref = grid_ref.split(" ")
    letter = grid_ref[0].upper()
    easting = grid_ref[1]
    northing = grid_ref[2]
    # BUG FIX: the original used bitwise '&', which due to operator precedence
    # parsed as `len(easting) == (5 & len(northing)) == 5` and accepted some
    # malformed lengths (e.g. a 7-digit northing).
    if len(easting) == 5 and len(northing) == 5:
        for i in range(0, 5):
            if letter in grid[i]:
                northing_corr = i
                easting_corr = (grid[i].index(letter))
        # NOTE: an unknown grid letter still raises NameError here; callers
        # wrap this in try/except, so behavior is preserved.
        easting = '%s%s' % (easting_corr, easting)
        northing = '%s%s' % (northing_corr, northing)
    return easting, northing
def xy2irishgrid(x, y):
    """
    Convert x and y coordinate integers into irish grid reference string
    """
    x_str, y_str = str(x), str(y)
    # 5x5 grid letters (I is skipped), row 0 at the southern edge.
    letters = [("V", "W", "X", "Y", "Z"),
               ("Q", "R", "S", "T", "U"),
               ("L", "M", "N", "O", "P"),
               ("F", "G", "H", "J", "K"),
               ("A", "B", "C", "D", "E")]
    if len(x_str) > 6 or len(y_str) > 6:
        return "Not in IRE"
    # The leading digit (if any) selects the 100km grid square; the
    # remaining five digits are the in-square offset.
    if len(x_str) < 6:
        col_digit, easting = '0', x_str
    else:
        col_digit, easting = x_str[0], x_str[1:]
    if len(y_str) < 6:
        row_digit, northing = '0', y_str
    else:
        row_digit, northing = y_str[0], y_str[1:]
    try:
        letter = letters[int(row_digit)][int(col_digit)]
    except Exception:
        return "Not in IRE"
    return '%s %s %s' % (letter, easting, northing)
def xy2latlon(x, y):
    """Convert Irish Grid x/y (epsg:29903) to WGS84 lat/lon, rounded to 5 dp."""
    transformer = Transformer.from_crs("epsg:29903", "epsg:4326")
    lat, lon = transformer.transform(x , y)
    lat, lon = round(lat,5), round(lon,5)
    return lat, lon
def latlon2xy(lat, lon):
    """Convert WGS84 lat/lon to Irish Grid x/y (epsg:29903) as integers."""
    transformer = Transformer.from_crs( "epsg:4326","epsg:29903")
    x, y = transformer.transform(lat, lon)
    x, y = int(x), int(y)
    return x, y
app = dash.Dash(__name__,
url_base_pathname="/grid2latlon/",
meta_tags=[{"name": "viewport",
"content": "width=device-width, initial-scale=1"}],
external_stylesheets=[dbc.themes.BOOTSTRAP,
],
prevent_initial_callbacks=True,
suppress_callback_exceptions=True)
server = app.server
app.title = 'Irish Grid to Lat Lon'
@app.callback(Output("location-text", "children"), [Input(MAP_ID, "location_lat_lon_acc")])
def update_location(location):
    """Show the user's geolocated position as readable text."""
    # location is (lat, lon, accuracy_in_meters) from the leaflet locate control.
    return "You are within {} meters of (lat,lon) = ({},{})".format(location[2], location[0], location[1])
@app.callback(Output("grid_ref_input_table", "style_table"),
              Output("xy_input_table", "style_table"),
              Output("latlon_input_table", "style_table"),
              Input("inputSelector", "value"))
def change_input_table(table):
    """Show only the input table matching the selector (grid ref is the default)."""
    if table == "xy":
        return {'display': 'none'}, {'display': 'block'}, {'display': 'none'}
    elif table == "latlon":
        return {'display': 'none'}, {'display': 'none'}, {'display': 'block'}
    return {'display': 'block'}, {'display': 'none'}, {'display': 'none'}
# NOTE(review): two callbacks in this module are both named `update_on_click`;
# Dash registers each via its decorator so both fire correctly, but the second
# definition shadows this one at module level — consider renaming.
@app.callback(Output('latlon_input_table', 'data'),
              Input('latlon_input_table', 'data'),
              Input(MAP_ID, 'click_lat_lng'),
              Input('xy_input_table', 'data'),
              )
def update_on_click(latlonRows,click_lat_lon,xyRows):
    """Sync the lat/lon input table from map clicks or edited x/y rows."""
    # Determine which input actually triggered this callback.
    ctx = dash.callback_context
    triggeredID=ctx.triggered[0]['prop_id'].split('.')[0]
    if triggeredID == MAP_ID:
        # Map click: fill the first empty row(s) with the clicked coordinates.
        for row in latlonRows:
            if row == {}:
                row['lat']=round(click_lat_lon[0],5)
                row['lon']=round(click_lat_lon[1],5)
        return latlonRows
    elif triggeredID == 'xy_input_table':
        # x/y edit: recompute every lat/lon row; blank out unparsable rows.
        latlonRows=[]
        for row in xyRows:
            try:
                lat, lon = xy2latlon(row['x'], row['y'])
                latlonRows.append({'lat':lat,'lon':lon})
            except:
                latlonRows.append({'lat':"",'lon':""})
        return latlonRows
    return latlonRows
# NOTE(review): duplicate function name — another callback earlier in this
# module is also called `update_on_click`; Dash still registers both.
@app.callback(Output('xy_input_table', 'data'),
              Input('grid_ref_input_table', 'data'),
              )
def update_on_click(gridRefRows):
    """Recompute the x/y table whenever a grid-reference row changes."""
    xyRows = []
    for row in gridRefRows:
        try:
            x, y = irishgrid2xy(row['grid_ref'])
            xyRows.append({'x':x,'y':y})
        except:
            # Malformed/missing grid ref: emit a blank row instead of failing.
            xyRows.append({'x':"",'y':""})
    return xyRows
@app.callback(
    Output('output_table', 'data'),
    Input('latlon_input_table', 'data'))
def update_latlon(rows):
    """Fill in x/y and the Irish grid reference for each lat/lon row.

    Rows that are empty or cannot be converted are left untouched rather
    than aborting the whole update.
    """
    for row in rows:
        try:
            lat, lon = row['lat'], row['lon']
            x, y = latlon2xy(lat, lon)
            row['x'], row['y'] = x, y
            row['grid_ref'] = xy2irishgrid(x, y)
        except Exception:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt) and replaced the dead-code "fail"
            # string expression with an explicit skip.
            continue
    return rows
@app.callback(
    Output('markers', 'children'),
    Input('output_table', 'data'))
def update_output(rows):
    """Rebuild the map markers (with detail tooltips) from the output table."""
    markers_list=[]
    i=0
    for row in rows:
        try:
            marker = dl.Marker(position=[row['lat'], row['lon']],
                               children=dl.Tooltip(
                                   [html.B("Marker {}".format(i)),
                                    html.Br(),
                                    "Grid Ref: {}".format(row['grid_ref']),
                                    html.Br(),
                                    "Lat: {:.2f} \u00b0".format(row['lat']),
                                    html.Br(),
                                    "Lon: {:.2f} \u00b0".format(row['lon']),
                                    html.Br(),
                                    "X: {}".format(row['x']),
                                    html.Br(),
                                    "Y: {}".format(row['y']),
                                    html.Br(),
                                    ]))
            markers_list.append(marker)
            i+=1
        except:
            # Incomplete rows simply get no marker ("fail" is a no-op expression).
            "fail"
    return markers_list
# Create layout.
app.layout = html.Div(children=[
html.Div(id='page-content',
children=index.create_layout(app)),
])
if __name__ == '__main__':
app.run_server(debug=True)
| 2.546875
| 3
|
callers.py
|
Mars-tin/fast-stripe-calling
| 0
|
12774230
|
import pandas as pd
from scipy.signal import find_peaks_cwt
from utils import *
def _stripe_caller(mat, orientation='h',
                   max_range=3000000, resolution=25000,
                   interval=200000, filter_rate=0.95,
                   min_length=500000, closeness=1000000,
                   stripe_width=1, merge=1, window_size=5,
                   chrome='chr1', cell_type='hspc'):
    """Call stripes in a contact matrix in four stages: locate local maxima
    per distance band, slice each candidate's spanning range, merge/filter
    by length and closeness, then keep candidates passing a statistical
    test against a TAD background.

    Distances (max_range, interval, min_length, closeness) appear to be in
    base pairs with `resolution` bp per matrix bin — TODO confirm against
    the utils helpers.
    NOTE(review): `filter_rate` is accepted but never used in this body.
    """
    # Step 1: for different distance ranges pick the "local maximum" positions
    print(' Step 1: Finding local maximum for different contact distances...')
    positions = {}
    # Half-overlapping distance windows of width `interval`.
    for dis in range(0, max_range - min_length + 1, interval // 2):
        print(f' {dis}-{dis + interval}')
        distance_range = (dis, dis + interval)
        pos_h = pick_max_positions(mat, interval=interval,
                                   distance_range=distance_range,
                                   resolution=resolution, line_width=stripe_width,
                                   window_size=window_size)
        for p in pos_h:
            if p not in positions:
                positions[p] = []
            positions[p].append(distance_range)
    print(' A total of {} positions are located'.format(len(positions)))
    # Step 2: find the accurate range of stripe
    print(' Step 2: Finding the spanning range for each stripe...')
    all_positions = []
    lst = sorted(positions.keys())
    for i, idx in enumerate(lst):
        # Skip positions too close to the matrix border for a full window.
        if idx <= window_size or idx >= mat.shape[0] - window_size:
            continue
        arr = line_neighbor_score(mat, idx, line_width=stripe_width,
                                  distance_range=(0, max_range // resolution),
                                  window_size=window_size,
                                  neighbor_trick='mean', line_trick='med',
                                  metric='ratio')
        head, tail, max_val = find_max_slice(arr)
        if max_val > 0:
            all_positions.append((idx, head, tail, max_val))
    print(" Succeed sliced {} locations".format(len(all_positions)))
    # Step 3: Merging and filtering
    print(' Step 3: Merging neighboring stripes...')
    all_positions = merge_positions(all_positions, merge_range=merge)
    new_positions = []
    for elm in all_positions:
        [_, _, head, tail, _] = elm
        # Keep stripes long enough and starting close enough to the diagonal.
        if (tail - head) * resolution >= min_length \
                and head * resolution <= closeness:
            new_positions.append(elm)
        else:
            pass
    print('A total of {} positions are located after merging'.format(len(new_positions)))
    # Step 4: Statistical test on symmetric stripe region
    # TAD domain boundaries (in bins) for this chromosome/cell type.
    df_tad = pd.read_csv("data/tad/{}_{}kb.csv".format(cell_type, 25))
    df_tad = df_tad.loc[df_tad['type'] == "domain"]
    df_tad = df_tad.loc[df_tad['chrom'] == chrome]
    list_tad = np.asarray([df_tad['start']//resolution, df_tad['end']//resolution]).T
    results = []
    print(' Step 4: Statistical Tests...')
    for elm in new_positions:
        [st, ed, head, tail, score] = elm
        p = stat_test(mat, orientation, st, ed, head, tail, stripe_width, window_size, list_tad)
        if p < 0.2:
            results.append([st, ed, head, tail, score, 1-p/2])
    return results
def _deletion_caller(mat, orientation='h',
                     max_range=3000000, resolution=25000,
                     interval=200000, filter_rate=0.95,
                     min_length=500000, closeness=1000000,
                     stripe_width=1, merge=1, window_size=5,
                     chrome='chr1', cell_type='hspc'):
    """Call deletions: like _stripe_caller but seeded from local *minima*
    of per-row contact strength and scored with an inverted slice search.

    NOTE(review): `filter_rate`, `interval`, `chrome` and `cell_type` are
    accepted but unused here; the p-value cutoff (0.001) is much stricter
    than _stripe_caller's (0.2).
    """
    # Step 1: for different distance ranges pick the "local maximum" positions
    print(' Step 1: Finding local minimum for different contact distances...')
    # Invert row sums so weak rows (candidate deletions) become peaks.
    anchor_strength = np.sum(mat, axis=1)
    anchor_strength = np.max(anchor_strength) - anchor_strength
    peaks = find_peaks_cwt(anchor_strength, np.arange(1, window_size))
    print(' A total of {} positions are located'.format(len(peaks)))
    # Step 2: find the accurate range of stripe
    print(' Step 2: Finding the spanning range for each stripe...')
    all_positions = []
    for i, idx in enumerate(peaks):
        # Skip positions too close to the matrix border for a full window.
        if idx <= window_size or idx >= mat.shape[0] - window_size:
            continue
        arr = line_neighbor_score(mat, idx, line_width=stripe_width,
                                  distance_range=(0, max_range // resolution),
                                  window_size=window_size,
                                  neighbor_trick='mean', line_trick='med',
                                  metric='ratio')
        # Negated scores: look for the most *depleted* contiguous slice.
        head, tail, max_val = find_max_slice(-1 * arr)
        if max_val > 0:
            all_positions.append((idx, head, tail, max_val))
    print(" Succeed sliced {} locations".format(len(all_positions)))
    # Step 3: Merging and filtering
    print(' Step 3: Merging neighboring stripes...')
    all_positions = merge_positions(all_positions, merge_range=merge)
    new_positions = []
    for elm in all_positions:
        [_, _, head, tail, _] = elm
        # Keep candidates long enough and starting close enough to the diagonal.
        if (tail - head) * resolution >= min_length \
                and head * resolution <= closeness:
            new_positions.append(elm)
        else:
            pass
    print('A total of {} positions are located after merging'.format(len(new_positions)))
    # Step 4: Statistical test
    results = []
    print(' Step 4: Statistical Tests...')
    for elm in new_positions:
        [st, ed, head, tail, score] = elm
        p = stat_test(mat, orientation, st, ed, head, tail, stripe_width, window_size)
        if p < 0.001:
            results.append([st, ed, head, tail, score, 1-p/2])
    return results
| 2.1875
| 2
|
numpy_unique.py
|
Kalpavrikshika/python_modules
| 1
|
12774231
|
# Demo of numpy.unique and its return_index / return_inverse / return_counts
# modes. NOTE: the name `indices` is reused for three different things below
# (first-occurrence indices, inverse indices, then counts) to match the
# printed narrative of the original tutorial.
import numpy  # FIX: was the redundant self-alias `import numpy as numpy`

a = numpy.array([5,2,6,2,7,5,6,8,2,9])

print ('First array:')
print(a)
print('\n')

print('Unique values of first array:')
u = numpy.unique(a)
print(u)
print('\n')

print('Unique array and indices array:')
# return_index: index of the first occurrence of each unique value.
u, indices = numpy.unique(a, return_index = True)
print (indices)
print('\n')

print('We can see each number corresponds to index in original array:')
print(a)
print('\n')

print('Indices of unique array:')
# return_inverse: for each element of `a`, its position within `u`.
u, indices = numpy.unique(a, return_inverse = True)
print (u)
print ('\n')

print('Indices are:')
print(indices)
print('\n')

print('Reconstruct the original array using indices')
print (u[indices])
print ('\n')

print('Return the count repetitions of unique elements:')
# return_counts: number of occurrences of each unique value.
u, indices = numpy.unique(a, return_counts = True)
print(u)
print(indices)
| 3.734375
| 4
|
lxman/cli/page.py
|
stuxcrystal/lxman
| 1
|
12774232
|
<gh_stars>1-10
from colorama import Fore
import json
class Page(object):
    """A titled key/value table that can be emitted as colored text or JSON."""

    def __init__(self, title):
        self.title = title
        self.table = []

    def push(self, name, value):
        """Append a (name, value) row to the table."""
        self.table.append((name, value))

    def output(self, raw=False):
        """Emit the page: JSON when `raw` is true, colorized text otherwise."""
        if raw:
            print(self.json())
        else:
            self.echo()

    def echo(self):
        """Pretty-print the page with colorama colors, aligning the keys."""
        print(Fore.GREEN + self.title)
        print(Fore.GREEN + ('-'*len(self.title)))
        # Width of the longest key, used to align the value column.
        widest = max(len(s[0]) for s in self.table) if self.table else 0
        for name, value in self.table:
            label = str(name).ljust(widest)
            prefix = Fore.CYAN + label + Fore.RESET + ' : '
            if value is None:
                print(prefix + Fore.RED + '<None>')
            elif isinstance(value, (list, tuple)):
                # Subsequent items of a sequence are indented under the key.
                continuation = ' ' * (widest + 3)
                for item in value:
                    print(prefix + Fore.CYAN + str(item))
                    prefix = continuation
            else:
                print(prefix + Fore.CYAN + str(value))

    def json(self):
        """Serialize the rows to a JSON object (later duplicate keys win)."""
        return json.dumps(dict(self.table))
| 3.0625
| 3
|
client/verta/verta/_cli/deployment/predict.py
|
stefan-petrov-toptal/modeldb
| 835
|
12774233
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import click
import json
from .deployment import deployment
from ... import Client
@deployment.group()
def predict():
    """Making prediction to a deployment-related entity.

    For example, to make a prediction to an endpoint, run

    `verta deployment predict endpoint "<endpoint path>" --data "<input data>"`

    """
    pass

@predict.command(name="endpoint")
@click.argument("path", nargs=1, required=True)
@click.option("--data", "-d", required=True, help="Input for prediction. Must be a valid JSON string.")
@click.option("--workspace", "-w", help="Workspace to use.")
def predict_endpoint(path, data, workspace):
    """Making prediction via a deployed endpoint.

    """
    client = Client()
    try:
        endpoint = client.get_endpoint(path=path, workspace=workspace)
    except ValueError:
        # get_endpoint raises ValueError for unknown paths; surface a CLI error.
        raise click.BadParameter("endpoint with path {} not found".format(path))

    deployed_model = endpoint.get_deployed_model()
    # Echo the prediction as JSON so output can be piped to other tools.
    result = deployed_model.predict(json.loads(data))
    click.echo(json.dumps(result))
| 2.578125
| 3
|
src/basic/exc_trace.py
|
liuguanglin/Learn_Python
| 0
|
12774234
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import traceback
import logging
class MyErr(Exception):
    """Custom exception raised when the divisor parses to zero."""
    pass
def division(s):
    """Divide 10 by int(s); raise MyErr when the divisor would be zero."""
    divisor = int(s)
    if divisor == 0:
        raise MyErr('Divisor cannot be %s' % s)
    return 10 / divisor
def func(x):
    """Demonstrate printing a traceback manually via traceback.print_exc()."""
    try:
        division(x)
    except MyErr as me:
        print(me)
        # Prints the full traceback to stderr without re-raising.
        traceback.print_exc()
        print('traceback')
    finally:
        print('End func()')

func('0')
print('-' * 40)
def func2(x):
    """Demonstrate logging an exception with traceback via logging.exception()."""
    try:
        division(x)
    except MyErr as me:
        print(me)
        # logging.exception logs at ERROR level and appends the traceback.
        logging.exception(me)
        print('logging...')
    finally:
        print('End func2()')

func2('0')
print('END')
| 3.65625
| 4
|
libs/datasets/dataset_export.py
|
imxtrabored/covid-data-model
| 2
|
12774235
|
<gh_stars>1-10
from typing import Iterator
import logging
from libs.datasets.timeseries import TimeseriesDataset
from libs.datasets.dataset_utils import AggregationLevel
_logger = logging.getLogger(__name__)
STATE_EXPORT_FIELDS = ["state", "cases", "deaths", "source", "date"]
COUNTY_EXPORT_FIELDS = ["fips", "cases", "deaths", "source", "date"]
def latest_case_summaries_by_state(dataset: TimeseriesDataset) -> Iterator[dict]:
    """Builds summary of latest case data by state and county.

    Data is generated for the embeds which expects a list of records in this format:
        {
            "state": <state>,
            "date": "YYYY-MM-DD",
            "cases": <cases>,
            "deaths": <deaths>,
            "counties": [
                {"fips": <fips code>, "cases": <cases>, "deaths": <deaths", "date": <date>}
            ]
        }

    Args:
        dataset: Timeseries object.

    Yields: (state_abbreviation, state_summary_dict) tuples, one per
        two-letter state; entries with malformed state codes are skipped.
    """
    dataset = dataset.get_subset(None, country="USA")
    latest_state = dataset.latest_values(AggregationLevel.STATE)
    latest_county = dataset.latest_values(AggregationLevel.COUNTY)
    latest_state["date"] = latest_state["date"].dt.strftime("%Y-%m-%d")
    latest_county["date"] = latest_county["date"].dt.strftime("%Y-%m-%d")

    states = latest_state[STATE_EXPORT_FIELDS].to_dict(orient="records")

    for state_data in states:
        state = state_data["state"]
        # Only two-letter state abbreviations are valid; skip anything else.
        if len(state) != 2:
            _logger.info(f"Skipping state {state}")
            continue
        county_data = latest_county[latest_county.state == state]
        counties = county_data[COUNTY_EXPORT_FIELDS].to_dict(orient="records")

        state_data.update({"counties": counties})
        yield state, state_data
| 2.625
| 3
|
components/keras/Train_classifier/_samples/sample_pipeline.py
|
kamalmemon/pipelines
| 0
|
12774236
|
import keras
from kfp import components
chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
keras_train_classifier_from_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f6aabf7f10b1f545f1fd5079aa8071845224f8e7/components/keras/Train_classifier/from_CSV/component.yaml')
number_of_classes = 2
# Creating the network
dense_network_with_sigmoid = keras.Sequential(layers=[
keras.layers.Dense(10, activation=keras.activations.sigmoid),
keras.layers.Dense(number_of_classes, activation=keras.activations.sigmoid),
])
def keras_classifier_pipeline():
    """KFP pipeline: fetch taxi data, derive features/labels, train a classifier."""
    # One month of Chicago taxi trips (capped at 1000 rows).
    training_data_in_csv = chicago_taxi_dataset_op(
        where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
        select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
        limit=1000,
    ).output

    # Binary label: was the driver tipped at all? (drops the raw tip amount)
    training_data_for_classification_in_csv = pandas_transform_csv_op(
        table=training_data_in_csv,
        transform_code='''df.insert(0, "was_tipped", df["tips"] > 0); del df["tips"]; df = df.fillna(0)''',
    ).output

    # Features = everything except the label column.
    features_in_csv = pandas_transform_csv_op(
        table=training_data_for_classification_in_csv,
        transform_code='''df = df.drop(columns=["was_tipped"])''',
    ).output

    # Labels as 0/1 integers.
    labels_in_csv = pandas_transform_csv_op(
        table=training_data_for_classification_in_csv,
        transform_code='''df = df["was_tipped"] * 1''',
    ).output

    keras_train_classifier_from_csv_op(
        training_features=features_in_csv,
        training_labels=labels_in_csv,
        network_json=dense_network_with_sigmoid.to_json(),
        learning_rate=0.1,
        num_epochs=100,
    )
if __name__ == '__main__':
    # BUG FIX: the module only does `from kfp import components` at the top,
    # so the bare `kfp` name was undefined here and raised NameError.
    import kfp
    kfp_endpoint = None
    kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(keras_classifier_pipeline, arguments={})
| 2.796875
| 3
|
snappy_wrappers/tools/ped_to_vcf_header.py
|
PotatoThrone/snappy-pipeline
| 5
|
12774237
|
# -*- coding: utf-8 -*-
"""Add PED file content to VCF file header.
Usage::
$ snappy-ped_to_vcf_header --ped-file PED --output TXT
"""
from __future__ import print_function
import argparse
from collections import OrderedDict, defaultdict, namedtuple
import os
import re
import sys
__author__ = "<NAME> <<EMAIL>>"
#: Translation scheme for PED attributes (sex, disease) to text
PED_TRANSLATE = OrderedDict(
[
("Sex", OrderedDict([("0", "Unknown"), ("1", "Male"), ("2", "Female")])),
("Disease", OrderedDict([("0", "Unknown"), ("1", "Unaffected"), ("2", "Affected")])),
]
)
#: Template VCF header string for PED attributes (sex, disease)
TPL_META = "##META=<ID={id},Type=String,Number=1,Values={values}>"
#: Template VCF header string for samples in PED
TPL_SAMPLE = "##SAMPLE=<ID={id},Sex={sex},Disease={disease}>"
#: Template VCF header string for pedigree structure
TPL_PEDIGREE = "##PEDIGREE=<ID={id},Family={family},Father={father},Mother={mother}>"
#: Donor representation
Donor = namedtuple("Donor", ["family", "id", "father", "mother", "sex", "disease"])


def parse_ped(ped_file):
    """Parse a given PED file and yield each data line as a Donor.

    Comment lines (starting with '#') and blank lines are skipped;
    a line with fewer than six whitespace-separated fields is an error.

    Raises:
        ValueError: when a non-comment line does not have six fields.
    """
    for line in ped_file:
        # FIX: skip blank lines (previously they raised) and use a raw
        # string for the regex ("\s" is an invalid escape in plain strings).
        if not line.strip():
            continue
        fields = re.split(r"\s+", line.rstrip())[:6]
        if fields[0].startswith("#"):
            continue
        if len(fields) != 6:
            raise ValueError("PED file not complete.")
        yield Donor(*fields)
def ped_vcf_header(donors):
    """Return VCF header string given donors.

    Emits ##META lines describing the Sex/Disease vocabularies, one
    ##SAMPLE line per donor, and ##PEDIGREE lines for singletons or
    donors with at least one known parent.
    """
    snippet = []
    families = defaultdict(list)
    for key, value in PED_TRANSLATE.items():
        snippet.append(TPL_META.format(id=key, values="[{}]".format(", ".join(value.values()))))
    for donor in donors:
        # Group donors by family for the pedigree section below.
        families[donor.family].append(donor)
        snippet.append(
            TPL_SAMPLE.format(
                id=donor.id,
                sex=PED_TRANSLATE["Sex"][donor.sex],
                disease=PED_TRANSLATE["Disease"][donor.disease],
            )
        )
    for _, members in families.items():
        for member in members:
            # Pedigree line for singletons or anyone with a known parent ("0" = unknown).
            if len(members) == 1 or not member.father == "0" or not member.mother == "0":
                snippet.append(
                    TPL_PEDIGREE.format(
                        id=member.id,
                        family=member.family,
                        father=member.father,
                        mother=member.mother,
                    )
                )
    return "\n".join(snippet)
def write_header_snippet(header, output):
    """Write `header` (plus a trailing newline) to `output`, creating parent dirs.

    Args:
        header: text content to write.
        output: destination file path; missing parent directories are created.
    """
    parent = os.path.dirname(output)
    if parent:
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...)` guard.
        os.makedirs(parent, exist_ok=True)
    with open(output, "wt") as fh:
        fh.write("{}\n".format(header))
def run(args):
    """Program entry point after parsing the command line."""
    # Pipeline: parse PED -> render VCF header snippet -> write to file.
    donors = parse_ped(args.ped_file)
    header = ped_vcf_header(donors)
    write_header_snippet(header, args.output)
def main(argv=None):
    """Program entry point for parsing the command line."""
    # NOTE(review): the adjacent string literals below concatenate without a
    # space, producing "...information intoVCF header format" in --help output.
    parser = argparse.ArgumentParser(
        description=("Parse PED file and transform pedigree information into" "VCF header format")
    )
    parser.add_argument(
        "--ped-file",
        type=argparse.FileType("rt"),
        required=True,
        help="PED file that contains the pedigree information",
    )
    parser.add_argument(
        "--output", type=str, required=True, help="File with PED information as VCF header snippet"
    )
    args = parser.parse_args(argv)
    run(args)


if __name__ == "__main__":
    sys.exit(main())
| 2.703125
| 3
|
xythrion/extensions/__init__.py
|
fisher60/Xythrion
| 9
|
12774238
|
from pkgutil import iter_modules
# Immutable set of fully-qualified extension module names discovered
# under the xythrion.extensions package directory.
EXTENSIONS = frozenset(
    module.name
    for module in iter_modules(("xythrion/extensions",), "xythrion.extensions.")
)
| 1.179688
| 1
|
python/biograph/internal/vdb_query.py
|
spiralgenetics/biograph
| 16
|
12774239
|
#!/usr/bin/env python3
'''
Query the VDB
'''
import argparse
import logging
import sys
import pprint
import pandas as pd
import orjson as json
from biograph.internal import vdb
def parse_args(clargs):
    """Parse the biograph vdb query command-line arguments.

    When *clargs* is empty, '--help' is appended so argparse prints usage.
    """
    ap = argparse.ArgumentParser(
        description='Query the Spiral Variant DataBase (VDB)'
    )
    if not clargs:
        clargs.append('--help')
    ap.add_argument('sample', type=str, nargs='?', help='Match this sample or annotation name')
    ap.add_argument('-a', '--aid', type=str, help='Match this analysis id')
    ap.add_argument('-g', '--group', help='Use this VDB group', default=None)
    ap.add_argument('--all', action='store_true', help='List every available analysis')
    ap.add_argument('-v', '--verbose', action='store_true', help='Show full details for each analysis')
    ap.add_argument('--annotation', action='store_true', help='Show a list of all available annotations')
    ap.add_argument('--lookup', type=str, help='Look up annotations with this VARID')
    ap.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)
    return ap.parse_args(clargs)
def list_entries(entries, verbose, annotation):
    ''' Pretty print query results

    :param entries: iterable of result tuples from db.query(); the tuple
        shape differs between annotation and VCF queries, and verbose
        queries additionally carry refhash and header columns
    :param verbose: when True, print a multi-line record per entry,
        including selected lines of the stored VCF/GFF header
    :param annotation: True when listing annotations (extra `version`
        column) rather than VCF analyses
    '''
    if verbose:
        for entry in entries:
            # Unpack per query shape; annotation rows carry an extra
            # `version` column between description and refhash.
            if annotation:
                aid, sample, refname, build, imported_on, description, version, refhash, header = entry
            else:
                aid, sample, refname, build, imported_on, description, refhash, header = entry
            print(f"""
       name: {sample}
        aid: {aid}
      build: {build}
    refname: {refname}
    refhash: {refhash}
imported_on: {imported_on}"""
                  )
            # description is NaN when the column was NULL in the table.
            if not pd.isna(description):
                print(f'description: {description}')
            if annotation:
                print(f'version: {version}')
            print()
            # Echo the stored header, suppressing boilerplate meta lines
            # that carry no analysis-specific information.
            for line in header.split('\n'):
                if line.lower().startswith(
                        (
                            '##filter',
                            '##filedate',
                            '##reference',
                            '##info',
                            '##format',
                            '##alt',
                            '##contig',
                            '##refhash',
                            '##fileformat',
                            '#chrom',
                            '##sequence-region',
                            '##gff-version',
                        )
                ):
                    continue
                # ##source lines are split on commas and indented for
                # readability; other lines are printed without the '##'.
                if line.lower().startswith('##source='):
                    subs = line.split(',')
                    print(subs[0][2:])
                    for sub in subs[1:]:
                        print(' ', sub)
                else:
                    print(line[2:])
            print('-=' * 10)
        return
    # not verbose: one fixed-width summary row per entry.
    if annotation:
        print(f"{'name':<16} {'version':<12} {'build':<7} {'analysis_id':<36} {'imported_on':<24} description")
    else:
        print(f"{'name':<16} {'refname':<10} {'build':<7} {'analysis_id':<36} {'imported_on':<24} description")
    for entry in entries:
        if annotation:
            aid, sample, _, build, imported_on, description, version = entry
            print(f"{sample:<16} {version:<12} {build:<7} {aid:<36} {imported_on.ctime():<24} {'' if pd.isna(description) else description}")
        else:
            aid, sample, refname, build, imported_on, description = entry
            print(f"{sample:<16} {refname:10} {build:<7} {aid:<36} {imported_on.ctime():<24} {'' if pd.isna(description) else description}")
def lookup(db, varid_lookup):
    ''' look up an annotation

    Print every annotation row whose varid matches *varid_lookup*,
    with a clickable source-specific URL when the source is known.
    '''
    # Known annotation sources mapped to their canonical web lookup URLs.
    urls = {
        'ClinVar': 'https://www.ncbi.nlm.nih.gov/clinvar/variation/{varid}/',
        'ensGene': 'https://uswest.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={varid}',
        'Ensembl': 'https://uswest.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={varid}',
        'knownGene': 'https://uswest.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={varid}',
        'GCF_000001405.39': 'https://www.ncbi.nlm.nih.gov/search/all/?term={varid}',
        'ncbiRefSeq': 'https://www.ncbi.nlm.nih.gov/search/all/?term={varid}',
        'GeneCards': 'https://www.genecards.org/cgi-bin/carddisp.pl?{varid}',
        'UniProt': 'https://www.uniprot.org/uniprot/{varid}',
        'OMIM': 'https://omim.org/entry/{varid}',
    }
    pp = pprint.PrettyPrinter(indent=4)
    # SECURITY NOTE(review): varid_lookup is interpolated directly into
    # the SQL text; if db.query supports bound parameters, use them here
    # instead of f-string interpolation.
    query = f'''
    SELECT name, chrom, pos, varid, build, t_info
    FROM {db.path.annotation.data_table}
    WHERE varid = '{varid_lookup}'
    '''
    for (name, chrom, pos, varid, build, t_info) in db.query(query):
        if name in urls:
            url = urls[name].format(varid=varid)
        else:
            url = ''
        print(f"{name}:{varid} at {chrom}:{pos} on {build} {url}")
        # t_info is stored as JSON text; pretty-print the decoded object.
        pp.pprint(json.loads(t_info))
        print('')
def main(clargs):
    ''' the main event

    Parse arguments, connect to the VDB, and either perform a varid
    lookup or list matching analyses / annotations.
    '''
    args = parse_args(clargs)
    logLevel = logging.DEBUG if args.debug else logging.WARNING
    logging.basicConfig(stream=sys.stderr, level=logLevel, format='%(message)s')
    db = vdb.connect(group=args.group)
    if args.lookup:
        lookup(db, args.lookup)
        # NOTE(review): exit() is the interactive helper; sys.exit(0)
        # is the conventional form in scripts.
        exit(0)
    # Listing mode: choose the annotation or VCF metadata table, the
    # column used for name matching, and the relevant crawler state.
    if args.annotation:
        table = db.path.annotation.meta_table
        sample_field = 'name'
        ready = db.get_crawler_state(db.path.annotation.crawler) == 'READY'
    else:
        table = db.path.vcf.meta_table
        sample_field = 'sample'
        ready = db.get_crawler_state(db.path.vcf.crawler) == 'READY'
    if not ready:
        logging.warning(f"NOTE: The crawler is currently running. Some data may not yet be indexed.")
    # Without the metadata table the group has never been crawled/imported.
    if not db.query(f"SHOW TABLES LIKE '{table}'"):
        raise SystemExit(f"{db.group} is empty. Run 'biograph vdb import --group {db.group}' to import VCF data,\nor 'biograph vdb group --crawl {db.group}' to update the index.")
    if not args.annotation:
        logging.warning(f"vdb group '{db.group}' {'(frozen)' if db.is_frozen() else ''}")
    # Build optional WHERE filters; skipped entirely with --all.
    # SECURITY NOTE(review): args.aid / args.sample are interpolated
    # directly into the SQL text — prefer bound parameters if supported.
    query_filters = []
    if not args.all:
        if args.aid:
            query_filters.append(f'''
            AND aid = '{args.aid}'
            ''')
        if args.sample:
            if args.annotation:
                query_filters.append(f"AND name = '{args.sample}'")
            else:
                query_filters.append(f"AND sample = '{args.sample}'")
    query_filter = '\n'.join(query_filters)
    fields = [
        'aid',
        sample_field,
        'refname',
        'build',
        'imported_on',
        'description'
    ]
    # Annotations carry a version column; verbose adds refhash + header.
    if args.annotation:
        fields.append('version')
    if args.verbose:
        fields.extend(['refhash', 'header'])
    query = f'''
    SELECT
        {','.join(fields)}
    FROM
        {table}
    WHERE
        1=1
        {query_filter}
    ORDER BY {sample_field} ASC, imported_on DESC
    ;
    '''
    logging.debug(query)
    list_entries(db.query(query, cache=False), args.verbose, args.annotation)
if __name__ == '__main__':
main(sys.argv[1:])
| 2.75
| 3
|
site/config/settings/heroku.py
|
bobhilt/pyhawaii-website
| 0
|
12774240
|
import logging
import environ
from .base import * # noqa
_env = environ.Env()
logging.disable(logging.DEBUG)

#
# invariants
#
DEBUG = False
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True

#
# pulled from environment
#
SECRET_KEY = _env('SECRET_KEY')

# BUG FIX: ('.herokuapp.com') is a plain string, not a tuple — Django's
# host check would do substring matching on it, and the later
# `ALLOWED_HOSTS += list` would raise TypeError. Use a list so it can be
# extended with additional hosts.
ALLOWED_HOSTS = ['.herokuapp.com']

_additional_hosts = _env('ADDITIONAL_ALLOWED_HOSTS', default='')
if _additional_hosts:
    ALLOWED_HOSTS += _additional_hosts.split(',')
| 1.90625
| 2
|
builder/utils/helpers.py
|
zpapakipos/dynabench-1
| 0
|
12774241
|
<filename>builder/utils/helpers.py
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle
def load_queue_dump(path, logger=None):
    """Load a pickled deployment queue from *path*.

    :param path: filesystem path of the pickle dump
    :param logger: optional logger for progress messages
    :return: the unpickled object, or an empty list when *path* is missing
    """
    try:
        # Context manager ensures the handle is closed; the original
        # leaked the file object returned by open().
        with open(path, "rb") as fh:
            queue = pickle.load(fh)
    except FileNotFoundError:
        if logger:
            logger.info("No existing deployment queue found. Re-initializing...")
        return []
    if logger:
        logger.info(f"Load existing queue from {path}.")
    return queue
| 2.3125
| 2
|
changes/verification.py
|
michaeljoseph/changes
| 135
|
12774242
|
import logging
from plumbum import CommandNotFound, local
from changes import shell
log = logging.getLogger(__name__)
def get_test_runner():
    """Return the first available test runner command, or None.

    Preference order: tox, nosetests, py.test. The original kept looping
    after a successful lookup, so the *last* installed runner won rather
    than the preferred first one; returning on first hit fixes that.
    """
    for runner in ['tox', 'nosetests', 'py.test']:
        try:
            return local[runner]
        except CommandNotFound:
            continue
    return None
def run_tests():
    """Executes your tests."""
    runner = get_test_runner()
    if not runner:
        log.info('No test runner found')
        return None
    result = runner()
    log.info('Test execution returned:\n%s' % result)
    return result
def run_test_command(context):
    """Run the context's test command (when configured) and log the result."""
    if not context.test_command:
        return None
    result = shell.dry_run(context.test_command, context.dry_run)
    log.info('Test command "%s", returned %s', context.test_command, result)
    return True
| 2.4375
| 2
|
mirtop/libs/sql.py
|
srinivas32/mirtop
| 0
|
12774243
|
import sqlite3
from sqlite3 import Error
def create_connection():
    """Open an in-memory SQLite database.

    :return: Connection object, or None when the connection fails
    """
    try:
        return sqlite3.connect(":memory:")
    except Error as exc:
        print(exc)
        return None
def create_reads_table(conn, key="sequence"):
    """Create the `reads` table on the given SQLite connection.

    :param conn: open database connection
    :param key: first column of the composite primary key
    :return: None
    """
    ddl = ("CREATE TABLE reads"
           " (name text, sequence text,"
           " chrom text, start int,"
           " PRIMARY KEY(%s, chrom, start))" % key)
    conn.cursor().execute(ddl)
    conn.commit()
def insert_row_in_reads_table(cur, fields):
    """Insert one row into the `reads` table.

    :param cur: database cursor
    :param fields: sequence of (name, sequence, chrom, start)
    :return: None

    Uses a parameterized statement instead of string interpolation; the
    original embedded the values in double quotes, which is vulnerable to
    SQL injection and breaks on data containing quote characters.
    """
    cur.execute("INSERT INTO reads VALUES (?, ?, ?, ?)",
                (fields[0], fields[1], fields[2], fields[3]))
def select_all_reads(conn):
    """Return every row of the `reads` table.

    :param conn: the Connection object
    :return: list of row tuples
    """
    return conn.cursor().execute("SELECT * FROM reads").fetchall()
| 4.21875
| 4
|
open directory.py
|
blulady/python
| 0
|
12774244
|
<filename>open directory.py
from tkinter import *
from tkinter import filedialog
# Build a minimal Tk window with a Browse button: each click opens the
# native folder picker and appends a label showing the chosen path.
root = Tk()
root.title('open directory')
def directory():
    # Callback for the Browse button. The selection is also remembered
    # on `root.directory` (overwritten on every click).
    root.directory = filedialog.askdirectory(initialdir="/",title="Select a Directory")
    label = Label(root, text=root.directory)
    label.pack()
b1 = Button(root, text="Browse", command = directory)
b1.pack()
root.mainloop()
| 3.703125
| 4
|
build/lib/g3ar/threadutils/contractor.py
|
VillanCh/g3ar
| 48
|
12774245
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: Provide some useful thread utils
Created: 2016/10/29
"""
import uuid
import time
import unittest
try:
from queue import Queue, Empty
except:
from Queue import Queue, Empty
import threading
from threading import Thread
import inspect
import traceback
#----------------------------------------------------------------------
def start_thread(func, *args, **kwargs):
    """Run *func* in a daemon thread.

    :param func: callable to execute
    :param args: positional arguments for *func*
    :param kwargs: keyword arguments for *func*
    :return: the started Thread — the original created the thread but
        dropped it, so callers could never join or inspect it
    """
    ret = Thread(target=func, args=args, kwargs=kwargs)
    ret.daemon = True
    ret.start()
    return ret
########################################################################
class Contractor(object):
    """Run many queued tasks concurrently on a bounded pool of threads.

    Tasks are queued with :meth:`add_task` / :meth:`feed`; :meth:`start`
    launches a scheduler thread that spawns at most ``thread_max`` worker
    threads at a time. Each task's return value (or, on failure, a tuple
    of the exception and a stack trace) is pushed onto ``result_queue``.
    """

    def __init__(self, thread_max=50):
        """Constructor

        :param thread_max: maximum number of concurrently running workers
        """
        self.task_list = []
        self.result_queue = Queue()
        self.lock = threading.Lock()
        self.thread_max = thread_max
        # NOTE(review): these counters are read and written from multiple
        # threads, but only _executed_task_count is updated under the lock.
        self._current_thread_count = 0
        self._executed_task_count = 0
        self._task_count = 0

    def _uuid1_str(self):
        '''Returns: random UUID tag '''
        return str(uuid.uuid1())

    def feed(self, target_func, *vargs, **kwargs):
        """Alias for :meth:`add_task`."""
        self.add_task(target_func, *vargs, **kwargs)

    def add_task(self, target_func, *args, **argv):
        '''Add task to Pool and wait to exec

        Params:
            target_func : A callable obj, the entity of the current task
            args : the args of [target_func]
            argv : the argv of [target_func]
        '''
        assert callable(target_func), '[!] Function can \'t be called'
        ret = {}
        ret['func'] = target_func
        ret['args'] = args
        ret['argv'] = argv
        self._task_count = self._task_count + 1
        self.task_list.append(ret)

    def start(self):
        """Start the scheduler thread and return the result queue."""
        ret = Thread(target=self._run)
        ret.daemon = True
        ret.start()
        return self.result_queue

    def _run(self):
        """Scheduler loop: spawn one worker per queued task, waiting
        while ``thread_max`` workers are already running."""
        for i in self.task_list:
            while self.thread_max <= self._current_thread_count:
                time.sleep(0.3)
            self._start_task(i)

    def _start_task(self, task):
        """Reserve a worker slot and spawn a daemon thread for *task*."""
        self._current_thread_count = self._current_thread_count + 1
        try:
            ret = Thread(target=self._worker, args=(task,))
            ret.daemon = True
            ret.start()
        except TypeError:
            # Thread creation failed; release the reserved slot.
            self._current_thread_count = self._current_thread_count - 1

    def _worker(self, dictobj):
        """Execute one task dict and enqueue its result (or exception)."""
        func = dictobj['func']
        args = dictobj['args']
        argv = dictobj['argv']
        try:
            result = func(*args, **argv)
        except Exception as e:
            # Failures are reported as (exception, stack) on the queue.
            result = tuple([e, traceback.extract_stack()])
        self.lock.acquire()
        self._executed_task_count = self._executed_task_count + 1
        self._add_result_to_queue(result=result)
        self.lock.release()

    def _add_result_to_queue(self, **kw):
        """Push a finished result and release the worker slot."""
        assert 'result' in kw, '[!] Result Error!'
        self.result_queue.put(kw['result'])
        self._current_thread_count = self._current_thread_count - 1

    def get_result_queue(self):
        """Return the queue results are delivered on."""
        return self.result_queue

    def get_task_list(self):
        """Return the list of queued task dicts.

        BUG FIX: the original evaluated ``self.task_list`` without
        returning it, so callers always received ``None``.
        """
        return self.task_list

    def get_result_generator(self):
        """Yield results until every queued task has completed."""
        while True:
            try:
                ret = self.result_queue.get(timeout=1)
                yield ret
            except Empty:
                if self._task_count == self._executed_task_count:
                    break
                else:
                    pass

    @property
    def task_count(self):
        """Total number of tasks ever added."""
        return self._task_count

    @property
    def executed_task_count(self):
        """Number of tasks that have finished."""
        return self._executed_task_count

    @property
    def percent(self):
        """Ratio of added to executed tasks.

        NOTE(review): this is task_count / executed_task_count — the
        inverse of a completion fraction — and divides by zero before the
        first task finishes. Kept as-is; confirm intent before relying on it.
        """
        return float(self._task_count)/float(self._executed_task_count)

    @property
    def current_thread_count(self):
        """Number of worker threads currently reserved."""
        return self._current_thread_count
class UtilsTest(unittest.case.TestCase):
    """Smoke test for Contractor, runnable via unittest."""

    def runTest(self):
        # Discover and invoke every method on this instance whose name
        # starts with 'test_'.
        ms = inspect.getmembers(self)
        ms = [x[0] for x in ms]
        for i in ms:
            if callable(getattr(self,i)):
                if i.startswith('test_'):
                    getattr(self, i)()

    def test_pool(self):
        # Queue two sleeping tasks and check that start() returns a Queue
        # and that both results arrive as strings.
        def demo_task(*args):
            '''simulate the plugin.run'''
            print('[!] Computing!')
            time.sleep(args[0])
            print('[!] Finished!')
            print()
            returns = 'Runtime Length : %s' % str(args)
            return returns
        pool = Contractor()
        pool.add_task(demo_task, 7)
        pool.add_task(demo_task, 3)
        q = pool.start()
        print(pool._current_thread_count)
        self.assertIsInstance(q, Queue)
        r = q.get()
        print(r)
        self.assertIsInstance(r, str)
        r = q.get()
        print(r)
        self.assertIsInstance(r, str)
        print(pool._current_thread_count)
if __name__ == '__main__':
unittest.main()
| 2.734375
| 3
|
testresults/scons230_trace/strace/pypy_cpython_touch_only/cpython_run/time_all.py
|
SCons/scons-performance
| 0
|
12774246
|
<filename>testresults/scons230_trace/strace/pypy_cpython_touch_only/cpython_run/time_all.py<gh_stars>0
#!/usr/bin/env python2
import os
import sys
import sconstest
import time
import subprocess
def stdoutCmd(args):
    """Run *args* as a subprocess and return its captured stdout bytes."""
    proc = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return out
def getProcesses():
    # Full `ps -ef` output, one process per line.
    return stdoutCmd(['ps','-ef']).splitlines()
def getMemwatchPID():
    # Return the PID (second ps column) of the last process whose line
    # contains both 'python' and 'mem_watch', or None if none is running.
    pid = None
    for p in getProcesses():
        if 'python' in p and 'mem_watch' in p:
            pid = p.split()[1]
    return pid
def time_make(folder):
    # Generate project files in *folder*, then time a clean `make` build
    # and an incremental rebuild, writing timings under ../../results/.
    # (Python 2 script — print statements.)
    oldwd = os.path.abspath(os.getcwd())
    os.chdir(folder)
    print "%s - writing project files" % folder
    os.system('./genscons.pl > /dev/null 2>&1')
    os.chdir('sconsbld')
    print "%s - make" % folder
    os.system('%s -o ../../results/%s/make_cleanbuild.times make > /dev/null 2>&1' % (sconstest.time, folder))
    print "%s - make update" % folder
    os.system('%s -o ../../results/%s/make_update.times make > /dev/null 2>&1' % (sconstest.time, folder))
    os.chdir(oldwd)
def time_scons(folder):
    # Generate project files, then time a clean scons build while a
    # background mem_watch process samples memory usage into mem.csv.
    # Blocks until mem_watch exits before restoring the working directory.
    oldwd = os.path.abspath(os.getcwd())
    os.chdir(folder)
    print "%s - writing project files" % folder
    os.system('./genscons.pl > /dev/null 2>&1')
    os.chdir('sconsbld')
    print "%s - scons" % folder
    # Start mem_watch in background
    subprocess.Popen(["python",os.path.join(sconstest.suite,"mem_watch.py"),"../../results/%s/mem.csv" % folder])
    time.sleep(1)
    os.system('%s -o ../../results/%s/scons_cleanbuild.times lscons %s > /dev/null 2>&1' % (sconstest.time, folder, sconstest.sconswd))
    time.sleep(2)
    # Wait for memwatch to finish
    while getMemwatchPID():
        time.sleep(1)
    os.chdir(oldwd)
# Project folders to time, in run order ('f' through 'p').
timelist = list("fghijklmnop")
def main():
    # Time the scons build for each project folder: create its results
    # directory if needed, run the timed build, then delete the build tree.
    for t in timelist:
        if not os.path.isdir(os.path.join('results', t)):
            os.makedirs(os.path.join('results', t))
        time_scons(t)
        os.system('rm -rf %s/sconsbld' % t)
if __name__ == "__main__":
main()
| 2.21875
| 2
|
analysis/workflow/scripts/generate_phenotypes.py
|
aryarm/happler
| 0
|
12774247
|
<gh_stars>0
#!/usr/bin/env python
import sys
import argparse
import numpy as np
import pandas as pd
from math import sqrt
from numpy.random import normal
def parse_args():
    """Build and parse the command-line arguments for this script."""
    ap = argparse.ArgumentParser(
        description=(
            "Use a genotype matrix to create phenotypes from a variant or variants."
        )
    )
    ap.add_argument(
        "-o",
        "--out",
        default=sys.stdout,
        help=(
            "path to TSV file where variants are cols, last column is phenotypes, and"
            " samples are rows"
        ),
    )
    # The two beta values are mutually exclusive, as are the ways of
    # selecting which variant(s) drive the phenotype.
    beta_group = ap.add_mutually_exclusive_group()
    beta_group.add_argument(
        "--beta-snp", type=float, default=0, help="beta value for a randomly chosen SNP"
    )
    beta_group.add_argument(
        "--beta-str", type=float, default=0, help="beta value for a randomly chosen STR"
    )
    loc_group = ap.add_mutually_exclusive_group()
    loc_group.add_argument(
        "--str-loc",
        nargs="+",
        help="the start POS of the STR(s) to use for simulating the phenotypes",
    )
    loc_group.add_argument(
        "--snp-loc",
        nargs="+",
        help="the start POS of the SNP(s) to use for simulating the phenotypes",
    )
    loc_group.add_argument(
        "--max-vars",
        type=int,
        default=1,
        help="the max number of random variants to consider",
    )
    ap.add_argument(
        "gt_matrix",
        nargs="?",
        default=sys.stdin,
        help="a tab-separated GT matrix where variants are cols and samples are rows",
    )
    return ap.parse_args()
def main(args):
    # Fixed seed so simulated phenotypes are reproducible across runs.
    np.random.seed(40)
    if args.str_loc or args.snp_loc:
        # if a specific STR or SNP has been requested...
        # Column names end in ":1" for STRs and ":0" for SNPs; "sample"
        # is always kept as the index column.
        variants = ["sample"]
        if args.str_loc:
            variants.extend([idx + ":1" for idx in args.str_loc])
        else:
            variants.extend([idx + ":0" for idx in args.snp_loc])
        try:
            gt = pd.read_csv(args.gt_matrix, sep="\t", index_col=0, usecols=variants)
        except ValueError:
            # Handle cases in which the variant that the user requested didn't appear
            # in the genotype matrix, presumably because it was filtered out during
            # the MAF thresholding
            raise ValueError(
                "Do the variants at POS {} pass your MAF threshold?".format(
                    variants[1:]
                )
            )
    elif args.max_vars:
        # if we should just choose a random variant...
        gt = pd.read_csv(args.gt_matrix, sep="\t", index_col=0)
        gt = gt.sample(args.max_vars, axis=1)
    else:
        # this shouldn't happen!
        pass
    gt["phen"] = 0
    for col in gt.columns:
        if col == "phen":
            continue
        # z-normalize the column so it has stdev 1
        gt[col] = (gt[col] - gt[col].mean()) / gt[col].std(ddof=0)
        # use the STR beta if the col name has a '1' at the end of it
        beta_val = args.beta_str if int(col[-1]) else args.beta_snp
        gt["phen"] = gt[col] * beta_val + gt["phen"]
        # add some noise! sample randomly from a gaussian distribution
        # NOTE(review): scale sqrt(1 - beta^2) keeps each variant's
        # contribution near unit variance; with multiple variants the
        # total phenotype variance exceeds 1 — confirm this is intended.
        gt["phen"] += normal(scale=sqrt(1 - (beta_val ** 2)), size=gt[col].shape)
    gt.to_csv(args.out, header=True, sep="\t")
if __name__ == "__main__":
main(parse_args())
| 3.046875
| 3
|
safe_transaction_service/history/migrations/0002_auto_20190725_0857.py
|
kanhirun/safe-transaction-service
| 5
|
12774248
|
# Generated by Django 2.2.2 on 2019-07-25 08:57
from django.db import migrations, models
import gnosis.eth.django.models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.2): adds MultisigConfirmation.signature
    # and relaxes block_date_time / block_number / transaction_hash to
    # allow NULL. Do not hand-edit applied migrations.

    dependencies = [
        ('history', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='multisigconfirmation',
            name='signature',
            field=gnosis.eth.django.models.HexField(max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='multisigconfirmation',
            name='block_date_time',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='multisigconfirmation',
            name='block_number',
            field=gnosis.eth.django.models.Uint256Field(null=True),
        ),
        migrations.AlterField(
            model_name='multisigconfirmation',
            name='transaction_hash',
            field=gnosis.eth.django.models.Sha3HashField(null=True),
        ),
    ]
| 1.703125
| 2
|
model/knowledge_representation/curriculum.py
|
e-kolpakov/study-model
| 2
|
12774249
|
import logging
__author__ = 'e.kolpakov'
class Curriculum:
    """Registry of competencies, facts and lessons, each indexed by code."""

    def __init__(self):
        self._competency_index = {}
        self._fact_index = {}
        self._lesson_index = {}

    def register_competency(self, competency):
        """
        Registers competency with curriculum.
        :param competency: Competency
        :raises ValueError: if a competency with the same code exists
        """
        self._register(competency, self._competency_index)

    def register_fact(self, fact):
        """
        Registers fact with curriculum
        :param fact: Fact
        :raises ValueError: if a fact with the same code exists
        :return: None
        """
        self._register(fact, self._fact_index)

    def register_lesson(self, lesson):
        """
        Registers lesson with curriculum
        :param Lesson lesson: Lesson to register
        :raises ValueError: if a lesson with the same code exists
        :return: None
        """
        self._register(lesson, self._lesson_index)

    def find_competency(self, competency_code):
        """
        Finds competency by code
        :param competency_code: str
        :rtype: knowledge_representation.Competency
        """
        return self._find(competency_code, self._competency_index)

    def find_fact(self, fact_code):
        """
        Finds fact by code
        :param fact_code: str
        :rtype: knowledge_representation.Fact
        """
        return self._find(fact_code, self._fact_index)

    def find_lesson(self, lesson_code):
        """
        Finds lesson by code
        :param lesson_code: str
        :rtype: BaseLesson
        """
        return self._find(lesson_code, self._lesson_index)

    def all_competencies(self):
        return self._competency_index.values()

    def all_facts(self):
        return self._fact_index.values()

    def all_lessons(self):
        return self._lesson_index.values()

    def find_lessons(self, lesson_type=None):
        """
        Returns registered lessons, optionally restricted to *lesson_type*.
        :param lesson_type: type|None
        :rtype: filter
        """
        filters = []
        if lesson_type is not None:
            filters.append(lambda lesson: isinstance(lesson, lesson_type))

        def composite_filter(lesson):
            # A lesson passes when every registered predicate accepts it.
            return all(subfilter(lesson) for subfilter in filters)

        return filter(composite_filter, self._lesson_index.values())

    @staticmethod
    def _register(entity, index, message="{0} already registered", code_selector=None):
        code_selector = code_selector if code_selector else lambda x: x.code
        code = code_selector(entity)
        if code in index:
            message = message.format(entity)
            # BUG FIX: Logger.warn is a deprecated alias (removed in
            # Python 3.13); use warning().
            logging.getLogger(__name__).warning(message)
            raise ValueError(message)
        index[code] = entity

    @staticmethod
    def _find(code, index, default=None):
        """
        :param str code: code to look up
        :param dict index: index to search
        :param object|None default: default value if object is not found
        :rtype: object
        """
        return index.get(code, default)
| 2.953125
| 3
|
core/client.py
|
leno3s/FF_checker
| 0
|
12774250
|
<gh_stars>0
import tweepy
class Client:
    """Thin tweepy wrapper for follower/following lookups and DMs."""

    def __init__(self, consumer_key, consumer_secret, access_token, access_token_secret):
        # OAuth1 handshake; wait_on_rate_limit makes cursor iteration
        # block instead of raising when Twitter's rate limit is hit.
        self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        self.auth.set_access_token(access_token, access_token_secret)
        self.api = tweepy.API(self.auth, wait_on_rate_limit=True)

    @classmethod
    def create_from_conf(cls, conf):
        # Alternate constructor: read the four credentials from a config
        # object that exposes them as attributes.
        return Client(
            consumer_key=conf.consumer_key,
            consumer_secret=conf.consumer_secret,
            access_token=conf.access_token,
            access_token_secret=conf.access_token_secret
        )

    def get_following(self):
        # Map user id -> screen name for every account this user follows.
        id_sn = {}
        for user in tweepy.Cursor(self.api.friends, cursor=-1).items():
            id_sn[user.id] = user.screen_name
        return id_sn

    def get_follower(self):
        # Map user id -> screen name for every follower of this user.
        id_sn = {}
        for user in tweepy.Cursor(self.api.followers, cursor=-1).items():
            id_sn[user.id] = user.screen_name
        return id_sn

    def send_direct_message(self, uid, message):
        # Send a direct message with body *message* to user id *uid*.
        self.api.send_direct_message(user_id=uid, text=message)
| 2.859375
| 3
|