Dataset schema (one row per source file; columns marked nullable may be empty in the sample rows below):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
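As a quick orientation to the score columns above, here is a minimal sketch of filtering rows by `score_documentation` with the Hugging Face `datasets` library. The two toy rows and their values are made up for illustration; this dump does not name the dataset they come from, so the sketch builds an in-memory `Dataset` rather than loading one.

```python
# Minimal sketch: filter rows with this schema by one of the score columns.
# The toy rows below are invented; only the column names come from the schema above.
from datasets import Dataset

toy_rows = Dataset.from_dict({
    "max_stars_repo_path": ["src/main.py", "scripts/util.py"],
    "size": [4492, 812],
    "score_documentation": [0.127, 0.610],
})

# Keep only rows whose documentation score clears a threshold.
documented = toy_rows.filter(lambda row: row["score_documentation"] > 0.3)
print(documented["max_stars_repo_path"])  # ['scripts/util.py']
```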
bbf00ed1d2c63a8cbd6917e7f62b070f2c550c40
| 4,492
|
py
|
Python
|
src/main.py
|
Naman-ntc/3D-HourGlass-Network
|
e58b7b6a78d35bc14fe4c0bc611f80022b2f409b
|
[
"MIT"
] | 53
|
2018-10-28T20:07:16.000Z
|
2021-12-17T02:25:57.000Z
|
src/main.py
|
Naman-ntc/3D-HourGlass-Network
|
e58b7b6a78d35bc14fe4c0bc611f80022b2f409b
|
[
"MIT"
] | 3
|
2019-01-07T14:01:39.000Z
|
2019-05-07T12:01:44.000Z
|
src/main.py
|
Naman-ntc/3D-HourGlass-Network
|
e58b7b6a78d35bc14fe4c0bc611f80022b2f409b
|
[
"MIT"
] | 9
|
2018-10-28T22:31:29.000Z
|
2021-10-14T02:54:27.000Z
|
import os
import time
import datetime
import ref
import torch
import torch.utils.data
from opts import opts
from model.Pose3D import Pose3D
from datahelpers.dataloaders.fusedDataLoader import FusionDataset
from datahelpers.dataloaders.h36mLoader import h36m
from datahelpers.dataloaders.mpiiLoader import mpii
from datahelpers.dataloaders.posetrackLoader import posetrack
from utils.utils import adjust_learning_rate
from utils.logger import Logger
from train import train,val
from inflateScript import *

def main():
    opt = opts().parse()
    torch.cuda.set_device(opt.gpu_id)
    print('Using GPU ID: ' ,str(torch.cuda.current_device()))
    now = datetime.datetime.now()
    logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))

    if opt.loadModel == 'none':
        model = inflate(opt).cuda()
    elif opt.loadModel == 'scratch':
        model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions, opt.nRegModules, opt.nRegFrames, ref.nJoints, ref.temporal).cuda()
    else :
        if opt.isStateDict:
            model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions, opt.nRegModules, opt.nRegFrames, ref.nJoints, ref.temporal).cuda()
            model.load_state_dict(torch.load(opt.loadModel))
            model = model.cuda()
            print("yaya")
        else:
            model = torch.load(opt.loadModel).cuda()

    val_loader = torch.utils.data.DataLoader(
        h36m('val', opt),
        batch_size = 1,
        shuffle = False,
        num_workers = int(ref.nThreads)
    )

    if opt.completeTest:
        mp = 0.
        cnt = 0.
        for i in range(6000//opt.nVal):
            opt.startVal = 120*i
            opt.nVal = opt.nVal
            a,b = val(i, opt, val_loader, model)
            mp += a*b
            cnt += b
            print("This Round " + str(a) + " MPJPE in " + str(b) + " frames!!")
            print("Average MPJPE so far " + str(mp/cnt))
            print("")
        print("------Finally--------")
        print("Final MPJPE ==> :" + str(mp/cnt))
        return

    if (opt.test):
        val(0, opt, val_loader, model)
        return

    train_loader = torch.utils.data.DataLoader(
        FusionDataset('train',opt) if opt.loadMpii else h36m('train',opt),
        batch_size = opt.dataloaderSize,
        shuffle = True,
        num_workers = int(ref.nThreads)
    )

    optimizer = torch.optim.RMSprop(
        [{'params': model.hg.parameters(), 'lr': opt.LRhg},
         {'params': model.dr.parameters(), 'lr': opt.LRdr}],
        alpha = ref.alpha,
        eps = ref.epsilon,
        weight_decay = ref.weightDecay,
        momentum = ref.momentum
    )

    def hookdef(grad):
        newgrad = grad.clone()
        if (grad.shape[2]==1):
            newgrad = grad*opt.freezefac
        else:
            newgrad[:,:,1,:,:] = grad[:,:,1,:,:]*opt.freezefac
        return newgrad

    def hookdef1(grad):
        newgrad = grad.clone()
        newgrad[:,4096:8192] = newgrad[:,4096:8192]*opt.freezefac
        return newgrad

    for i in (model.parameters()):
        if len(i.shape)==5:
            _ = i.register_hook(hookdef)
        if len(i.shape)==2:
            _ = i.register_hook(hookdef1)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor = opt.dropMag, patience = opt.patience, verbose = True, threshold = opt.threshold)

    for epoch in range(1, opt.nEpochs + 1):
        loss_train, loss3d_train, mpjpe_train, acc_train = train(epoch, opt, train_loader, model, optimizer)
        logger.scalar_summary('loss_train', loss_train, epoch)
        #logger.scalar_summary('acc_train', acc_train, epoch)
        logger.scalar_summary('mpjpe_train', mpjpe_train, epoch)
        logger.scalar_summary('loss3d_train', loss3d_train, epoch)
        if epoch % opt.valIntervals == 0:
            loss_val, loss3d_val, mpjpe_val, acc_val = val(epoch, opt, val_loader, model)
            logger.scalar_summary('loss_val', loss_val, epoch)
            # logger.scalar_summary('acc_val', acc_val, epoch)
            logger.scalar_summary('mpjpe_val', mpjpe_val, epoch)
            logger.scalar_summary('loss3d_val', loss3d_val, epoch)
            torch.save(model.state_dict(), os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
            logger.write('{:8f} {:8f} {:8f} {:8f} {:8f} {:8f} \n'.format(loss_train, mpjpe_train, loss3d_train, acc_val, loss_val, mpjpe_val, loss3d_val, acc_train))
        else:
            logger.write('{:8f} {:8f} {:8f} \n'.format(loss_train, mpjpe_train, loss3d_train, acc_train))
        #adjust_learning_rate(optimizer, epoch, opt.dropLR, opt.LR)
        if opt.scheduler == 1:
            scheduler.step(int(loss_train))
        elif opt.scheduler == 2:
            scheduler.step(int(loss3d_train))
        elif opt.scheduler == 3:
            scheduler.step(int(loss_train + loss3d_train))
        elif opt.scheduler == 4:
            scheduler.step(int(mpjpe_train))
    logger.close()

if __name__ == '__main__':
    #torch.set_default_tensor_type('torch.DoubleTensor')
    main()
| 31.412587
| 163
| 0.70236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 571
| 0.127115
|
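The `src/main.py` row above registers per-parameter gradient hooks (`hookdef`, `hookdef1`) to damp updates on selected weights. A minimal, self-contained sketch of that `Tensor.register_hook` pattern, with `freezefac` standing in for the script's `opt.freezefac`:

```python
import torch

freezefac = 0.1  # stand-in for opt.freezefac in the script above

def scale_grad(grad):
    # The value returned from a hook replaces the gradient that flows onward.
    return grad * freezefac

w = torch.randn(4, 4, requires_grad=True)
w.register_hook(scale_grad)

loss = (w ** 2).sum()
loss.backward()
# Without the hook w.grad would be 2*w; with it, the gradient is scaled by freezefac.
print(torch.allclose(w.grad, 2 * w * freezefac))  # True
```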
bbf0d14e96a9123beca18b5184daaba1114c19e3
| 4,129
|
py
|
Python
|
simulated_crowds/simulated_heterogeneous_crowd_study.py
|
aburnap/JMD2015-When-Crowdsourcing-Fails
|
b0bdcba7e35a1678c5c42c8f69461d724887fb35
|
[
"MIT"
] | 3
|
2015-03-19T11:49:05.000Z
|
2019-06-06T22:25:49.000Z
|
simulated_crowds/simulated_heterogeneous_crowd_study.py
|
aburnap/JMD2015-When-Crowdsourcing-Fails
|
b0bdcba7e35a1678c5c42c8f69461d724887fb35
|
[
"MIT"
] | null | null | null |
simulated_crowds/simulated_heterogeneous_crowd_study.py
|
aburnap/JMD2015-When-Crowdsourcing-Fails
|
b0bdcba7e35a1678c5c42c8f69461d724887fb35
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------------------------------
#
# Paper: When Crowdsourcing Fails: A Study of Expertise on Crowdsourced
# Design Evaluation
# Author: Alex Burnap - aburnap@umich.edu
# Date: October 10, 2014
# License: Apache v2
# Description: Simulated Crowd Study for Heterogeneous Crowds. Used to
# generate data for Figure 5.
#
#-----------------------------------------------------------------------------
import simulation_heterogeneous as sim
import model
import numpy as np
import pymc
import csv
#-----------------------------------------------------------------------------
# Simulated Crowd Variables
crowd_parameters = {
'num_participants' : 60,
'crowd_makeup' : 'homogeneous',
'homo_mean' : .8,
'homo_std_dev' : .1,
'mixture_means' : (.2, .8),
'mixture_std_dev' : (.1, .1),
'mixture_coefficients' : (.9, .1),
}
design_parameters = {
'num_designs' : 8,
'num_subcriteria' : 1,
'true_design_criteria_score_makeup' : 'random',
'true_design_evaluation_difficulty_makeup' : 'same',
'true_design_evaluation_difficulty_score' : .5,
}
cluster_biases0 = np.zeros(design_parameters['num_designs'])
cluster_biases1 = np.zeros(design_parameters['num_designs'])
cluster_biases1[6] = 0.5
cluster_parameters = {
'num_clusters' : 2,
'cluster_proportions' : (.8,.2),
'cluster_biases' : (cluster_biases0 ,cluster_biases1), # this is on 0_1 scale
}
evaluation_parameters = {
'num_queries_per_participant' : 20,
'num_designs_per_query' : 3,
'interface_difficulty' : 0,
'logistic_scale' : .1,
}
for i in xrange(250):
    print '----------------------------------------------------'
    print "Iteration %i" % (i+1)
    print
    i_cluster0_proportion = np.random.random()
    cluster_parameters['cluster_proportions']= (i_cluster0_proportion, 1-i_cluster0_proportion)
    env=sim.Environment(crowd_parameters, design_parameters, evaluation_parameters, cluster_parameters)
    env.designs[6].true_criteria_score = 0.2
    env.run_evaluations()
    raw_model = model.create_model(env.evaluations_matrix,
                                   crowd_parameters['num_participants'],
                                   design_parameters['num_designs'])
    model_instance = pymc.Model(raw_model)

    # Initial Values Set by MAP
    #pymc.MAP(model_instance).fit(method='fmin_powell')
    print '---------- Finished Running MAP to Set MCMC Initial Values ----------'

    # Run MCMC
    print '--------------------------- Starting MCMC ---------------------------'
    M = pymc.MCMC(model_instance)
    M.sample(200000,100000, thin=5, verbose=0)

    true_abilities = [env.participants[i].true_ability for i in xrange(crowd_parameters['num_participants'])]
    true_scores=[(env.designs[i].true_criteria_score*4+1) for i in xrange(design_parameters['num_designs'])]
    bayesian_network_scores = np.transpose(M.criteria_score_vector.stats()['mean'])*4+1
    bayesian_network_abilities = np.transpose(M.ability_vector.stats()['mean'])
    averaging_scores = [np.average(env.evaluations_matrix[:,i]) for i in xrange(design_parameters['num_designs'])]
    averaging_MSqE = np.average((np.array(true_scores) - np.array(averaging_scores))**2)
    bayesian_network_MSqE = np.average((np.array(true_scores) - np.array(bayesian_network_scores))**2)
    bayesian_network_abilities_MSqE = np.average((np.array(true_abilities) - np.array(bayesian_network_abilities))**2)
    bayesian_network_logistic_scale = M.logistic_scale_num.stats()['mean']
    bayesian_network_design_difficulty = M.design_difficulty_num.stats()['mean']

    with open("./simulated_crowd_results/results_heterogeneous_clusters.csv","a") as csvfile:
        results=csv.writer(csvfile)
        results.writerow([i_cluster0_proportion, averaging_MSqE,
                          bayesian_network_MSqE,
                          bayesian_network_abilities_MSqE,
                          bayesian_network_logistic_scale,
                          bayesian_network_design_difficulty])
| 43.463158
| 118
| 0.627755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,551
| 0.375636
|
bbf0f0dbbea749b29ef7a61b2ac5e680c12f1409
| 1,053
|
py
|
Python
|
basic-part-1/07-print-file-extension.py
|
inderpal2406/python-practice-2022
|
59e280a5babefc96b1a9c773a79fb5176e876f7a
|
[
"MIT"
] | null | null | null |
basic-part-1/07-print-file-extension.py
|
inderpal2406/python-practice-2022
|
59e280a5babefc96b1a9c773a79fb5176e876f7a
|
[
"MIT"
] | null | null | null |
basic-part-1/07-print-file-extension.py
|
inderpal2406/python-practice-2022
|
59e280a5babefc96b1a9c773a79fb5176e876f7a
|
[
"MIT"
] | null | null | null |
# This script accepts a filename from the user and prints its extension.
# If the script doesn't find a period in the filename, it reports that instead.
# The "in" / "not in" membership operators work on strings as well as lists and tuples.
# Need to check in which other places they can be used.

# Import modules.
import platform
import os

# Detect the OS and clear the screen.
os_name = platform.system()
if os_name == "Windows":
    os.system("cls")
elif os_name == "Linux":
    os.system("clear")

# Display purpose of the script.
print(f"This script will accept filename from the user and print its extension.\n")

# Accept user input.
filename = input("Enter the filename: ")

# Check if the filename has a period "." in it. If it contains a period, then extract the extension and display it.
if "." not in filename:
    print(f"\nThe filename doesn't contain . in it. It seems to be a file without extension.\n")
else:
    our_list = filename.split(".")
    print(f"\nFile extension: {our_list[-1]}\n")
| 30.970588
| 115
| 0.716999
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 804
| 0.763533
|
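The extension script above splits on `"."` by hand. For comparison, a short sketch of the standard-library route (`os.path.splitext`), which also copes with names that have no extension; the sample filenames are made up:

```python
import os.path

for name in ("report.tar.gz", "notes.txt", "Makefile"):
    root, ext = os.path.splitext(name)
    print(name, "->", ext if ext else "(no extension)")
# report.tar.gz -> .gz
# notes.txt -> .txt
# Makefile -> (no extension)
```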
bbf2ae61952632fab35bb3d4da6625e30a6cc5d4
| 1,279
|
py
|
Python
|
src/Xtb/Python/__init__.py
|
qcscine/xtb_wrapper
|
5295244771ed5efe3d9e1582e07ed9d26545d387
|
[
"BSD-3-Clause"
] | null | null | null |
src/Xtb/Python/__init__.py
|
qcscine/xtb_wrapper
|
5295244771ed5efe3d9e1582e07ed9d26545d387
|
[
"BSD-3-Clause"
] | null | null | null |
src/Xtb/Python/__init__.py
|
qcscine/xtb_wrapper
|
5295244771ed5efe3d9e1582e07ed9d26545d387
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T13:40:00.000Z
|
2022-02-04T13:40:00.000Z
|
__copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import os
import scine_utilities as utils
from distutils import ccompiler
manager = utils.core.ModuleManager()
if not manager.module_loaded('Xtb'):
    shlib_suffix = ccompiler.new_compiler().shared_lib_extension
    module_filename = "xtb.module" + shlib_suffix
    # Look within the python module directory (module is here in the case of
    # python packages) and the lib folder the site packages are in
    current_path = os.path.dirname(os.path.realpath(__file__))
    lib_path = os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
    test_paths = [current_path, lib_path]

    def exists_and_could_load(path):
        full_path = os.path.join(path, module_filename)
        if os.path.exists(full_path):
            try:
                manager.load(full_path)
            except RuntimeError as err:
                print("Could not load {}: {}".format(full_path, err))
                return False
            return True
        return False

    if not any(map(exists_and_could_load, test_paths)):
        raise ImportError('{} could not be located.'.format(module_filename))
| 36.542857
| 78
| 0.693511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 359
| 0.280688
|
bbf40515dd7d835260533fe653dd331f52016415
| 5,062
|
py
|
Python
|
perch/validators.py
|
OpenPermissions/perch
|
36d78994133918f3c52c187f19e50132960a0156
|
[
"Apache-2.0"
] | 3
|
2016-05-03T20:07:25.000Z
|
2020-12-22T07:16:11.000Z
|
perch/validators.py
|
OpenPermissions/perch
|
36d78994133918f3c52c187f19e50132960a0156
|
[
"Apache-2.0"
] | 17
|
2016-04-26T09:35:42.000Z
|
2016-08-18T10:07:40.000Z
|
perch/validators.py
|
OpenPermissions/perch
|
36d78994133918f3c52c187f19e50132960a0156
|
[
"Apache-2.0"
] | 1
|
2019-05-20T01:40:56.000Z
|
2019-05-20T01:40:56.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""Voluptuous validotor functions"""
import re
from urlparse import urlsplit
from voluptuous import AllInvalid, Invalid, Schema, ALLOW_EXTRA
from .model import State
class MetaSchema(object):
"""
Schema must pass all validators. Useful for cases where a field depends on
the value of another field
Similar to using All with a schema and validator function, e.g.
All(Schema({'x': int, 'y': int}), x_greater_than_y)
>>> validate = MetaSchema({'x': '10'}, Coerce(int))
>>> validate('10')
10
"""
def __init__(self, schema, *validators, **kwargs):
self.validators = validators
self.msg = kwargs.pop('msg', None)
self._schema = Schema(schema)
self._schemas = [Schema(val, **kwargs) for val in validators]
@property
def schema(self):
return self._schema.schema
def __call__(self, v):
try:
v = self._schema(v)
for schema in self._schemas:
v = schema(v)
except Invalid as e:
raise e if self.msg is None else AllInvalid(self.msg)
return v
def partial_schema(schema, filtered_fields):
"""
Validator for part of a schema, ignoring some fields
:param schema: the Schema
:param filtered_fields: fields to filter out
"""
return Schema({
k: v for k, v in schema.schema.items()
if getattr(k, 'schema', k) not in filtered_fields
}, extra=ALLOW_EXTRA)
def valid_email(email):
"""Validate email."""
if "@" not in email:
raise Invalid('This email is invalid.')
return email
def validate_hex(color):
"""
Validate string is a hex color code
"""
hex_re = '^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(hex_re, color):
raise Invalid('Invalid Hex Color')
return color
def validate_url(url):
"""Validate URL is valid
NOTE: only support http & https
"""
schemes = ['http', 'https']
netloc_re = re.compile(
r'^'
r'(?:\S+(?::\S*)?@)?' # user:pass auth
r'(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])'
r'(?:\.(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9]))*' # host
r'(?::[0-9]{2,5})?' # port
r'$', re.IGNORECASE
)
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
raise Invalid('Invalid URL')
if scheme not in schemes:
raise Invalid('Missing URL scheme')
if not netloc_re.search(netloc):
raise Invalid('Invalid URL')
return url
def validate_reference_links(reference_links):
"""
Validate reference links data structure
Expected data structure:
{
"links": {
id_type1: url1,
id_type2: url2
},
"redirect_id_type": id_type1 | id1_type2
}
where links is an optional key but must be a dictionary with id types to
URLs if it exists, and redirect_id_type is optional but if it exists,
it must point to one of the existing id types in the links object. It is
used to set a default redirect URL that is used by the resolution service.
"""
allowed_keys = ['links', 'redirect_id_type']
if not isinstance(reference_links, dict):
raise Invalid('Expected reference_links to be an object')
if 'links' in reference_links and not isinstance(reference_links['links'], dict):
raise Invalid('Expected links in reference_links to be an object')
links = reference_links.get('links', {})
redirect_id_type = reference_links.get('redirect_id_type')
for key in reference_links:
if key not in allowed_keys:
raise Invalid('Key {} is not allowed'.format(key))
if redirect_id_type and redirect_id_type not in links:
raise Invalid('Redirect ID type must point to one of the links\' ID types')
[validate_url(url) for url in links.values()]
return reference_links
VALID_STATES = {x.name for x in State}
VALID_USER_STATES = {x.name for x in [State.approved, State.deactivated]}
def validate_state(state):
return _validate_state(state, VALID_STATES)
def validate_user_state(state):
return _validate_state(state, VALID_USER_STATES)
def _validate_state(state, valid_states):
"""Validate a state string"""
if state in State:
return state.name
elif state in valid_states:
return state
else:
raise Invalid('Invalid state')
| 29.260116
| 107
| 0.644212
| 936
| 0.184907
| 0
| 0
| 66
| 0.013038
| 0
| 0
| 2,432
| 0.480443
|
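The validators in `perch/validators.py` are plain callables that raise `Invalid`, so they can be dropped directly into a voluptuous `Schema`. A minimal sketch of that usage; the schema and payloads are made up, and `valid_email` is copied inline rather than imported so the snippet stands alone:

```python
from voluptuous import Invalid, MultipleInvalid, Schema

def valid_email(email):
    """Same check as perch.validators.valid_email."""
    if "@" not in email:
        raise Invalid('This email is invalid.')
    return email

schema = Schema({'email': valid_email})

print(schema({'email': 'user@example.com'}))  # returns the validated dict unchanged
try:
    schema({'email': 'not-an-email'})
except MultipleInvalid as err:
    print(err)  # reports the Invalid raised by the validator
```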
bbf5f66b6a4f40cea15c174917bd79930606ce25
| 189
|
py
|
Python
|
tests/controls/scroller.py
|
whitegreyblack/PyWin
|
78f3637b4c03c11d7f6ef15b20a1acf699d4be24
|
[
"MIT"
] | null | null | null |
tests/controls/scroller.py
|
whitegreyblack/PyWin
|
78f3637b4c03c11d7f6ef15b20a1acf699d4be24
|
[
"MIT"
] | null | null | null |
tests/controls/scroller.py
|
whitegreyblack/PyWin
|
78f3637b4c03c11d7f6ef15b20a1acf699d4be24
|
[
"MIT"
] | null | null | null |
"""ScrollList Component Test"""
import curses
from source.controls import Window
from source.controls import ScrollList as Scroller
__author__ = "Samuel Whang"
def view(screen):
    pass
| 18.9
| 50
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.238095
|
bbf6bf0479cef19ff010cf6f671d185104dd03d3
| 9,060
|
py
|
Python
|
glycan_profiling/tandem/evaluation_dispatch/task.py
|
mstim/glycresoft
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
[
"Apache-2.0"
] | 4
|
2019-04-26T15:47:57.000Z
|
2021-04-20T22:53:58.000Z
|
glycan_profiling/tandem/evaluation_dispatch/task.py
|
mstim/glycresoft
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
[
"Apache-2.0"
] | 8
|
2017-11-22T19:20:20.000Z
|
2022-02-14T01:49:58.000Z
|
glycan_profiling/tandem/evaluation_dispatch/task.py
|
mstim/glycresoft
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
[
"Apache-2.0"
] | 3
|
2017-11-21T18:05:28.000Z
|
2021-09-23T18:38:33.000Z
|
import os
from collections import deque
from glycan_profiling.task import TaskBase
debug_mode = bool(os.environ.get("GLYCRESOFTDEBUG"))
class StructureSpectrumSpecificationBuilder(object):
"""Base class for building structure hit by spectrum specification
"""
def build_work_order(self, hit_id, hit_map, scan_hit_type_map, hit_to_scan):
"""Packs several task-defining data structures into a simple to unpack payload for
sending over IPC to worker processes.
Parameters
----------
hit_id : int
The id number of a hit structure
hit_map : dict
Maps hit_id to hit structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
Returns
-------
tuple
Packaged message payload
"""
return (hit_map[hit_id],
[(s, scan_hit_type_map[s, hit_id])
for s in hit_to_scan[hit_id]])
class TaskSourceBase(StructureSpectrumSpecificationBuilder, TaskBase):
"""A base class for building a stream of work items through
:class:`StructureSpectrumSpecificationBuilder`.
"""
batch_size = 10000
def add(self, item):
"""Add ``item`` to the work stream
Parameters
----------
item : object
The work item to deal
"""
raise NotImplementedError()
def join(self):
"""Checkpoint that may halt the stream generation.
"""
return
def feed(self, hit_map, hit_to_scan, scan_hit_type_map):
"""Push tasks onto the input queue feeding the worker
processes.
Parameters
----------
hit_map : dict
Maps hit id to structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
"""
i = 0
n = len(hit_to_scan)
seen = dict()
for hit_id, scan_ids in hit_to_scan.items():
i += 1
hit = hit_map[hit_id]
# This sanity checking is likely unnecessary, and is a hold-over from
# debugging redundancy in the result queue. For the moment, it is retained
# to catch "new" bugs.
# If a hit structure's id doesn't match the id it was looked up with, something
# may be wrong with the upstream process. Log this event.
if hit.id != hit_id:
self.log("Hit %r doesn't match its id %r" % (hit, hit_id))
if hit_to_scan[hit.id] != scan_ids:
self.log("Mismatch leads to different scans! (%d, %d)" % (
len(scan_ids), len(hit_to_scan[hit.id])))
# If a hit structure has been seen multiple times independent of whether or
# not the expected hit id matches, something may be wrong in the upstream process.
# Log this event.
if hit.id in seen:
self.log("Hit %r already dealt under hit_id %r, now again at %r" % (
hit, seen[hit.id], hit_id))
raise ValueError(
"Hit %r already dealt under hit_id %r, now again at %r" % (
hit, seen[hit.id], hit_id))
seen[hit.id] = hit_id
if i % self.batch_size == 0 and i:
self.join()
try:
work_order = self.build_work_order(hit_id, hit_map, scan_hit_type_map, hit_to_scan)
# if debug_mode:
# self.log("...... Matching %s against %r" % work_order)
self.add(work_order)
# Set a long progress update interval because the feeding step is less
# important than the processing step. Additionally, as the two threads
# run concurrently, the feeding thread can log a short interval before
# the entire process has formally logged that it has started.
if i % 10000 == 0:
self.log("...... Dealt %d work items (%0.2f%% Complete)" % (i, i * 100.0 / n))
except Exception as e:
self.log("An exception occurred while feeding %r and %d scan ids: %r" % (hit_id, len(scan_ids), e))
self.log("...... Finished dealing %d work items" % (i,))
self.join()
return
def feed_groups(self, hit_map, hit_to_scan, scan_hit_type_map, hit_to_group):
"""Push task groups onto the input queue feeding the worker
processes.
Parameters
----------
hit_map : dict
Maps hit id to structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
hit_to_group: dict
Maps group id to the set of hit ids which are
"""
i = 0
j = 0
n = len(hit_to_group)
seen = dict()
for group_key, hit_keys in hit_to_group.items():
hit_group = {
"work_orders": {}
}
i += 1
for hit_id in hit_keys:
j += 1
scan_ids = hit_to_scan[hit_id]
hit = hit_map[hit_id]
# This sanity checking is likely unnecessary, and is a hold-over from
# debugging redundancy in the result queue. For the moment, it is retained
# to catch "new" bugs.
# If a hit structure's id doesn't match the id it was looked up with, something
# may be wrong with the upstream process. Log this event.
if hit.id != hit_id:
self.log("Hit %r doesn't match its id %r" % (hit, hit_id))
if hit_to_scan[hit.id] != scan_ids:
self.log("Mismatch leads to different scans! (%d, %d)" % (
len(scan_ids), len(hit_to_scan[hit.id])))
# If a hit structure has been seen multiple times independent of whether or
# not the expected hit id matches, something may be wrong in the upstream process.
# Log this event.
if hit.id in seen:
self.log("Hit %r already dealt under hit_id %r, now again at %r in group %r" % (
hit, seen[hit.id], hit_id, group_key))
raise ValueError(
"Hit %r already dealt under hit_id %r, now again at %r" % (
hit, seen[hit.id], hit_id))
seen[hit.id] = (hit_id, group_key)
work_order = self.build_work_order(
hit_id, hit_map, scan_hit_type_map, hit_to_scan)
hit_group['work_orders'][hit_id] = work_order
self.add(hit_group)
if i % self.batch_size == 0 and i:
self.join()
self.log("...... Finished dealing %d work items" % (i,))
self.join()
return
def __call__(self, hit_map, hit_to_scan, scan_hit_type_map, hit_to_group=None):
if not hit_to_group:
return self.feed(hit_map, hit_to_scan, scan_hit_type_map)
else:
return self.feed_groups(hit_map, hit_to_scan, scan_hit_type_map, hit_to_group)
class TaskDeque(TaskSourceBase):
"""Generate an on-memory buffer of work items
Attributes
----------
queue : :class:`~.deque`
The in-memory work queue
"""
def __init__(self):
self.queue = deque()
def add(self, item):
self.queue.append(item)
def pop(self):
return self.queue.popleft()
def __iter__(self):
return iter(self.queue)
class TaskQueueFeeder(TaskSourceBase):
def __init__(self, input_queue, done_event):
self.input_queue = input_queue
self.done_event = done_event
def add(self, item):
self.input_queue.put(item)
def join(self):
return self.input_queue.join()
def feed(self, hit_map, hit_to_scan, scan_hit_type_map):
"""Push tasks onto the input queue feeding the worker
processes.
Parameters
----------
hit_map : dict
Maps hit id to structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
"""
super(TaskQueueFeeder, self).feed(hit_map, hit_to_scan, scan_hit_type_map)
self.done_event.set()
return
def feed_groups(self, hit_map, hit_to_scan, scan_hit_type_map, hit_to_group):
super(TaskQueueFeeder, self).feed_groups(hit_map, hit_to_scan, scan_hit_type_map, hit_to_group)
self.done_event.set()
return
| 37.438017
| 115
| 0.562252
| 8,909
| 0.983333
| 0
| 0
| 0
| 0
| 0
| 0
| 4,286
| 0.473068
|
bbf71be865b8e26676ff85c557b20b334f5953a8
| 4,420
|
py
|
Python
|
tests/queries/recursive/query_typeddict.py
|
s1s5/python-gql-compiler
|
52e0ed7c9fa6deafe2c169c8340d66e8cc168491
|
[
"MIT"
] | null | null | null |
tests/queries/recursive/query_typeddict.py
|
s1s5/python-gql-compiler
|
52e0ed7c9fa6deafe2c169c8340d66e8cc168491
|
[
"MIT"
] | null | null | null |
tests/queries/recursive/query_typeddict.py
|
s1s5/python-gql-compiler
|
52e0ed7c9fa6deafe2c169c8340d66e8cc168491
|
[
"MIT"
] | null | null | null |
# @generated AUTOGENERATED file. Do not Change!
# flake8: noqa
# fmt: off
# isort: skip_file
import typing
from gql import gql, Client
Episode = typing.Literal["NEWHOPE", "EMPIRE", "JEDI"]
GetRecursive__hero__Droid__friends__Droid__friends = typing.TypedDict("GetRecursive__hero__Droid__friends__Droid__friends", {"name": str})
GetRecursive__hero__Droid__friends__Droid = typing.TypedDict("GetRecursive__hero__Droid__friends__Droid", {"__typename": typing.Literal["Droid"], "id": str, "name": str, "friends": typing.List[typing.Optional[GetRecursive__hero__Droid__friends__Droid__friends]]})
GetRecursive__hero__Droid__friends__Human__starships = typing.TypedDict("GetRecursive__hero__Droid__friends__Human__starships", {"name": str})
GetRecursive__hero__Droid__friends__Human = typing.TypedDict("GetRecursive__hero__Droid__friends__Human", {"__typename": typing.Literal["Human"], "id": str, "name": str, "starships": typing.List[typing.Optional[GetRecursive__hero__Droid__friends__Human__starships]]})
__GetRecursive__hero__Droid__friends = typing.TypedDict("__GetRecursive__hero__Droid__friends", {"__typename": typing.Literal["Character"], "id": str})
GetRecursive__hero__Droid__friends = typing.Union[__GetRecursive__hero__Droid__friends, GetRecursive__hero__Droid__friends__Human, GetRecursive__hero__Droid__friends__Droid]
GetRecursive__hero__Droid = typing.TypedDict("GetRecursive__hero__Droid", {"__typename": typing.Literal["Droid"], "name": str, "primaryFunction": str, "friends": typing.List[typing.Optional[GetRecursive__hero__Droid__friends]]})
GetRecursive__hero__Human__friends__Droid = typing.TypedDict("GetRecursive__hero__Human__friends__Droid", {"__typename": typing.Literal["Droid"], "id": str, "name": str})
GetRecursive__hero__Human__friends__Human = typing.TypedDict("GetRecursive__hero__Human__friends__Human", {"__typename": typing.Literal["Human"], "name": str})
__GetRecursive__hero__Human__friends = typing.TypedDict("__GetRecursive__hero__Human__friends", {"__typename": typing.Literal["Character"]})
GetRecursive__hero__Human__friends = typing.Union[__GetRecursive__hero__Human__friends, GetRecursive__hero__Human__friends__Human, GetRecursive__hero__Human__friends__Droid]
GetRecursive__hero__Human = typing.TypedDict("GetRecursive__hero__Human", {"__typename": typing.Literal["Human"], "name": str, "friends": typing.List[typing.Optional[GetRecursive__hero__Human__friends]]})
__GetRecursive__hero = typing.TypedDict("__GetRecursive__hero", {"__typename": typing.Literal["Character"], "name": str})
GetRecursive__hero = typing.Union[__GetRecursive__hero, GetRecursive__hero__Human, GetRecursive__hero__Droid]
GetRecursiveResponse = typing.TypedDict("GetRecursiveResponse", {"hero": GetRecursive__hero})
_GetRecursiveInput__required = typing.TypedDict("_GetRecursiveInput__required", {"episode": Episode})
_GetRecursiveInput__not_required = typing.TypedDict("_GetRecursiveInput__not_required", {}, total=False)
class _GetRecursiveInput(_GetRecursiveInput__required, _GetRecursiveInput__not_required):
    pass


class GetRecursive:
    Response: typing.TypeAlias = GetRecursiveResponse
    Input: typing.TypeAlias = _GetRecursiveInput
    _query = gql('''
        query GetRecursive($episode: Episode!) {
          hero(episode: $episode) {
            __typename
            name
            ... on Human {
              friends {
                __typename
                ... on Human { name }
                ... on Droid { id name }
              }
            }
            ... on Droid {
              primaryFunction
              friends {
                __typename
                id
                ... on Human {
                  name
                  starships { name }
                }
                ... on Droid {
                  name
                  friends { name }
                }
              }
            }
          }
        }
    ''')

    @classmethod
    def execute(cls, client: Client, variable_values: _GetRecursiveInput) -> GetRecursiveResponse:
        return client.execute(  # type: ignore
            cls._query, variable_values=variable_values
        )

    @classmethod
    def execute_async(cls, client: Client, variable_values: _GetRecursiveInput) -> typing.Awaitable[GetRecursiveResponse]:
        return client.execute_async(  # type: ignore
            cls._query, variable_values=variable_values
        )
| 43.762376
| 267
| 0.721493
| 1,445
| 0.326923
| 0
| 0
| 478
| 0.108145
| 0
| 0
| 1,697
| 0.383937
|
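The generated `GetRecursive` class above wraps a fixed query and exposes typed `execute` / `execute_async` helpers. A minimal sketch of calling it with a `gql` client; the endpoint URL is a placeholder, the import path of the generated module is assumed from the file path above, and a server exposing the matching Star Wars style schema is required for this to actually run:

```python
from gql import Client
from gql.transport.requests import RequestsHTTPTransport

# Assumed import: the generated file above, available on the import path as `query_typeddict`.
from query_typeddict import GetRecursive

transport = RequestsHTTPTransport(url="http://localhost:8000/graphql")  # placeholder endpoint
client = Client(transport=transport, fetch_schema_from_transport=True)

result: GetRecursive.Response = GetRecursive.execute(client, {"episode": "JEDI"})
print(result["hero"]["name"])
```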
bbf803380db0ef251842437e33a2f97c28f09e88
| 795
|
py
|
Python
|
core/render.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
core/render.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
core/render.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 09:32:56 2018
@author: gamer
"""
import pygame as pg
import numpy as np
import skimage.transform as transform
class Render(object):
    def __init__(self, window_size=(360,480)):
        pg.init()
        self.h,self.w = window_size
        self.display = pg.display.set_mode((self.w,self.h))
        pg.display.set_caption("My Game")

    def update(self,vect):
        arr = transform.resize(vect,(self.h,self.w),mode='edge',clip=True
                               ).transpose((1,0,2))
        arr = (255*arr/np.max(arr)).astype('uint8')
        img = pg.surfarray.make_surface(arr[:,:,:])
        self.display.blit(img, (0,0))
        pg.display.flip()

    def quit(self):
        pg.quit()
| 24.090909
| 73
| 0.548428
| 632
| 0.794969
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.130818
|
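A small usage sketch for the `Render` class above: it expects an array it can resize to the window shape with three channels, so a random RGB field is enough to exercise `update`. The window size and frame count below are arbitrary, and an available display is required:

```python
import numpy as np
from core.render import Render  # import path assumed from the file path above

r = Render(window_size=(240, 320))
for _ in range(30):
    frame = np.random.rand(60, 80, 3)  # any (H, W, 3) array; update() resizes it
    r.update(frame)
r.quit()
```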
bbf98d99386d0154fceea52ba139487cd08f628c
| 660
|
py
|
Python
|
scripts/branching_recursion.py
|
ithasnext/python_fractals
|
1eea4e464d2073ddd0f9dd2000af101cad23c0f8
|
[
"MIT"
] | null | null | null |
scripts/branching_recursion.py
|
ithasnext/python_fractals
|
1eea4e464d2073ddd0f9dd2000af101cad23c0f8
|
[
"MIT"
] | null | null | null |
scripts/branching_recursion.py
|
ithasnext/python_fractals
|
1eea4e464d2073ddd0f9dd2000af101cad23c0f8
|
[
"MIT"
] | null | null | null |
import pygame
import sys
def setup(w,h,r):
    surf = pygame.Surface((w,h))
    fract_circle(w/2, h/2, r, surf)
    pygame.image.save(surf, str(r)+"_radius.png")

# branching recursion
def fract_circle(x,y, radius, surface):
    if radius > 1:
        pygame.draw.circle(surface, (0,0,255), (int(x),int(y)), int(radius), 1)
    if radius > 8:
        fract_circle(x+radius/2,y,radius/2,surface)
        fract_circle(x-radius/2,y,radius/2,surface)
        fract_circle(x,y+radius/2,radius/2,surface)
        fract_circle(x,y-radius/2,radius/2,surface)
width = input("Enter a width: ")
height = input("Enter a height: ")
radius = input("Enter a radius: ")
setup(int(width), int(height), int(radius))
| 27.5
| 73
| 0.689394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.131818
|
bbfa57bb471088a16fc1c6466ecf225acd101941
| 684
|
py
|
Python
|
WorkInProgress/MagnetoMeter/callibrate.py
|
SpudGunMan/LMS-uart-esp
|
95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4
|
[
"BSD-3-Clause"
] | 8
|
2021-03-21T21:34:59.000Z
|
2022-03-25T20:51:47.000Z
|
WorkInProgress/MagnetoMeter/callibrate.py
|
SpudGunMan/LMS-uart-esp
|
95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4
|
[
"BSD-3-Clause"
] | 7
|
2021-04-07T07:40:23.000Z
|
2022-01-22T21:05:40.000Z
|
WorkInProgress/MagnetoMeter/callibrate.py
|
SpudGunMan/LMS-uart-esp
|
95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4
|
[
"BSD-3-Clause"
] | 5
|
2022-01-21T18:37:20.000Z
|
2022-02-17T00:35:28.000Z
|
from hmc5883l import HMC5883L
sensor = HMC5883L(scl=5, sda=4)
valmin=[0,0,0]
valmax=[0,0,0]
valscaled=[0,0,0]
def convert(x, in_min, in_max, out_min, out_max):
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

f=open("cal.csv",'w')
for count in range(3000):
    valread = sensor.read()
    # for i in range(3):
    #     if valread[i]<valmin[i]: valmin[i]=valread[i]
    #     if valread[i]>valmax[i]: valmax[i]=valread[i]
    #     valscaled[i]=convert(valread[i],valmin[i],valmax[i],-100,100)
    #degrees, minutes = sensor.heading(valscaled[0], valscaled[1])
    print("%04d"%count,valmin,valmax,valread)
    f.write("%f,%f,%f\n"%valread)
f.close()
| 27.36
| 75
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 281
| 0.410819
|
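The calibration loop above keeps its rescaling step commented out, but the `convert` helper it relies on is just a linear map from one range onto another. A short standalone check of that formula (the input range is made up):

```python
def convert(x, in_min, in_max, out_min, out_max):
    # Linear rescale: map x from [in_min, in_max] onto [out_min, out_max].
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

print(convert(0, -400, 400, -100, 100))    # 0.0   (midpoint maps to midpoint)
print(convert(400, -400, 400, -100, 100))  # 100.0 (upper bound maps to upper bound)
```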
bbfb94b2d81a97cc98431123af9d98c4a0ea9623
| 16,554
|
py
|
Python
|
edgetpu/swig/edgetpu_cpp_wrapper.py
|
TinkerEdgeT/mendel-edgetpu
|
5df7f62a2d88dc0d9e98c8c794717d77b62daa89
|
[
"Apache-2.0"
] | null | null | null |
edgetpu/swig/edgetpu_cpp_wrapper.py
|
TinkerEdgeT/mendel-edgetpu
|
5df7f62a2d88dc0d9e98c8c794717d77b62daa89
|
[
"Apache-2.0"
] | null | null | null |
edgetpu/swig/edgetpu_cpp_wrapper.py
|
TinkerEdgeT/mendel-edgetpu
|
5df7f62a2d88dc0d9e98c8c794717d77b62daa89
|
[
"Apache-2.0"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_edgetpu_cpp_wrapper', [dirname(__file__)])
except ImportError:
import _edgetpu_cpp_wrapper
return _edgetpu_cpp_wrapper
if fp is not None:
try:
_mod = imp.load_module('_edgetpu_cpp_wrapper', fp, pathname, description)
finally:
fp.close()
return _mod
_edgetpu_cpp_wrapper = swig_import_helper()
del swig_import_helper
else:
import _edgetpu_cpp_wrapper
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _edgetpu_cpp_wrapper.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _edgetpu_cpp_wrapper.SwigPyIterator_value(self)
def incr(self, n=1):
return _edgetpu_cpp_wrapper.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _edgetpu_cpp_wrapper.SwigPyIterator_decr(self, n)
def distance(self, x):
return _edgetpu_cpp_wrapper.SwigPyIterator_distance(self, x)
def equal(self, x):
return _edgetpu_cpp_wrapper.SwigPyIterator_equal(self, x)
def copy(self):
return _edgetpu_cpp_wrapper.SwigPyIterator_copy(self)
def next(self):
return _edgetpu_cpp_wrapper.SwigPyIterator_next(self)
def __next__(self):
return _edgetpu_cpp_wrapper.SwigPyIterator___next__(self)
def previous(self):
return _edgetpu_cpp_wrapper.SwigPyIterator_previous(self)
def advance(self, n):
return _edgetpu_cpp_wrapper.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _edgetpu_cpp_wrapper.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _edgetpu_cpp_wrapper.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _edgetpu_cpp_wrapper.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _edgetpu_cpp_wrapper.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _edgetpu_cpp_wrapper.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _edgetpu_cpp_wrapper.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _edgetpu_cpp_wrapper.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class StringVector(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, StringVector, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, StringVector, name)
__repr__ = _swig_repr
def iterator(self):
return _edgetpu_cpp_wrapper.StringVector_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _edgetpu_cpp_wrapper.StringVector___nonzero__(self)
def __bool__(self):
return _edgetpu_cpp_wrapper.StringVector___bool__(self)
def __len__(self):
return _edgetpu_cpp_wrapper.StringVector___len__(self)
def __getslice__(self, i, j):
return _edgetpu_cpp_wrapper.StringVector___getslice__(self, i, j)
def __setslice__(self, *args):
return _edgetpu_cpp_wrapper.StringVector___setslice__(self, *args)
def __delslice__(self, i, j):
return _edgetpu_cpp_wrapper.StringVector___delslice__(self, i, j)
def __delitem__(self, *args):
return _edgetpu_cpp_wrapper.StringVector___delitem__(self, *args)
def __getitem__(self, *args):
return _edgetpu_cpp_wrapper.StringVector___getitem__(self, *args)
def __setitem__(self, *args):
return _edgetpu_cpp_wrapper.StringVector___setitem__(self, *args)
def pop(self):
return _edgetpu_cpp_wrapper.StringVector_pop(self)
def append(self, x):
return _edgetpu_cpp_wrapper.StringVector_append(self, x)
def empty(self):
return _edgetpu_cpp_wrapper.StringVector_empty(self)
def size(self):
return _edgetpu_cpp_wrapper.StringVector_size(self)
def swap(self, v):
return _edgetpu_cpp_wrapper.StringVector_swap(self, v)
def begin(self):
return _edgetpu_cpp_wrapper.StringVector_begin(self)
def end(self):
return _edgetpu_cpp_wrapper.StringVector_end(self)
def rbegin(self):
return _edgetpu_cpp_wrapper.StringVector_rbegin(self)
def rend(self):
return _edgetpu_cpp_wrapper.StringVector_rend(self)
def clear(self):
return _edgetpu_cpp_wrapper.StringVector_clear(self)
def get_allocator(self):
return _edgetpu_cpp_wrapper.StringVector_get_allocator(self)
def pop_back(self):
return _edgetpu_cpp_wrapper.StringVector_pop_back(self)
def erase(self, *args):
return _edgetpu_cpp_wrapper.StringVector_erase(self, *args)
def __init__(self, *args):
this = _edgetpu_cpp_wrapper.new_StringVector(*args)
try:
self.this.append(this)
except Exception:
self.this = this
def push_back(self, x):
return _edgetpu_cpp_wrapper.StringVector_push_back(self, x)
def front(self):
return _edgetpu_cpp_wrapper.StringVector_front(self)
def back(self):
return _edgetpu_cpp_wrapper.StringVector_back(self)
def assign(self, n, x):
return _edgetpu_cpp_wrapper.StringVector_assign(self, n, x)
def resize(self, *args):
return _edgetpu_cpp_wrapper.StringVector_resize(self, *args)
def insert(self, *args):
return _edgetpu_cpp_wrapper.StringVector_insert(self, *args)
def reserve(self, n):
return _edgetpu_cpp_wrapper.StringVector_reserve(self, n)
def capacity(self):
return _edgetpu_cpp_wrapper.StringVector_capacity(self)
__swig_destroy__ = _edgetpu_cpp_wrapper.delete_StringVector
__del__ = lambda self: None
StringVector_swigregister = _edgetpu_cpp_wrapper.StringVector_swigregister
StringVector_swigregister(StringVector)
def GetRuntimeVersion():
"""
Returns runtime (libedgetpu.so) version.
The version is dynamically retrieved from shared object.
Returns:
string.
"""
return _edgetpu_cpp_wrapper.GetRuntimeVersion()
class BasicEngine(_object):
"""Python wrapper for BasicEngine."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, BasicEngine, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, BasicEngine, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
Initializes BasicEngine with model's path.
Args:
model_path: [required] string.
device_path: [optional] string, path to Edge TPU device.
See ListEdgeTpuPaths() for path example.
"""
this = _edgetpu_cpp_wrapper.new_BasicEngine(*args)
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _edgetpu_cpp_wrapper.delete_BasicEngine
__del__ = lambda self: None
def RunInference(self, input):
"""
Runs inference with given input.
Args:
input: 1-D numpy.array. Flattened input tensor.
Returns:
(latency, output_tensors). Latency is milliseconds in float while
output_tensors is 1-D numpy.array. If there are multiple output tensors,
it will be compressed into a 1-D array. You can use
get_all_output_tensors_sizes, get_num_of_output_tensors and
get_output_tensor_size to calculate the offset for each tensor.
For example, if the model outputs 2 tensors with values [1, 2, 3] and
[0.1, 0.4, 0.9], output_tensors will be [1, 2, 3, 0.1, 0.4, 0.9].
"""
return _edgetpu_cpp_wrapper.BasicEngine_RunInference(self, input)
def get_input_tensor_shape(self):
"""
Gets shape of required input tensor.
For models trained for image classification / detection, it's always
(1, height, width, channels). After flatten, the 1-D array with size
height * width channels is the required input for RunInference.
Returns:
1-D numpy.array.
"""
return _edgetpu_cpp_wrapper.BasicEngine_get_input_tensor_shape(self)
def get_all_output_tensors_sizes(self):
"""
Gets sizes of output tensors.
A model may output several tensors, but in RunInference and get_raw_output
we'll concatenate them as one. This function will return the sizes of the original
output tensors, which can be used to calculate the offset.
Returns:
Numpy.array represents the sizes of output tensors.
"""
return _edgetpu_cpp_wrapper.BasicEngine_get_all_output_tensors_sizes(self)
def get_num_of_output_tensors(self):
"""
Gets number of output tensors.
Returns:
An integer representing number of output tensors.
"""
return _edgetpu_cpp_wrapper.BasicEngine_get_num_of_output_tensors(self)
def get_output_tensor_size(self, tensor_index):
"""
Gets size of specific output tensor.
Args:
tensor_index: integer, the index of the output tensor.
Returns:
An integer representing the size of the output tensor.
"""
return _edgetpu_cpp_wrapper.BasicEngine_get_output_tensor_size(self, tensor_index)
def required_input_array_size(self):
"""
Returns required size of input array of RunInference.
Returns:
An integer representing the size of the input array used for RunInference.
"""
return _edgetpu_cpp_wrapper.BasicEngine_required_input_array_size(self)
def total_output_array_size(self):
"""
Gets expected size of output array returned by RunInference.
Returns:
An integer representing the size of output_tensors returned by
RunInference().
"""
return _edgetpu_cpp_wrapper.BasicEngine_total_output_array_size(self)
def model_path(self):
"""
Gets the path of model loaded in the engine.
Returns:
A string representing the model file's path.
"""
return _edgetpu_cpp_wrapper.BasicEngine_model_path(self)
def get_raw_output(self):
"""
Gets output_tensors of last inference.
This can be used by higher level engines for debugging.
Returns:
A numpy.array.
"""
return _edgetpu_cpp_wrapper.BasicEngine_get_raw_output(self)
def get_inference_time(self):
"""
Gets latency of last inference.
This can be used by higher level engines for debugging.
Returns:
A float number(in milliseconds).
"""
return _edgetpu_cpp_wrapper.BasicEngine_get_inference_time(self)
def device_path(self):
"""
Gets associated device path of this BasicEngine instance.
Returns:
A string representing corresponding Edge TPU device path.
"""
return _edgetpu_cpp_wrapper.BasicEngine_device_path(self)
BasicEngine_swigregister = _edgetpu_cpp_wrapper.BasicEngine_swigregister
BasicEngine_swigregister(BasicEngine)
cvar = _edgetpu_cpp_wrapper.cvar
kEdgeTpuCppWrapperVersion = cvar.kEdgeTpuCppWrapperVersion
kSupportedRuntimeVersion = cvar.kSupportedRuntimeVersion
class ImprintingEngine(_object):
"""Engine used for imprinting method based transfer learning."""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ImprintingEngine, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ImprintingEngine, name)
__repr__ = _swig_repr
def __init__(self, model_path):
"""
Initializes ImprintingEngine with embedding extractor/model's path.
Args:
model_path: string, path of the embedding extractor or the model previous
trained with ImprintingEngine.
"""
this = _edgetpu_cpp_wrapper.new_ImprintingEngine(model_path)
try:
self.this.append(this)
except Exception:
self.this = this
def SaveModel(self, output_path):
"""
Saves trained model as '.tflite' file.
Args:
output_path: string, output path of the trained model.
"""
return _edgetpu_cpp_wrapper.ImprintingEngine_SaveModel(self, output_path)
def Train(self, input):
"""
Trains model with a set of images from same class.
Args:
input: list of numpy.array. Each numpy.array represents as a 1-D tensor
converted from an image.
Returns:
int, the label_id for the class.
"""
return _edgetpu_cpp_wrapper.ImprintingEngine_Train(self, input)
__swig_destroy__ = _edgetpu_cpp_wrapper.delete_ImprintingEngine
__del__ = lambda self: None
ImprintingEngine_swigregister = _edgetpu_cpp_wrapper.ImprintingEngine_swigregister
ImprintingEngine_swigregister(ImprintingEngine)
_edgetpu_cpp_wrapper.EdgeTpuState_kNone_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kNone = _edgetpu_cpp_wrapper.EdgeTpuState_kNone
_edgetpu_cpp_wrapper.EdgeTpuState_kAssigned_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kAssigned = _edgetpu_cpp_wrapper.EdgeTpuState_kAssigned
_edgetpu_cpp_wrapper.EdgeTpuState_kUnassigned_swigconstant(_edgetpu_cpp_wrapper)
EdgeTpuState_kUnassigned = _edgetpu_cpp_wrapper.EdgeTpuState_kUnassigned
def ListEdgeTpuPaths(state):
"""
Lists paths of Edge TPU devices available to host.
Args:
state: device's current state. Can be:
EDGE_TPU_STATE_ASSIGNED: devices that are associated with BasicEngine instance.
EDGE_TPU_STATE_UNASSIGNED: devices that are available.
EDGE_TPU_STATE_NONE: ASSIGNED or UNASSIGNED, all devices detected by host.
Returns:
tuple of strings, which represents device paths in certain state.
"""
return _edgetpu_cpp_wrapper.ListEdgeTpuPaths(state)
# This file is compatible with both classic and new-style classes.
| 31.411765
| 100
| 0.691434
| 12,106
| 0.731304
| 0
| 0
| 0
| 0
| 0
| 0
| 4,851
| 0.293041
|
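The docstrings in the SWIG wrapper above describe the `BasicEngine` flow: construct with a model path, query the input size, then pass a flattened array to `RunInference`, which returns latency and a concatenated output tensor. A sketch of that flow; the import path, model filename, and input dtype are assumptions, and it only runs with the Edge TPU runtime and a compiled model present:

```python
import numpy as np
from edgetpu.swig import edgetpu_cpp_wrapper as wrapper  # import path assumed from the file path above

engine = wrapper.BasicEngine("model_edgetpu.tflite")  # placeholder model file
print(engine.get_input_tensor_shape())               # e.g. (1, height, width, channels)

flat_input = np.zeros(engine.required_input_array_size(), dtype=np.uint8)  # dtype assumed
latency_ms, outputs = engine.RunInference(flat_input)
print(latency_ms, outputs.shape)
```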
bbfba00ada95ca4b323dab1489addc7b7c3e9bf4
| 13,774
|
py
|
Python
|
pyriemann/utils/mean.py
|
qbarthelemy/pyRiemann
|
b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3
|
[
"BSD-3-Clause"
] | 1
|
2021-09-30T01:18:51.000Z
|
2021-09-30T01:18:51.000Z
|
pyriemann/utils/mean.py
|
qbarthelemy/pyRiemann
|
b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3
|
[
"BSD-3-Clause"
] | null | null | null |
pyriemann/utils/mean.py
|
qbarthelemy/pyRiemann
|
b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3
|
[
"BSD-3-Clause"
] | null | null | null |
"""Mean covariance estimation."""
from copy import deepcopy
import numpy as np
from .base import sqrtm, invsqrtm, logm, expm
from .ajd import ajd_pham
from .distance import distance_riemann
from .geodesic import geodesic_riemann
def _get_sample_weight(sample_weight, data):
"""Get the sample weights.
If none is provided, weights are initialized to 1; otherwise, weights are normalized.
"""
if sample_weight is None:
sample_weight = np.ones(data.shape[0])
if len(sample_weight) != data.shape[0]:
raise ValueError("len of sample_weight must be equal to len of data.")
sample_weight /= np.sum(sample_weight)
return sample_weight
def mean_riemann(covmats, tol=10e-9, maxiter=50, init=None,
sample_weight=None):
r"""Return the mean covariance matrix according to the Riemannian metric.
The procedure is similar to a gradient descent minimizing the sum of
riemannian distance to the mean.
.. math::
\mathbf{C} = \arg\min{(\sum_i \delta_R ( \mathbf{C} , \mathbf{C}_i)^2)}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the gradient descent. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
""" # noqa
# init
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
nu = 1.0
tau = np.finfo(np.float64).max
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter) and (nu > tol):
k = k + 1
C12 = sqrtm(C)
Cm12 = invsqrtm(C)
J = np.zeros((n_channels, n_channels))
for index in range(n_trials):
tmp = np.dot(np.dot(Cm12, covmats[index, :, :]), Cm12)
J += sample_weight[index] * logm(tmp)
crit = np.linalg.norm(J, ord='fro')
h = nu * crit
C = np.dot(np.dot(C12, expm(nu * J)), C12)
if h < tau:
nu = 0.95 * nu
tau = h
else:
nu = 0.5 * nu
return C
def mean_logeuclid(covmats, sample_weight=None):
r"""Return the mean covariance matrix according to the log-Euclidean
metric.
.. math::
\mathbf{C} = \exp{(\frac{1}{N} \sum_i \log{\mathbf{C}_i})}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
T = np.zeros((n_channels, n_channels))
for index in range(n_trials):
T += sample_weight[index] * logm(covmats[index, :, :])
C = expm(T)
return C
def mean_kullback_sym(covmats, sample_weight=None):
"""Return the mean covariance matrix according to KL divergence.
This mean is the geometric mean between the Arithmetic and the Harmonic
mean, as shown in [1]_.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
References
----------
.. [1] Moakher, Maher, and Philipp G. Batchelor. "Symmetric
positive-definite matrices: From geometry to applications and
visualization." In Visualization and Processing of Tensor Fields, pp.
285-298. Springer Berlin Heidelberg, 2006.
"""
C_Arithmetic = mean_euclid(covmats, sample_weight)
C_Harmonic = mean_harmonic(covmats, sample_weight)
C = geodesic_riemann(C_Arithmetic, C_Harmonic, 0.5)
return C
def mean_harmonic(covmats, sample_weight=None):
r"""Return the harmonic mean of a set of covariance matrices.
.. math::
\mathbf{C} = \left(\frac{1}{N} \sum_i {\mathbf{C}_i}^{-1}\right)^{-1}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
T = np.zeros((n_channels, n_channels))
for index in range(n_trials):
T += sample_weight[index] * np.linalg.inv(covmats[index, :, :])
C = np.linalg.inv(T)
return C
def mean_logdet(covmats, tol=10e-5, maxiter=50, init=None, sample_weight=None):
r"""Return the mean covariance matrix according to the logdet metric.
This is an iterative procedure where the update is:
.. math::
\mathbf{C} = \left(\sum_i \left( 0.5 \mathbf{C} + 0.5 \mathbf{C}_i \right)^{-1} \right)^{-1}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter):
k = k + 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
J += sample_weight[index] * np.linalg.inv(0.5 * Ci + 0.5 * C)
Cnew = np.linalg.inv(J)
crit = np.linalg.norm(Cnew - C, ord='fro')
C = Cnew
return C
def mean_wasserstein(covmats, tol=10e-4, maxiter=50, init=None,
sample_weight=None):
r"""Return the mean covariance matrix according to the Wasserstein metric.
This is an iterative procedure where the update is [1]_:
.. math::
\mathbf{K} = \left(\sum_i \left( \mathbf{K} \mathbf{C}_i \mathbf{K} \right)^{1/2} \right)^{1/2}
with :math:`\mathbf{K} = \mathbf{C}^{1/2}`.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
References
----------
.. [1] Barbaresco, F. "Geometric Radar Processing based on Frechet distance:
Information geometry versus Optimal Transport Theory", Radar Symposium
(IRS), 2011 Proceedings International.
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
K = sqrtm(C)
crit = np.finfo(np.float64).max
# iterate until the Frobenius norm of the update falls below tol or maxiter is reached
while (crit > tol) and (k < maxiter):
k = k + 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = np.dot(np.dot(K, Ci), K)
J += sample_weight[index] * sqrtm(tmp)
Knew = sqrtm(J)
crit = np.linalg.norm(Knew - K, ord='fro')
K = Knew
if k == maxiter:
print('Max iter reached')
C = np.dot(K, K)
return C
def mean_euclid(covmats, sample_weight=None):
r"""Return the mean covariance matrix according to the Euclidean metric :
.. math::
\mathbf{C} = \frac{1}{N} \sum_i \mathbf{C}_i
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
return np.average(covmats, axis=0, weights=sample_weight)
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
"""Return the mean covariance matrix according using the AJD-based
log-Euclidean Mean (ALE). See [1].
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.4
References
----------
.. [1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
Diagonalization and Geometric Mean of Symmetric Positive Definite
Matrices', PLoS ONE, 2015
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
crit = np.inf
k = 0
# init with AJD
B, _ = ajd_pham(covmats)
while (crit > tol) and (k < maxiter):
k += 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
update = np.diag(np.diag(expm(J)))
B = np.dot(B, invsqrtm(update))
crit = distance_riemann(np.eye(n_channels), update)
A = np.linalg.inv(B)
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
C = np.dot(np.dot(A.T, expm(J)), A)
return C
def mean_alm(covmats, tol=1e-14, maxiter=100,
verbose=False, sample_weight=None):
r"""Return Ando-Li-Mathias (ALM) mean
Find the geometric mean recursively [1]_, generalizing from:
.. math::
\mathbf{C} = A^{\frac{1}{2}} \left( A^{-\frac{1}{2}} B A^{-\frac{1}{2}} \right)^{\frac{1}{2}} A^{\frac{1}{2}}
This recursive procedure may require a high number of iterations.
This is the adaptation of the Matlab code proposed by Dario Bini and
Bruno Iannazzo, http://bezout.dm.unipi.it/software/mmtoolbox/
Extremely slow, due to the recursive formulation.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop iterations
:param maxiter: maximum number of iteration, default 100
:param verbose: indicate when reaching maxiter
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.8.dev
References
----------
.. [1] T. Ando, C.-K. Li and R. Mathias, "Geometric Means", Linear Algebra
Appl. 385 (2004), 305-334.
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
C = covmats
C_iter = np.zeros_like(C)
n_trials = covmats.shape[0]
if n_trials == 2:
alpha = sample_weight[1] / sample_weight[0] / 2
X = geodesic_riemann(covmats[0], covmats[1], alpha=alpha)
return X
else:
for k in range(maxiter):
for h in range(n_trials):
s = np.mod(np.arange(h, h + n_trials - 1) + 1, n_trials)
C_iter[h] = mean_alm(C[s], sample_weight=sample_weight[s])
norm_iter = np.linalg.norm(C_iter[0] - C[0], 2)
norm_c = np.linalg.norm(C[0], 2)
if (norm_iter / norm_c) < tol:
break
C = deepcopy(C_iter)
else:
if verbose:
print('Max number of iterations reached')
return C_iter.mean(axis=0)
def mean_identity(covmats, sample_weight=None):
r"""Return the identity matrix corresponding to the covmats sit size
.. math::
\mathbf{C} = \mathbf{I}_d
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:returns: the identity matrix of size n_channels
"""
C = np.eye(covmats.shape[1])
return C
def mean_covariance(covmats, metric='riemann', sample_weight=None, *args):
"""Return the mean covariance matrix according to the metric
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param metric: the metric (default 'riemann'), can be : 'riemann',
'logeuclid', 'euclid', 'logdet', 'identity', 'wasserstein', 'ale',
'alm', 'harmonic', 'kullback_sym' or a callable function
:param sample_weight: the weight of each sample
:param args: the argument passed to the sub function
:returns: the mean covariance matrix
"""
if callable(metric):
C = metric(covmats, sample_weight=sample_weight, *args)
else:
C = mean_methods[metric](covmats, sample_weight=sample_weight, *args)
return C
mean_methods = {'riemann': mean_riemann,
'logeuclid': mean_logeuclid,
'euclid': mean_euclid,
'identity': mean_identity,
'logdet': mean_logdet,
'wasserstein': mean_wasserstein,
'ale': mean_ale,
'harmonic': mean_harmonic,
'kullback_sym': mean_kullback_sym,
'alm': mean_alm}
def _check_mean_method(method):
"""checks methods """
if isinstance(method, str):
if method not in mean_methods.keys():
raise ValueError('Unknown mean method')
else:
method = mean_methods[method]
elif not hasattr(method, '__call__'):
raise ValueError('mean method must be a function or a string.')
return method
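# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes numpy is available as `np`, as the functions above already
# require, and builds made-up random SPD matrices purely as a demo.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    A = rng.randn(10, 8, 8)
    # 10 symmetric positive-definite 8x8 matrices: A A^T is PSD, + 8*I makes it PD
    covmats_demo = A @ A.transpose(0, 2, 1) + 8 * np.eye(8)
    C_riemann = mean_covariance(covmats_demo, metric='riemann')
    C_logdet = mean_covariance(covmats_demo, metric='logdet')
    print(C_riemann.shape, C_logdet.shape)  # both (8, 8)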
| 32.795238
| 116
| 0.635908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,064
| 0.51285
|
bbfe214e8149d499ea33bd81fda220abbce8fdef
| 3,578
|
py
|
Python
|
fabfile.py
|
nprapps/sitemaps
|
6be4393d881b3c8766d35fbe479873247f05c13b
|
[
"FSFAP"
] | null | null | null |
fabfile.py
|
nprapps/sitemaps
|
6be4393d881b3c8766d35fbe479873247f05c13b
|
[
"FSFAP"
] | null | null | null |
fabfile.py
|
nprapps/sitemaps
|
6be4393d881b3c8766d35fbe479873247f05c13b
|
[
"FSFAP"
] | 1
|
2021-02-18T11:24:28.000Z
|
2021-02-18T11:24:28.000Z
|
#!/usr/bin/env python
from fabric.api import *
import app
import app_config
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
def production():
env.settings = 'production'
env.s3_buckets = app_config.PRODUCTION_S3_BUCKETS
def staging():
env.settings = 'staging'
env.s3_buckets = app_config.STAGING_S3_BUCKETS
"""
Template-specific functions
Changing the template functions should produce output
with fab render without any exceptions. Any file used
by the site templates should be rendered by fab render.
"""
def update_index():
"""
Downloads the Google Doc index as a CSV file.
"""
base_url = 'https://docs.google.com/spreadsheet/pub?key=%s&output=csv'
doc_url = base_url % app_config.SITEMAP_GOOGLE_DOC_KEY
local('curl -o data/index.csv "%s"' % doc_url)
def render():
"""
Render HTML templates and compile assets.
"""
update_index()
# Fake out deployment target
app_config.configure_targets(env.get('settings', None))
for rule in app.app.url_map.iter_rules():
rule_string = rule.rule
name = rule.endpoint
if name == 'static':
continue
filename = 'www' + rule_string
print 'Rendering %s' % (filename)
with app.app.test_request_context(path=rule_string):
view = app.__dict__[name]
content = view()[0]
with open(filename, 'w') as f:
f.write(content.encode('utf-8'))
# Un-fake-out deployment target
app_config.configure_targets(app_config.DEPLOYMENT_TARGET)
"""
Deployment
Changes to deployment requires a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
def _deploy_to_s3():
"""
Deploy the gzipped stuff to S3.
"""
s3cmd = 's3cmd -P --add-header=Cache-Control:max-age=5 --guess-mime-type --recursive --exclude-from gzip_types.txt put gzip/ %s'
s3cmd_gzip = 's3cmd -P --add-header=Cache-Control:max-age=5 --add-header=Content-encoding:gzip --guess-mime-type --recursive --exclude "*" --include-from gzip_types.txt put gzip/ %s'
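    # Note (added): the first command uploads everything except the file types
    # listed in gzip_types.txt; the second uploads only those file types and
    # tags them with a Content-encoding: gzip header so S3 serves them gzipped.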
for bucket in env.s3_buckets:
env.s3_bucket = bucket
local(s3cmd % ('s3://%(s3_bucket)s/' % env))
local(s3cmd_gzip % ('s3://%(s3_bucket)s/' % env))
def _gzip_www():
"""
Gzips everything in www and puts it all in gzip
"""
local('python gzip_www.py')
local('rm -rf gzip/live-data')
def deploy():
"""
Deploy the latest app to S3 and, if configured, to our servers.
"""
require('settings', provided_by=[production, staging])
render()
_gzip_www()
_deploy_to_s3()
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
def _confirm(message):
answer = prompt(message, default="Not at all")
if answer.lower() not in ('y', 'yes', 'buzz off', 'screw you'):
exit()
def shiva_the_destroyer():
"""
Deletes the app from s3
"""
require('settings', provided_by=[production, staging])
_confirm("You are about to destroy everything deployed to %(settings)s for this project.\nDo you know what you're doing?" % env)
with settings(warn_only=True):
s3cmd = 's3cmd del --recursive %s'
for bucket in env.s3_buckets:
env.s3_bucket = bucket
local(s3cmd % ('s3://%(s3_bucket)s/%(project_slug)s' % env))
| 27.312977
| 186
| 0.66322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,884
| 0.526551
|
bbfebfa3a6e07ffb390ccc9c51bbfd1c5eb387b7
| 2,531
|
py
|
Python
|
img-xlsx.py
|
jherskovic/img-xlsx
|
ba301b43c8a3df2282622e70904fcb2d55bad2a3
|
[
"CNRI-Python"
] | null | null | null |
img-xlsx.py
|
jherskovic/img-xlsx
|
ba301b43c8a3df2282622e70904fcb2d55bad2a3
|
[
"CNRI-Python"
] | 4
|
2019-08-25T13:16:03.000Z
|
2021-01-07T23:20:24.000Z
|
img-xlsx.py
|
jherskovic/img-xlsx
|
ba301b43c8a3df2282622e70904fcb2d55bad2a3
|
[
"CNRI-Python"
] | null | null | null |
from PIL import Image
from openpyxl import Workbook
from openpyxl.styles import PatternFill
from openpyxl.utils import get_column_letter
from functools import partial
import sys
import argparse
def rgb_to_xls_hex(rgb_tuple, image_mode='RGB'):
if image_mode == 'RGB':
r, g, b = rgb_tuple
elif image_mode == 'RGBA':
# Ignore alpha channel in images that have one.
r, g, b, _ = rgb_tuple
return f'{r:02x}{g:02x}{b:02x}'
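# Illustrative examples (added; not in the original file):
#   rgb_to_xls_hex((255, 0, 128)) -> 'ff0080'
#   rgb_to_xls_hex((255, 0, 128, 64), image_mode='RGBA') -> 'ff0080' (alpha dropped)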
def handle_arguments():
parser = argparse.ArgumentParser(description='Convert an image file to an Excel spreadsheet. I\'m sorry.')
parser.add_argument('--size', dest='size', type=int, default=64,
help='The number of cells for the largest dimension of the image. '
'Defaults to 64. Up to 512 works well for landscape images, up to 256 '
'for portrait images.')
parser.add_argument('--quantize', dest='quantize', metavar='NUM_COLORS', type=int, default=0,
help='Quantize the image (i.e. set an upper bound on the number of colors). '
'Max 255.')
parser.add_argument('image', metavar='FILENAME', type=str,
help='The image file to turn into an Excel spreadsheet. JPGs and PNGs work well.')
parser.add_argument('xlsx', metavar='FILENAME', type=str,
help='The output filename. Should end in .xlsx')
args = parser.parse_args()
return args
def convert(args):
im = Image.open(args.image)
maxsize = (args.size, args.size)
im.thumbnail(maxsize)
if args.quantize > 0 and args.quantize < 256:
quantized = im.quantize(colors=args.quantize)
im = quantized
if im.mode in ['P', 'L']:
image = im.convert("RGB")
else:
image = im
pixels=image.load()
pixel_converter = partial(rgb_to_xls_hex, image_mode=image.mode)
# Get the final image size
size_x, size_y = image.size
out_wb = Workbook()
out = out_wb.active
for y in range(size_y):
for x in range(size_x):
cell = out.cell(y+1, x+1)
rgb = pixels[x, y]
cell.fill = PatternFill("solid", fgColor=pixel_converter(rgb))
for col in range(1, size_x+1):
out.column_dimensions[get_column_letter(col)].width = 3
out_wb.save(args.xlsx)
if __name__ == "__main__":
args = handle_arguments()
convert(args)
| 33.746667
| 111
| 0.600948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 639
| 0.252469
|
bbff69aa5097c6b5253948d0d9595188ebebf3c2
| 7,502
|
py
|
Python
|
tests/test_multithread_access.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
tests/test_multithread_access.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
tests/test_multithread_access.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from dat_analysis.dat_object.dat_hdf import DatHDF
from dat_analysis.hdf_file_handler import HDFFileHandler
from dat_analysis.dat_object.make_dat import get_dat, get_dats, DatHandler
from tests.helpers import get_testing_Exp2HDF
from dat_analysis.data_standardize.exp_specific.Feb21 import Feb21Exp2HDF
import concurrent.futures
import os
import h5py
import numpy as np
import shutil
import time
from tests import helpers
dat_dir = os.path.abspath('fixtures/dats/2021Feb')
# Where to put outputs (i.e. DatHDFs)
output_dir = os.path.abspath('Outputs/test_multithread_access')
hdf_folder_path = os.path.join(output_dir, 'Dat_HDFs')
Testing_Exp2HDF = get_testing_Exp2HDF(dat_dir, output_dir, base_class=Feb21Exp2HDF)
def read(datnum: DatHDF):
dat = get_dat(datnum, exp2hdf=Testing_Exp2HDF)
val = dat._threaded_read_test()
return val
def write(datnum: DatHDF, value):
dat = get_dat(datnum, exp2hdf=Testing_Exp2HDF)
val = dat._threaded_write_test(value)
return val
def multithread_read(datnums):
with concurrent.futures.ThreadPoolExecutor(max_workers=len(datnums) + 3) as executor:
same_dat_results = [executor.submit(read, datnums[0]) for i in range(3)]
diff_dat_results = [executor.submit(read, num) for num in datnums]
same_dat_results = [r.result() for r in same_dat_results]
diff_dat_results = [r.result() for r in diff_dat_results]
return same_dat_results, diff_dat_results
class TestMultiAccess(TestCase):
def setUp(self):
"""
Note: This actually requires quite a lot of things to be working to run (get_dats does quite a lot of work)
Returns:
"""
print('running setup')
# SetUp before tests
helpers.clear_outputs(output_dir)
self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=True)
# if __name__ == '__main__':
# helpers.clear_outputs(output_dir)
# self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=True)
# else:
# self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=False)
def tearDown(self) -> None:
DatHandler().clear_dats()
def set_test_attrs(self, dats, values):
for dat, value in zip(dats, values):
with HDFFileHandler(dat.hdf.hdf_path, 'r+') as f:
# with h5py.File(dat.hdf.hdf_path, 'r+') as f:
f.attrs['threading_test_var'] = value
def test_threaded_read(self):
"""Check multiple read threads can run at the same time"""
dats = self.dats
values = [dat.datnum for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.dats)+10) as executor:
same_dat_results = [executor.submit(read, dats[0].datnum) for i in range(10)]
diff_dat_results = [executor.submit(read, dat.datnum) for dat in dats]
same_dat_results = [r.result() for r in same_dat_results]
diff_dat_results = [r.result() for r in diff_dat_results]
self.assertEqual(same_dat_results, [dats[0].datnum]*10)
self.assertEqual(diff_dat_results, [dat.datnum for dat in dats])
def test_threaded_write(self):
"""Check multiple threads trying to write at same time don't clash"""
dats = self.dats
values = ['not set' for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
same_dat_writes = [executor.submit(write, dats[0].datnum, i) for i in range(10)]
value = read(dats[0].datnum)
self.assertTrue(value in [r.result() for r in same_dat_writes]) # Check that the final value was one of the writes at least
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_writes = executor.map(lambda args: write(*args), [(dat.datnum, dat.datnum) for dat in dats])
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_reads = executor.map(read, [dat.datnum for dat in dats])
diff_dat_writes = [r for r in diff_dat_writes]
diff_dat_reads = [r for r in diff_dat_reads]
self.assertEqual(diff_dat_reads, diff_dat_writes)
def test_multiprocess_read(self):
"""Check multiple read threads can run at the same time"""
dats = self.dats
values = [dat.datnum for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ProcessPoolExecutor(max_workers=len(self.dats)+3) as executor:
same_dat_results = [executor.submit(read, dats[0].datnum) for i in range(3)]
diff_dat_results = [executor.submit(read, dat.datnum) for dat in dats]
same_dat_results = [r.result() for r in same_dat_results]
diff_dat_results = [r.result() for r in diff_dat_results]
self.assertEqual(same_dat_results, [dats[0].datnum]*3)
self.assertEqual(diff_dat_results, [dat.datnum for dat in dats])
def test_multiprocess_write_same_dat(self):
"""Check multiple threads trying to write at same time don't clash"""
dat = self.dats[0]
values = ['not set']
self.set_test_attrs([dat], values)
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
same_dat_writes = [executor.submit(write, dat.datnum, i) for i in range(3)]
value = read(dat.datnum)
self.assertTrue(value in [r.result() for r in same_dat_writes]) # Check that the final value was one of the writes at least
def test_multiprocess_write_multiple_dats(self):
"""Check multiple threads trying to write at same time don't clash"""
dats = self.dats
values = ['not set' for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ProcessPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_writes = [executor.submit(write, dat.datnum, dat.datnum) for dat in dats]
with concurrent.futures.ProcessPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_reads = [executor.submit(read, dat.datnum) for dat in dats]
diff_dat_writes = [r.result() for r in diff_dat_writes]
diff_dat_reads = [r.result() for r in diff_dat_reads]
self.assertEqual(diff_dat_reads, diff_dat_writes)
def test_hdf_write_inside_read(self):
dat = self.dats[0]
before, after = dat._write_inside_read_test()
print(before, after)
self.assertEqual(after, before + 1)
def test_hdf_read_inside_write(self):
dat = self.dats[0]
before, after = dat._read_inside_write_test()
print(before, after)
self.assertEqual(after, before + 1)
def test_multiprocess_multithread_read(self):
dats = self.dats
values = [dat.datnum for dat in dats]
self.set_test_attrs(dats, values)
datnums = [dat.datnum for dat in dats]
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
results = [executor.submit(multithread_read, datnums) for i in range(3)]
for r in results:
result = r.result()
same_nums, diff_nums = result
self.assertEqual(same_nums, [datnums[0]]*3)
self.assertEqual(diff_nums, datnums)
| 40.551351
| 132
| 0.681818
| 6,028
| 0.803519
| 0
| 0
| 0
| 0
| 0
| 0
| 1,098
| 0.146361
|
a51f8b0d486e0ae6fcf2e60b6ae5a88312c39cab
| 2,721
|
py
|
Python
|
early_projects/theater.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/theater.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/theater.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
import myshop
def movie(name):
    price_with_tax = round((9.99 * 1.07), 2)  # $9.99 ticket plus 7% sales tax
    print("Here is your Ticket and movie receipt.\n[Ticket for", name,
          " - $" + str(price_with_tax) + "]\nEnjoy the film!")
def concession():
print(" Refreshments:\n"
"Popcorn - $5.05\n"
"Coke - $2.19\n"
"Cookies - $1.50\n"
"Alright, you want to buy-\n")
a = int(input("How many Popcorn buckets? ").strip())
b = int(input("How many Cokes? ").strip())
c = int(input("How many Cookies? ").strip())
myshop.myshop(a, b, c)
def theater():
name = input("Hello! What is your name?").strip().capitalize()
film = input("Thank you for coming, " + name + "! " + "Welcome to "
"the Malco Theater!\n"
"What film would you like to go see today?\n"
" Films:\n"
"The Avengers: 8:00\n"
"Frozen: 7:00\n"
"Star Wars: 7:30\n"
"Harry Potter: 5:00\n"
"Shrek: 4:30\n"
"\n"
" Tickets: $9.99").strip().lower()
if film == "the avengers":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "frozen":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "star wars":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "harry potter":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "shrek":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
else:
print("Oh, did you change your mind...? Well then, have a nice day!")
theater()
| 32.011765
| 79
| 0.484748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,034
| 0.380007
|
a51f8b0f6e2a6c5f1924803b2a7a2c961da769d4
| 43,469
|
py
|
Python
|
TSScall-master/TSScall.py
|
AdelmanLab/GetGeneAnnotation_GGA
|
ae8c8328640892a4e50408ba566dd95e70f18d52
|
[
"MIT"
] | 1
|
2021-04-02T14:36:12.000Z
|
2021-04-02T14:36:12.000Z
|
TSScall-master/TSScall.py
|
AdelmanLab/GetGeneAnnotation_GGA
|
ae8c8328640892a4e50408ba566dd95e70f18d52
|
[
"MIT"
] | 3
|
2018-02-23T19:47:31.000Z
|
2019-07-15T16:58:54.000Z
|
TSScall-master/TSScall.py
|
AdelmanLab/GetGeneAnnotation_GGA
|
ae8c8328640892a4e50408ba566dd95e70f18d52
|
[
"MIT"
] | 1
|
2017-01-06T20:16:07.000Z
|
2017-01-06T20:16:07.000Z
|
#!/usr/bin/env python
# CREATED BY CHRISTOPHER LAVENDER
# BASED ON WORK BY ADAM BURKHOLDER
# INTEGRATIVE BIOINFORMATICS, NIEHS
# WORKING OBJECT ORIENTED VERSION
import os
import math
import argparse
import sys
from operator import itemgetter
def writeBedHeader(file_name, description, OUTPUT):
OUTPUT.write('track name="{}" description="{}"\n'.format(
file_name,
description,
))
# STRAND_STATUS IS USED TO DETERMINE IF STRAND IS USED IN SORT
def sortList(input_list, strand_status):
if strand_status == 'sort_by_strand':
return sorted(input_list, key=lambda k: (
k['strand'],
k['chromosome'],
k['start']
))
elif strand_status == 'ignore_strand':
return sorted(input_list, key=lambda k: (
k['chromosome'],
k['start']
))
# ENTRY 1 IS LESS THAN ENTRY 2?
def isLessThan(entry_1, entry_2):
for val in ['strand', 'chromosome', 'start']:
if entry_1[val] < entry_2[val]:
return True
elif entry_1[val] > entry_2[val]:
return False
return False
# ENTRY 1 IS WITHIN ENTRY 2?
def isWithin(entry_1, entry_2):
if entry_1['strand'] == entry_2['strand'] and\
entry_1['chromosome'] == entry_2['chromosome']:
if entry_1['start'] >= entry_2['start'] and\
entry_1['end'] <= entry_2['end']:
return True
return False
def getID(base_name, count):
max_entries = 999999
feature_name = base_name + '_'
for i in range(len(str(count)), len(str(max_entries))):
feature_name += '0'
feature_name += str(count)
return feature_name
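# Illustrative example (added): getID('obsTSS', 42) zero-pads the counter to
# the width of max_entries and returns 'obsTSS_000042'.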
def readInReferenceAnnotation(annotation_file):
reference_annotation = dict()
all_gtf_keys = []
with open(annotation_file) as f:
for line in f:
if not line.startswith('#'): # Check for headers
chromosome, source, feature, start, end, score, strand, \
frame, attributes = line.strip().split('\t')
if feature == 'transcript' or feature == 'exon':
keys = []
values = []
gtf_fields = dict()
for entry in attributes.split(';')[:-1]:
# Check for key-value pair
if len(entry.split('\"')) > 1:
keys.append(entry.split('\"')[0].strip())
values.append(entry.split('\"')[1].strip())
for key, value in zip(keys, values):
gtf_fields[key] = [value]
for key in keys:
if key not in all_gtf_keys:
all_gtf_keys.append(key)
tr_id = gtf_fields.pop('transcript_id')[0]
gene_id = gtf_fields.pop('gene_id')[0]
for val in ('transcript_id', 'gene_id'):
all_gtf_keys.remove(val)
if feature == 'exon':
ref_id = (tr_id, chromosome)
if ref_id not in reference_annotation:
reference_annotation[ref_id] = {
'chromosome': chromosome,
'strand': strand,
'exons': [],
'gene_id': gene_id,
'gtf_fields': gtf_fields,
}
reference_annotation[ref_id]['exons'].append(
[int(start), int(end)]
)
for ref_id in reference_annotation:
t = reference_annotation[ref_id]
# TAKE ADDITIONAL INFORMATION FROM EXON LISTS
t['exons'].sort(key=lambda x: x[0])
t['tr_start'] = t['exons'][0][0]
t['tr_end'] = t['exons'][len(t['exons'])-1][1]
if t['strand'] == '+':
t['tss'] = t['tr_start']
if t['strand'] == '-':
t['tss'] = t['tr_end']
t['gene_length'] = t['tr_end'] - t['tr_start']
# POPULATE MISSING GTF FIELD ENTRIES
for key in all_gtf_keys:
if key not in t['gtf_fields']:
t['gtf_fields'][key] = [None]
return reference_annotation, all_gtf_keys
class TSSCalling(object):
def __init__(self, **kwargs):
self.forward_bedgraph = kwargs['forward_bedgraph']
self.reverse_bedgraph = kwargs['reverse_bedgraph']
self.chrom_sizes = kwargs['chrom_sizes']
self.annotation_file = kwargs['annotation_file']
self.output_bed = kwargs['output_bed']
assert os.path.exists(self.forward_bedgraph)
assert os.path.exists(self.reverse_bedgraph)
assert os.path.exists(self.chrom_sizes)
if self.annotation_file:
assert os.path.exists(self.annotation_file)
self.fdr_threshold = kwargs['fdr']
self.false_positives = kwargs['false_positives']
self.utss_filter_size = kwargs['utss_filter_size']
self.utss_search_window = kwargs['utss_search_window']
self.bidirectional_threshold = kwargs['bidirectional_threshold']
self.cluster_threshold = kwargs['cluster_threshold']
self.detail_file = kwargs['detail_file']
self.cluster_bed = kwargs['cluster_bed']
self.call_method = kwargs['call_method']
self.annotation_join_distance = kwargs['annotation_join_distance']
self.annotation_search_window = kwargs['annotation_search_window']
self.bin_winner_size = kwargs['bin_winner_size']
self.set_read_threshold = kwargs['set_read_threshold']
try:
int(self.set_read_threshold)
except (TypeError, ValueError):
pass
else:
self.set_read_threshold = int(self.set_read_threshold)
# EVALUATE THRESHOLD METHOD ARGUMENTS; IF NONE, SET FDR_THRESHOLD
# AT 0.001
implied_threshold_methods = 0
for val in [
self.fdr_threshold,
self.false_positives,
self.set_read_threshold]:
implied_threshold_methods += int(bool(val))
if implied_threshold_methods == 1:
pass
elif implied_threshold_methods > 1:
raise ValueError('More than 1 read threshold method implied!!')
elif implied_threshold_methods == 0:
self.fdr_threshold = 0.001
self.tss_list = []
self.reference_annotation = None
self.gtf_attribute_fields = []
self.annotated_tss_count = 0
self.unannotated_tss_count = 0
self.tss_cluster_count = 0
self.unobserved_ref_count = 0
self.execute()
def createSearchWindowsFromAnnotation(self):
# VALUE USED TO MERGE SEARCH WINDOWS BY PROXIMITY
join_window = self.annotation_join_distance
window_size = self.annotation_search_window
current_entry = sorted(self.reference_annotation, key=lambda k: (
self.reference_annotation[k]['strand'],
self.reference_annotation[k]['chromosome'],
self.reference_annotation[k]['tss'],
# self.reference_annotation[k]['gene'],
k,
))
# POPULATE TRANSCRIPT LIST FROM SORTED LIST;
# ADD SEARCH WINDOW EDGES TO ENTRIES
transcript_list = []
for ref in current_entry:
transcript_list.append({
'transcript_id': [ref[0]],
'chromosome':
self.reference_annotation[ref]['chromosome'],
'tss': [self.reference_annotation[ref]['tss']],
'strand': self.reference_annotation[ref]['strand'],
'gene_id': [self.reference_annotation[ref]['gene_id']],
'hits': [],
'gtf_fields': self.reference_annotation[ref]['gtf_fields'],
})
if self.reference_annotation[ref]['strand'] == '+':
transcript_list[-1]['start'] = \
transcript_list[-1]['tss'][0] - window_size
# MAKE SURE WINDOW END DOES NOT GO PAST TRANSCRIPT END
end = transcript_list[-1]['tss'][0] + window_size
if end > self.reference_annotation[ref]['tr_end']:
transcript_list[-1]['end'] = \
self.reference_annotation[ref]['tr_end']
else:
transcript_list[-1]['end'] = end
elif self.reference_annotation[ref]['strand'] == '-':
# MAKE SURE WINDOW START DOES NOT GO PAST TRANSCRIPT START
start = transcript_list[-1]['tss'][0] - window_size
if start < self.reference_annotation[ref]['tr_start']:
transcript_list[-1]['start'] = \
    self.reference_annotation[ref]['tr_start']
else:
transcript_list[-1]['start'] = start
transcript_list[-1]['end'] = \
transcript_list[-1]['tss'][0] + window_size
merged_windows = []
# MERGE WINDOWS BASED PROXIMITY;
# IF WINDOWS ARE WITHIN JOIN THRESHOLD, THEY ARE MERGED;
# IF NOT, BUT STILL OVERLAPPING, MIDPOINT BECOMES BOUNDARY
working_entry = transcript_list.pop(0)
while len(transcript_list) != 0:
next_entry = transcript_list.pop(0)
if (working_entry['strand'] == next_entry['strand']) and \
(working_entry['chromosome'] == next_entry['chromosome']):
if working_entry['tss'][-1] + join_window >= \
next_entry['tss'][0]:
working_entry['transcript_id'].append(
next_entry['transcript_id'][0]
)
working_entry['gene_id'].append(
next_entry['gene_id'][0]
)
for key in working_entry['gtf_fields']:
working_entry['gtf_fields'][key].append(
next_entry['gtf_fields'][key][0]
)
# working_entry['genes'].append(next_entry['genes'][0])
working_entry['end'] = next_entry['end']
working_entry['tss'].append(next_entry['tss'][0])
elif working_entry['end'] >= next_entry['start']:
working_entry['end'] = int(math.floor(
(working_entry['end']+next_entry['start'])/2
))
next_entry['start'] = working_entry['end'] + 1
merged_windows.append(working_entry)
working_entry = next_entry
else:
merged_windows.append(working_entry)
working_entry = next_entry
else:
merged_windows.append(working_entry)
working_entry = next_entry
merged_windows.append(working_entry)
return merged_windows
def combineAndSortBedGraphs(self, forward_bedgraph, reverse_bedgraph):
def readBedGraph(bedgraph_list, bedgraph_fn, strand):
with open(bedgraph_fn) as f:
for line in f:
if not ('track' in line or line == '\n'):
chromosome, start, end, reads = line.strip().split()
for i in range(int(start)+1, int(end)+1):
bedgraph_list.append({
'chromosome': chromosome,
'start': i,
'end': i,
'reads': int(reads),
'strand': strand
})
combined_list = []
readBedGraph(combined_list, forward_bedgraph, '+')
readBedGraph(combined_list, reverse_bedgraph, '-')
return sortList(combined_list, 'sort_by_strand')
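    # Note (added): readBedGraph expands each bedGraph interval into one entry
    # per base; e.g. a line "chr1  100  103  5" becomes entries at 1-based
    # positions 101, 102 and 103, each carrying 5 reads.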
# CONSIDERS TAB-DELIMITED CHROM_SIZES FILE (UCSC)
def findGenomeSize(self, chrom_sizes):
genome_size = 0
with open(chrom_sizes) as f:
for line in f:
genome_size += int(line.strip().split()[1])
return genome_size
# FIND THRESHOLD FOR TSS CALLING, BASED ON
# JOTHI ET AL. (2008) NUCLEIC ACIDS RES 36: 5221-5231.
def findReadThreshold(self, bedgraph_list, genome_size):
def countLoci(bedgraph_list, value):
loci = 0
for entry in bedgraph_list:
if entry['reads'] >= value:
loci += 1
return loci
if self.fdr_threshold or self.false_positives:
self.false_positives = 1
mappable_size = 0.8 * 2 * float(genome_size)
read_count = 0
for entry in bedgraph_list:
read_count += entry['reads']
expected_count = float(read_count)/mappable_size
cume_probability = ((expected_count**0)/math.factorial(0)) * \
math.exp(-expected_count)
threshold = 1
while True:
probability = 1 - cume_probability
expected_loci = probability * mappable_size
if self.fdr_threshold:
observed_loci = countLoci(bedgraph_list, threshold)
fdr = float(expected_loci)/float(observed_loci)
if fdr < self.fdr_threshold:
return threshold
else:
if expected_loci < self.false_positives:
return threshold
cume_probability += \
((expected_count**threshold)/math.factorial(threshold)) * \
math.exp(-expected_count)
threshold += 1
else:
return self.set_read_threshold
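    # Illustrative sketch (added; numbers hypothetical): with ~2e7 reads over a
    # mappable size of ~4.8e9 bp, expected_count is ~4e-3 reads per position;
    # the loop above raises the threshold until the expected number of positions
    # at or above it (Poisson tail * mappable size) satisfies the FDR or
    # false-positive target.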
# FIND INTERSECTION WITH SEARCH_WINDOWS, BEDGRAPH_LIST;
# HITS ARE ADDED TO WINDOW_LIST, REQUIRES SORTED LIST
def findIntersectionWithBedGraph(self, search_windows, bedgraph_list):
search_index = 0
bedgraph_index = 0
while (search_index < len(search_windows)) and \
(bedgraph_index < len(bedgraph_list)):
if isWithin(bedgraph_list[bedgraph_index],
search_windows[search_index]):
search_windows[search_index]['hits'].append([
bedgraph_list[bedgraph_index]['start'],
bedgraph_list[bedgraph_index]['reads']
])
bedgraph_index += 1
else:
if isLessThan(bedgraph_list[bedgraph_index],
search_windows[search_index]):
bedgraph_index += 1
else:
search_index += 1
# CREATE WINDOWS ABOUT KNOWN TSS FOR UNANNOTATED TSSs CALLING;
# CONSIDERS ANNOTATED AND CALLED TSSs IN INSTANCE LISTS
def createFilterWindowsFromAnnotationAndCalledTSSs(self):
filter_windows = []
if self.reference_annotation:
for transcript in self.reference_annotation:
filter_windows.append({
'strand': self.reference_annotation[transcript]['strand'],
'chromosome':
self.reference_annotation[transcript]['chromosome'],
'start': self.reference_annotation[transcript]['tss'] -
self.utss_filter_size,
'end': self.reference_annotation[transcript]['tss'] +
self.utss_filter_size
})
if self.tss_list != []:
for tss in self.tss_list:
filter_windows.append({
'strand': tss['strand'],
'chromosome': tss['chromosome'],
'start': tss['start'] - self.utss_filter_size,
'end': tss['start'] + self.utss_filter_size
})
return sortList(filter_windows, 'sort_by_strand')
def filterBedGraphListByWindows(self, bedgraph_list, filter_windows):
# FILTER BY OVERLAP WITH FILTER WINDOWS
if filter_windows != []:
filter_index = 0
bedgraph_index = 0
working_list = []
while (filter_index < len(filter_windows)) and \
(bedgraph_index < len(bedgraph_list)):
if isWithin(bedgraph_list[bedgraph_index],
filter_windows[filter_index]):
bedgraph_index += 1
else:
if isLessThan(bedgraph_list[bedgraph_index],
filter_windows[filter_index]):
working_list.append(bedgraph_list[bedgraph_index])
bedgraph_index += 1
else:
filter_index += 1
bedgraph_list = working_list
return bedgraph_list
# CREATES WINDOWS FOR UNANNOTATED TSS CALLING
def createUnannotatedSearchWindowsFromBedgraph(self,
bedgraph_list,
read_threshold):
windows = []
for entry in bedgraph_list:
if entry['reads'] > read_threshold:
windows.append({
'strand': entry['strand'],
'chromosome': entry['chromosome'],
'start': entry['start'] - self.utss_search_window,
'end': entry['end'] + self.utss_search_window,
'hits': []
})
# MERGE OVERLAPPING WINDOWS
merged_windows = []
working_entry = windows.pop(0)
while len(windows) != 0:
next_entry = windows.pop(0)
if (working_entry['strand'] == next_entry['strand']) and\
(working_entry['chromosome'] == next_entry['chromosome']):
if working_entry['end'] >= next_entry['start']:
working_entry['end'] = next_entry['end']
else:
merged_windows.append(working_entry)
working_entry = next_entry
else:
merged_windows.append(working_entry)
working_entry = next_entry
return merged_windows
# SORT CALLED TSSs AND ASSOCIATE INTO BIDIRECTIONAL PAIRS
def associateBidirectionalTSSs(self):
self.tss_list = sortList(self.tss_list, 'ignore_strand')
for i in range(len(self.tss_list)-1):
if self.tss_list[i]['chromosome'] == \
self.tss_list[i+1]['chromosome']:
if self.tss_list[i]['strand'] == '-' and \
self.tss_list[i+1]['strand'] == '+':
if self.tss_list[i]['start'] + \
self.bidirectional_threshold >= \
self.tss_list[i+1]['start']:
distance = abs(self.tss_list[i]['start'] -
self.tss_list[i+1]['start'])
self.tss_list[i]['divergent partner'] = \
self.tss_list[i+1]['id']
self.tss_list[i+1]['divergent partner'] = \
self.tss_list[i]['id']
self.tss_list[i]['divergent distance'] = distance
self.tss_list[i+1]['divergent distance'] = distance
if self.tss_list[i]['strand'] == '+' and \
self.tss_list[i+1]['strand'] == '-':
if self.tss_list[i]['start'] + \
self.bidirectional_threshold >= \
self.tss_list[i+1]['start']:
distance = abs(self.tss_list[i]['start'] -
self.tss_list[i+1]['start'])
self.tss_list[i]['convergent partner'] = \
self.tss_list[i+1]['id']
self.tss_list[i+1]['convergent partner'] = \
self.tss_list[i]['id']
self.tss_list[i]['convergent distance'] = distance
self.tss_list[i+1]['convergent distance'] = distance
def findTSSExonIntronOverlap(self):
exons = []
introns = []
if self.reference_annotation:
for transcript in self.reference_annotation:
for i in range(len(
self.reference_annotation[transcript]['exons'])):
strand = self.reference_annotation[transcript]['strand']
chromosome =\
self.reference_annotation[transcript]['chromosome']
start =\
self.reference_annotation[transcript]['exons'][i][0]
end = self.reference_annotation[transcript]['exons'][i][1]
exons.append({
'strand': strand,
'chromosome': chromosome,
'start': start,
'end': end
})
for i in range(
len(self.reference_annotation[transcript]['exons'])-1):
strand = self.reference_annotation[transcript]['strand']
chromosome =\
self.reference_annotation[transcript]['chromosome']
start = \
self.reference_annotation[transcript]['exons'][i][1]+1
end = \
self.reference_annotation[transcript]['exons'][i+1][0]\
- 1
introns.append({
'strand': strand,
'chromosome': chromosome,
'start': start,
'end': end
})
exons = sortList(exons, 'sort_by_strand')
introns = sortList(introns, 'sort_by_strand')
self.tss_list = sortList(self.tss_list, 'sort_by_strand')
def findFeatureOverlap(tss_list, feature_list, feature_key):
if feature_list == []:
for tss in tss_list:
tss[feature_key] = False
else:
feature_index = 0
tss_index = 0
while (feature_index < len(feature_list)) and\
(tss_index < len(tss_list)):
if isWithin(tss_list[tss_index],
feature_list[feature_index]):
tss_list[tss_index][feature_key] = True
tss_index += 1
else:
if isLessThan(tss_list[tss_index],
feature_list[feature_index]):
tss_list[tss_index][feature_key] = False
tss_index += 1
else:
feature_index += 1
findFeatureOverlap(self.tss_list, exons, 'exon_overlap')
findFeatureOverlap(self.tss_list, introns, 'intron_overlap')
# ASSOCIATE TSSs INTO CLUSTERS BY PROXIMITY;
# ADD TSS CLUSTER AND NUMBER OF TSSs IN ASSOCIATED CLUSTER IN TSS ENTRY
def associateTSSsIntoClusters(self):
cluster_count = dict()
self.tss_list = sortList(self.tss_list, 'ignore_strand')
current_cluster = getID('cluster', self.tss_cluster_count)
self.tss_cluster_count += 1
self.tss_list[0]['cluster'] = current_cluster
cluster_count[current_cluster] = 1
for i in range(1, len(self.tss_list)):
if not (self.tss_list[i-1]['chromosome'] ==
self.tss_list[i]['chromosome'] and
self.tss_list[i-1]['start'] + self.cluster_threshold >=
self.tss_list[i]['start']):
current_cluster = getID('cluster', self.tss_cluster_count)
self.tss_cluster_count += 1
self.tss_list[i]['cluster'] = current_cluster
if current_cluster not in cluster_count:
cluster_count[current_cluster] = 1
else:
cluster_count[current_cluster] += 1
for tss in self.tss_list:
tss['cluster_count'] = cluster_count[tss['cluster']]
def createDetailFile(self):
def checkHits(window):
for hit in window['hits']:
if hit[1] >= self.read_threshold:
return True
return False
def writeUnobservedEntry(OUTPUT, tss, tr_ids, gene_ids, window):
tss_id = getID('annoTSS', self.unobserved_ref_count)
self.unobserved_ref_count += 1
transcripts = tr_ids[0]
genes = gene_ids[0]
for i in range(1, len(tr_ids)):
transcripts += ';' + tr_ids[i]
genes += ';' + gene_ids[i]
reads = 0
for hit in window['hits']:
if int(tss) == int(hit[0]):
reads = hit[1]
OUTPUT.write(('{}' + '\t{}' * 15)
.format(
tss_id,
'unobserved reference TSS',
transcripts,
genes,
window['strand'],
window['chromosome'],
str(tss),
str(reads),
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
))
for key in self.gtf_attribute_fields:
# OUTPUT.write('\t' + ';'.join(window['gtf_fields'][key]))
OUTPUT.write('\t' + ';'.join(['None' if v is None else v for
v in window['gtf_fields'][key]]))
OUTPUT.write('\n')
# self.findTSSExonIntronOverlap()
# self.associateTSSsIntoClusters()
# Remove GTF fields 'exon_number' and 'exon_id' if present
skip_fields = ['exon_number', 'exon_id']
for entry in skip_fields:
if entry in self.gtf_attribute_fields:
self.gtf_attribute_fields.remove(entry)
with open(self.detail_file, 'w') as OUTPUT:
OUTPUT.write(
('{}' + '\t{}' * 15)
.format(
'TSS ID',
'Type',
'Transcripts',
'Gene ID',
'Strand',
'Chromosome',
'Position',
'Reads',
'Divergent?',
'Divergent partner',
'Divergent distance',
'Convergent?',
'Convergent partner',
'Convergent distance',
'TSS cluster',
'TSSs in associated cluster',
))
for field in self.gtf_attribute_fields:
OUTPUT.write('\t' + field)
OUTPUT.write('\n')
for tss in self.tss_list:
OUTPUT.write(tss['id'])
OUTPUT.write('\t' + tss['type'])
for key in ('transcript_id', 'gene_id'):
if key in tss:
OUTPUT.write('\t' + ';'.join(tss[key]))
else:
OUTPUT.write('\tNA')
for entry in ['strand', 'chromosome', 'start', 'reads']:
OUTPUT.write('\t' + str(tss[entry]))
if 'divergent partner' in tss:
OUTPUT.write('\t{}\t{}\t{}'.format(
'True',
tss['divergent partner'],
str(tss['divergent distance']),
))
else:
OUTPUT.write('\tFalse\tNA\tNA')
if 'convergent partner' in tss:
OUTPUT.write('\t{}\t{}\t{}'.format(
'True',
tss['convergent partner'],
str(tss['convergent distance']),
))
else:
OUTPUT.write('\tFalse\tNA\tNA')
# OUTPUT.write('\t' + str(
# tss['exon_overlap'] or tss['intron_overlap']))
for entry in [
'cluster',
'cluster_count']:
OUTPUT.write('\t' + str(tss[entry]))
if 'gtf_fields' in tss:
for key in self.gtf_attribute_fields:
# OUTPUT.write('\t' + ';'.join(tss['gtf_fields'][key]))
OUTPUT.write('\t' + ';'.join(
['None' if v is None else
v for v in tss['gtf_fields'][key]]
))
else:
for key in self.gtf_attribute_fields:
OUTPUT.write('\tNA')
OUTPUT.write('\n')
if self.annotation_file:
for window in self.ref_search_windows:
if not checkHits(window):
window_tss = []
for tr_id, gene_id, tss in zip(window['transcript_id'],
window['gene_id'],
window['tss']):
window_tss.append({
'transcript_id': tr_id,
'gene_id': gene_id,
'tss': int(tss),
})
window_tss.sort(key=itemgetter('tss'))
current_tss = window_tss[0]['tss']
current_tr_ids = [window_tss[0]['transcript_id']]
current_genes = [window_tss[0]['gene_id']]
window_index = 1
while window_index < len(window_tss):
if current_tss == window_tss[window_index]['tss']:
current_tr_ids.append(
window_tss[window_index]['transcript_id'])
current_genes.append(
window_tss[window_index]['gene_id'])
else:
writeUnobservedEntry(OUTPUT, current_tss,
current_tr_ids,
current_genes,
window)
current_tss = window_tss[window_index]['tss']
current_tr_ids = \
[window_tss[window_index]['transcript_id']]
current_genes = [window_tss[window_index]['gene_id']]
window_index += 1
writeUnobservedEntry(OUTPUT, current_tss,
current_tr_ids, current_genes,
window)
def writeClusterBed(self, tss_list, cluster_bed):
clusters = dict()
with open(cluster_bed, 'w') as OUTPUT:
writeBedHeader(
cluster_bed.split('.bed')[0],
'TSScall clusters',
OUTPUT,
)
for tss in tss_list:
if tss['cluster'] in clusters:
clusters[tss['cluster']]['tss'].append(tss['start'])
else:
clusters[tss['cluster']] = {
'chromosome': tss['chromosome'],
'tss': [tss['start']],
}
for cluster in sorted(clusters):
tss = sorted(clusters[cluster]['tss'])
OUTPUT.write('{}\t{}\t{}\t{}\n'.format(
clusters[cluster]['chromosome'],
str(tss[0] - 1),
str(tss[-1]),
cluster,
))
def writeBedFile(self, tss_list, output_bed):
with open(output_bed, 'w') as OUTPUT:
writeBedHeader(
output_bed.split('.bed')[0],
'TSScall TSSs',
OUTPUT,
)
for tss in tss_list:
OUTPUT.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
tss['chromosome'],
str(tss['start'] - 1),
str(tss['start']),
tss['id'],
'0',
tss['strand']
))
# FROM HITS IN SEARCH WINDOWS, CALL TSSs
# COUNT IS RETURNED IN ORDER TO UPDATE INSTANCE VARIABLES
def callTSSsFromIntersection(self, intersection, read_threshold, base_name,
count, tss_type, nearest_allowed):
def callTSS(hits, strand):
if self.call_method == 'global':
max_reads = float('-inf')
max_position = None
for hit in hits:
if hit[1] > max_reads:
max_position = hit[0]
max_reads = hit[1]
elif hit[1] == max_reads:
if strand == '+':
if hit[0] < max_position:
max_position = hit[0]
elif strand == '-':
if hit[0] > max_position:
max_position = hit[0]
return max_position, max_reads
if self.call_method == 'bin_winner':
bin_size = self.bin_winner_size
bins = []
# MAKE BINS
hits.sort(key=itemgetter(0))
for i in range(len(hits)):
bins.append({
'total_reads': 0,
'bin_hits': []
})
for j in range(i, len(hits)):
if abs(hits[i][0] - hits[j][0]) <= bin_size:
bins[-1]['total_reads'] += hits[j][1]
bins[-1]['bin_hits'].append(hits[j])
# SELECT BIN WITH HIGHEST TOTAL READS
# BECAUSE SORTED, WILL TAKE UPSTREAM BIN IN TIES
max_bin_reads = float('-inf')
max_bin_index = None
for i, entry in enumerate(bins):
if entry['total_reads'] > max_bin_reads:
max_bin_index = i
max_bin_reads = entry['total_reads']
# GET LOCAL WINNER
# BECAUSE SORTED, WILL TAKE UPSTREAM TSS IN TIES
max_reads = float('-inf')
max_position = None
for hit in bins[max_bin_index]['bin_hits']:
if hit[1] > max_reads:
max_position = hit[0]
max_reads = hit[1]
return max_position, max_reads
# ITERATE THROUGH WINDOWS IN INTERSECTION
for entry in intersection:
entry_hits = entry['hits']
# LOOP WHILE 'HITS' IS POPULATED
while len(entry_hits) != 0:
# CALL A TSS
tss_position, tss_reads = callTSS(entry_hits, entry['strand'])
if tss_reads >= read_threshold:
self.tss_list.append({
'id': getID(base_name, count),
'type': tss_type,
'start': tss_position,
'end': tss_position,
'reads': tss_reads,
})
# IF VAL IN ENTRY, ADD TO DICT IN TSS LIST
for val in ['transcript_id', 'gene_id', 'strand',
'chromosome', 'gtf_fields']:
if val in entry:
self.tss_list[-1][val] = entry[val]
count += 1
# GO THROUGH HITS, KEEP THOSE WITHIN NEAREST_ALLOWED
temp = []
for hit in entry_hits:
if abs(hit[0] - tss_position) > nearest_allowed:
temp.append(hit)
entry_hits = temp
return count
def callTSSsFromAnnotation(self, bedgraph_list, read_threshold):
self.ref_search_windows = self.createSearchWindowsFromAnnotation()
self.findIntersectionWithBedGraph(self.ref_search_windows,
bedgraph_list)
self.annotated_tss_count = self.callTSSsFromIntersection(
self.ref_search_windows,
read_threshold,
'obsTSS',
self.annotated_tss_count,
'called from reference window',
float('inf')
)
def callUnannotatedTSSs(self, bedgraph_list, read_threshold):
filter_windows = self.createFilterWindowsFromAnnotationAndCalledTSSs()
filtered_bedgraph = self.filterBedGraphListByWindows(bedgraph_list,
filter_windows)
unannotated_search_windows =\
self.createUnannotatedSearchWindowsFromBedgraph(filtered_bedgraph,
read_threshold)
self.findIntersectionWithBedGraph(unannotated_search_windows,
filtered_bedgraph)
self.unannotated_tss_count = self.callTSSsFromIntersection(
unannotated_search_windows,
read_threshold,
'uTSS',
self.unannotated_tss_count,
'unannotated',
self.utss_search_window
)
def execute(self):
sys.stdout.write('Reading in bedGraph files...\n')
bedgraph_list = self.combineAndSortBedGraphs(self.forward_bedgraph,
self.reverse_bedgraph)
genome_size = self.findGenomeSize(self.chrom_sizes)
sys.stdout.write('Calculating read threshold...\n')
self.read_threshold = \
self.findReadThreshold(bedgraph_list, genome_size)
sys.stdout.write('Read threshold set to {}\n'.format(
str(self.read_threshold)))
if self.annotation_file:
sys.stdout.write('Reading in annotation file...\n')
self.reference_annotation, self.gtf_attribute_fields =\
readInReferenceAnnotation(self.annotation_file)
sys.stdout.write('Calling TSSs from annotation...\n')
self.callTSSsFromAnnotation(bedgraph_list, self.read_threshold)
sys.stdout.write('{} TSSs called from annotation\n'.format(
str(self.annotated_tss_count)))
sys.stdout.write('Calling unannotated TSSs...\n')
self.callUnannotatedTSSs(bedgraph_list, self.read_threshold)
sys.stdout.write('{} unannotated TSSs called\n'.format(
str(self.unannotated_tss_count)))
sys.stdout.write('Associating bidirectional TSSs...\n')
self.associateBidirectionalTSSs()
self.associateTSSsIntoClusters()
if self.detail_file:
sys.stdout.write('Creating detail file...\n')
self.createDetailFile()
if self.cluster_bed:
sys.stdout.write('Creating cluster bed...\n')
self.writeClusterBed(self.tss_list, self.cluster_bed)
sys.stdout.write('Creating output bed...\n')
self.writeBedFile(self.tss_list, self.output_bed)
sys.stdout.write('TSS calling complete\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fdr', default=None, type=float,
help='set read threshold by FDR (FLOAT) (Default \
method: less than 0.001)')
parser.add_argument('--false_positives', default=None, type=int,
help='set read threshold by false positive count')
parser.add_argument('--utss_filter_size', default=750, type=int,
help='set uTSS filter size; any read within INTEGER \
of obsTSS/annoTSS is filtered prior to uTSS calling \
(Default: 750)')
parser.add_argument('--utss_search_window', default=250, type=int,
help='set uTSS search window size to INTEGER \
(Default: 250)')
parser.add_argument('--bidirectional_threshold', default=1000, type=int,
help='INTEGER threshold to associate bidirectional \
TSSs (Default: 1000)')
parser.add_argument('--detail_file', default=None, type=str,
help='create a tab-delimited TXT file with details \
about TSS calls')
parser.add_argument('--cluster_threshold', default=1000, type=int,
help='INTEGER threshold to associate TSSs into \
clusters (Default: 1000)')
parser.add_argument('--annotation_file', '-a', type=str,
help='annotation in GTF format')
parser.add_argument('--call_method', type=str, default='bin_winner',
choices=['global', 'bin_winner'],
help='TSS calling method to use (Default: bin_winner)')
parser.add_argument('--annotation_join_distance', type=int, default=200,
help='set INTEGER distance threshold for joining search \
windows from annotation (Default: 200)')
parser.add_argument('--annotation_search_window', type=int, default=1000,
help='set annotation search window size to INTEGER \
(Default: 1000)')
parser.add_argument('--set_read_threshold', type=float, default=None,
help='set read threshold for TSS calling to FLOAT; do \
not determine threshold from data')
parser.add_argument('--bin_winner_size', type=int, default=200,
help='set bin size for call method bin_winner \
(Default: 200)')
parser.add_argument('--cluster_bed', type=str, default=None,
help='write clusters to output bed file')
parser.add_argument('forward_bedgraph', type=str,
help='forward strand Start-seq bedgraph file')
parser.add_argument('reverse_bedgraph', type=str,
help='reverse strand Start-seq bedgraph file')
parser.add_argument('chrom_sizes', type=str,
help='standard tab-delimited chromosome sizes file')
parser.add_argument('output_bed', type=str, help='output TSS BED file')
args = parser.parse_args()
TSSCalling(**vars(args))
| 43.996964
| 80
| 0.494835
| 35,901
| 0.825899
| 0
| 0
| 0
| 0
| 0
| 0
| 8,240
| 0.18956
|
a51fd6b2b0c4c430c0e920bd959a2e1d06f3221b
| 234
|
py
|
Python
|
grayToBinary.py
|
gaurav3dua/OpenCV
|
d816158c40c35b897ce9873c176ce72735220069
|
[
"MIT"
] | 1
|
2018-11-25T19:30:22.000Z
|
2018-11-25T19:30:22.000Z
|
grayToBinary.py
|
gaurav3dua/OpenCV
|
d816158c40c35b897ce9873c176ce72735220069
|
[
"MIT"
] | null | null | null |
grayToBinary.py
|
gaurav3dua/OpenCV
|
d816158c40c35b897ce9873c176ce72735220069
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
img = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
thresh = 127
im_bw = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
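# cv2.threshold returns a (retval, dst) tuple; [1] keeps the binarized image,
# where pixels above 127 become 255 and all others become 0 (THRESH_BINARY).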
cv2.imshow('image', im_bw)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 21.272727
| 62
| 0.713675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.07265
|
a52068720298fd51fbb513a22dc8a2e7f0bdd3f1
| 652
|
py
|
Python
|
006-argparse.py
|
KitchenTableCoders/cli-video
|
35cacc059f6ac86c7bf6b1f86f42ea178e16165c
|
[
"MIT"
] | 6
|
2016-03-06T05:51:06.000Z
|
2017-01-10T05:49:03.000Z
|
006-argparse.py
|
KitchenTableCoders/cli-video
|
35cacc059f6ac86c7bf6b1f86f42ea178e16165c
|
[
"MIT"
] | null | null | null |
006-argparse.py
|
KitchenTableCoders/cli-video
|
35cacc059f6ac86c7bf6b1f86f42ea178e16165c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Introduces the "argparse" module, which is used to parse more complex argument strings
eg: ./006-argparse.py --name Jeff mauve
"""
import argparse # http://docs.python.org/2/library/argparse.html#module-argparse
import subprocess
def main():
parser = argparse.ArgumentParser(description='Say a sentence')
parser.add_argument('--name', type=str, help='a name')
parser.add_argument('color', type=str, nargs='+', help='a color') # nargs='+' means "at least one"
args = parser.parse_args()
cmd = 'say {0} likes {1}'.format(args.name, args.color[0])
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
main()
| 28.347826
| 99
| 0.707055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 332
| 0.509202
|
a5209d004c35406d08483e6a8a94534fc1c1b17b
| 4,573
|
py
|
Python
|
solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py
|
ZLLentz/solid-attenuator
|
766ac1df169b3b9459222d979c9ef77a9be2b509
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-04-21T02:55:11.000Z
|
2021-04-21T02:55:11.000Z
|
solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py
|
ZLLentz/solid-attenuator
|
766ac1df169b3b9459222d979c9ef77a9be2b509
|
[
"BSD-3-Clause-LBNL"
] | 27
|
2020-12-07T23:11:42.000Z
|
2022-02-02T23:59:03.000Z
|
solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py
|
ZLLentz/solid-attenuator
|
766ac1df169b3b9459222d979c9ef77a9be2b509
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2020-04-01T05:52:03.000Z
|
2020-07-24T16:56:36.000Z
|
"""
This is the IOC source code for the unique AT2L0, with its 18 in-out filters.
"""
from typing import List
from caproto.server import SubGroup, expand_macros
from caproto.server.autosave import RotatingFileManager
from .. import calculator, util
from ..filters import InOutFilterGroup
from ..ioc import IOCBase
from ..system import SystemGroupBase
from ..util import State
class SystemGroup(SystemGroupBase):
"""
PV group for attenuator system-spanning information.
This system group implementation is specific to AT2L0.
"""
@property
def material_order(self) -> List[str]:
"""Material prioritization."""
# Hard-coded for now.
return ['C', 'Si']
def check_materials(self) -> bool:
"""Ensure the materials specified are OK according to the order."""
bad_materials = set(self.material_order).symmetric_difference(
set(self.all_filter_materials)
)
if bad_materials:
self.log.error(
'Materials not set properly! May not calculate correctly. '
'Potentially bad materials: %s', bad_materials
)
return not bool(bad_materials)
@util.block_on_reentry()
async def run_calculation(self, energy: float, desired_transmission: float,
calc_mode: str
) -> calculator.Config:
if not self.check_materials():
raise util.MisconfigurationError(
f"Materials specified outside of supported ones. AT2L0 "
f"requires that diamond filters be inserted prior to silicon "
f"filters, but the following were found:"
f"{self.all_filter_materials}"
)
# Update all of the filters first, to determine their transmission
# at this energy
stuck = self.get_filters(stuck=True, inactive=False, normal=False)
filters = self.get_filters(stuck=False, inactive=False, normal=True)
materials = list(flt.material.value for flt in filters)
transmissions = list(flt.transmission.value for flt in filters)
for filter in stuck + filters:
await filter.set_photon_energy(energy)
# Account for stuck filters when calculating desired transmission:
stuck_transmission = self.calculate_stuck_transmission()
adjusted_tdes = desired_transmission / stuck_transmission
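        # Illustrative example (added): if the stuck filters alone transmit
        # 0.5 and the overall target transmission is 0.1, the movable filters
        # are asked to provide 0.1 / 0.5 = 0.2.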
# Using the above-calculated transmissions, find the best configuration
config = calculator.get_best_config_with_material_priority(
materials=materials,
transmissions=transmissions,
material_order=self.material_order,
t_des=adjusted_tdes,
mode=calc_mode,
)
filter_to_state = {
flt: State.from_filter_index(idx)
for flt, idx in zip(filters, config.filter_states)
}
filter_to_state.update(
{flt: flt.get_stuck_state() for flt in stuck}
)
# Reassemble filter states in order:
config.filter_states = [
# Inactive filters will be implicitly marked as "Out" here.
filter_to_state.get(flt, State.Out)
for flt in self.filters.values()
]
# Include the stuck transmission in the result:
config.transmission *= stuck_transmission
return config
def create_ioc(prefix, filter_group, macros, **ioc_options):
"""IOC Setup."""
filter_index_to_attribute = {
index: f'filter_{suffix}'
for index, suffix in filter_group.items()
}
subgroups = {
filter_index_to_attribute[index]: SubGroup(
InOutFilterGroup, prefix=f':FILTER:{suffix}:', index=index)
for index, suffix in filter_group.items()
}
subgroups['sys'] = SubGroup(SystemGroup, prefix=':SYS:')
low_index = min(filter_index_to_attribute)
high_index = max(filter_index_to_attribute)
motor_prefix = expand_macros(macros["motor_prefix"], macros)
motor_prefixes = {
idx: f'{motor_prefix}{idx:02d}:STATE'
for idx in range(low_index, high_index + 1)
}
IOCMain = IOCBase.create_ioc_class(filter_index_to_attribute, subgroups,
motor_prefixes)
ioc = IOCMain(prefix=prefix, macros=macros, **ioc_options)
autosave_path = expand_macros(macros['autosave_path'], macros)
ioc.autosave_helper.filename = autosave_path
ioc.autosave_helper.file_manager = RotatingFileManager(autosave_path)
return ioc
| 36.293651
| 79
| 0.650776
| 3,032
| 0.663022
| 0
| 0
| 2,367
| 0.517603
| 2,190
| 0.478898
| 1,106
| 0.241854
|
a520cc9aad5c8512bee199a8b970862484795d67
| 4,530
|
py
|
Python
|
mysite/urls.py
|
jtkim03/Find-a-QT
|
a330c95f76bcc148febf39284c07d3ac4f909b4e
|
[
"BSD-3-Clause"
] | null | null | null |
mysite/urls.py
|
jtkim03/Find-a-QT
|
a330c95f76bcc148febf39284c07d3ac4f909b4e
|
[
"BSD-3-Clause"
] | 9
|
2021-03-30T13:42:35.000Z
|
2022-03-12T00:36:19.000Z
|
mysite/urls.py
|
jtkim03/Find-a-QT
|
a330c95f76bcc148febf39284c07d3ac4f909b4e
|
[
"BSD-3-Clause"
] | null | null | null |
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include, re_path
from find_a_qt.views import home, QuestionListView, \
QuestionDetailView, question_post, answer_post, room_post, \
AnswerListView, user_history, UserQuestionView, question_answers, upvote_question_detail,\
upvote_answer_question, downvote_question_detail, downvote_answer_question
from django.views.generic import TemplateView
from users import views as user_views
from find_a_qt import views as find_a_qt_views
from django.conf import settings
from django.conf.urls.static import static
from chat.models import Room
from find_a_qt.models import Question
urlpatterns = [
path('',TemplateView.as_view(template_name = 'find_a_qt/home.html'), name='faqt-home'), #TODO Merge this login template with homepage
path('admin/', admin.site.urls),
url(r'^', include('chat.urls')),
path('accounts/', include('allauth.urls')),
path('about/', TemplateView.as_view(template_name = 'find_a_qt/about.html')),
path('register/', user_views.register, name='register'),
path('login/', auth_views.LoginView.as_view(template_name = 'users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name = 'users/logout.html'), name = 'logout'),
path('profile/', user_views.view_profile, name='profile'),
url(r'^profile/(?P<pk>\d+)/$', user_views.view_profile, name='profile_with_pk'),
path('profile/edit/', user_views.edit_profile, name='edit_profile'),
#path('profile/MyQuestions/', UserQuestionView.as_view(), name='myqs'),
url(r'^profile/(?P<username>\w+)/$', user_views.profile_page, name='public_profile'),
path('questions/', QuestionListView.as_view(), name='viewquestions'),
path('answers/', AnswerListView.as_view(), name='viewanswers'),
path('questions/new/', question_post, name='createquestions'),
path('questions/<int:pk>/', QuestionDetailView.as_view(), name = 'viewquestions-detail'),
path('choose_question', TemplateView.as_view(template_name = 'find_a_qt/choose_question.html')),
path('questions/search/', TemplateView.as_view(template_name = 'find_a_qt/search_question.html'), name = 'search'),
path('s/', find_a_qt_views.search_view, name = 'search'),
path('answer/new/', answer_post, name='createqs'),
path('chat/new/', room_post, name='createroom'),
path('reset-password/', auth_views.PasswordResetView.as_view(), name='reset_password'),
path('reset-password/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
re_path(r'^reset-password/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,23})/$',
auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
path('reset-password/complete/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
path('my-questions/', user_history, name='user_question'),
# path('answer-question/',question_answers,name='answer_question'),
path('answers/<int:pk>/',question_answers,name='answer_question'),
url(r'^like/(?P<username>\w+)/$', user_views.like, name='like'),
url(r'^dislike/(?P<username>\w+)/$', user_views.dislike, name='dislike'),
url(r'^upvote_q_d/(?P<answer_id>\d+)/(?P<pk>\d+)/$', upvote_question_detail, name='upvote_question_detail'),
url(r'^upvote_a_q/(?P<answer_id>\d+)/(?P<pk>\d+)/$', upvote_answer_question, name='upvote_answer_question'),
url(r'^downvote_q_d/(?P<answer_id>\d+)/(?P<pk>\d+)/$', downvote_question_detail, name='downvote_question_detail'),
url(r'^downvote_a_q/(?P<answer_id>\d+)/(?P<pk>\d+)/$', downvote_answer_question, name='downvote_answer_question'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 56.625
| 137
| 0.722296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,139
| 0.472185
|
a5212eabcb877d5b4c6f79a259ed99bcf35ed6f2
| 396
|
py
|
Python
|
app/forms/login.py
|
mkorcha/CoyoteLab
|
8932d9cc35fb840e468368c2e1249ca4811b59d0
|
[
"MIT"
] | 2
|
2016-12-01T00:10:46.000Z
|
2016-12-31T19:18:35.000Z
|
app/forms/login.py
|
mkorcha/CoyoteLab
|
8932d9cc35fb840e468368c2e1249ca4811b59d0
|
[
"MIT"
] | null | null | null |
app/forms/login.py
|
mkorcha/CoyoteLab
|
8932d9cc35fb840e468368c2e1249ca4811b59d0
|
[
"MIT"
] | null | null | null |
from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(Form):
'''
Form used to perform a user login
'''
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Log In', validators=[DataRequired()])
| 28.285714
| 66
| 0.765152
| 261
| 0.659091
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.179293
|
a522e39d60daf369e5808e7febdbf847f905a859
| 880
|
py
|
Python
|
budgetcalc/admin.py
|
MAPC/MBTA
|
a1e669004509832a42ca49ef4d7d06d05e3a88fd
|
[
"BSD-3-Clause"
] | null | null | null |
budgetcalc/admin.py
|
MAPC/MBTA
|
a1e669004509832a42ca49ef4d7d06d05e3a88fd
|
[
"BSD-3-Clause"
] | null | null | null |
budgetcalc/admin.py
|
MAPC/MBTA
|
a1e669004509832a42ca49ef4d7d06d05e3a88fd
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from budgetcalc.models import Category, Optiongroup, Option, Submission
class CategoryAdmin(admin.ModelAdmin):
list_display = ('pk','title', 'cat_type', 'order',)
list_editable = ('title', 'cat_type', 'order',)
class OptiongroupAdmin(admin.ModelAdmin):
list_display = ('pk', 'title', 'form_type',)
list_editable = ('title', 'form_type',)
class OptionAdmin(admin.ModelAdmin):
list_display = ('pk', 'title', 'amount', 'category', 'optiongroup', 'parent', 'order',)
list_editable = ('title', 'amount', 'category', 'optiongroup', 'parent', 'order',)
class SubmissionAdmin(admin.ModelAdmin):
list_display = ('email', 'budget', )
admin.site.register(Category, CategoryAdmin)
admin.site.register(Optiongroup, OptiongroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(Submission, SubmissionAdmin)
| 32.592593
| 91
| 0.718182
| 576
| 0.654545
| 0
| 0
| 0
| 0
| 0
| 0
| 217
| 0.246591
|
a52614c1f178a95384236fabe39f5251f3b714f1
| 117
|
py
|
Python
|
USP_Curso/Semana3/Exercicio4.py
|
IagoAntunes/Python__learning
|
cb96a1ae902c290270479c7a7f4e97b56c538297
|
[
"MIT"
] | null | null | null |
USP_Curso/Semana3/Exercicio4.py
|
IagoAntunes/Python__learning
|
cb96a1ae902c290270479c7a7f4e97b56c538297
|
[
"MIT"
] | null | null | null |
USP_Curso/Semana3/Exercicio4.py
|
IagoAntunes/Python__learning
|
cb96a1ae902c290270479c7a7f4e97b56c538297
|
[
"MIT"
] | null | null | null |
num = int(input("Digite um numero: "))
if(num % 5 == 0 and num % 3 == 0):
print("FizzBuzz")
else:
print(num)
| 19.5
| 38
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.25641
|
a5269b57489ba167b642f9b77a2ffe4af1010998
| 1,777
|
py
|
Python
|
tests/integration/cli/test_compile.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/cli/test_compile.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/cli/test_compile.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | null | null | null |
def test_compile(ape_cli, runner, project):
if not (project.path / "contracts").exists():
result = runner.invoke(ape_cli, ["compile"])
assert result.exit_code == 0
assert "WARNING" in result.output
assert "No 'contracts/' directory detected" in result.output
return # Nothing else to test for this project
if ".test" in project.extensions_with_missing_compilers:
result = runner.invoke(ape_cli, ["compile"])
assert result.exit_code == 0
assert "WARNING: No compilers detected for the " "following extensions:" in result.output
assert ".test" in result.output
assert ".foobar" in result.output
result = runner.invoke(ape_cli, ["compile", "contracts/Contract.test"])
assert result.exit_code == 0
assert (
"WARNING: No compilers detected for the " "following extensions: .test"
) in result.output
return # Nothing else to test for this project
result = runner.invoke(ape_cli, ["compile"])
assert result.exit_code == 0
# First time it compiles, it compiles fully
for file in project.path.glob("contracts/**/*"):
assert file.stem in result.output
result = runner.invoke(ape_cli, ["compile"])
assert result.exit_code == 0
    # Second time it compiles, it uses the cache
for file in project.path.glob("contracts/**/*"):
assert file.stem not in result.output
if not any(c.deploymentBytecode for c in project.contracts.values()):
return # Only interfaces
result = runner.invoke(ape_cli, ["compile", "--size"])
assert result.exit_code == 0
# Still caches but displays bytecode size
for file in project.path.glob("contracts/**/*"):
assert file.stem in result.output
| 40.386364
| 97
| 0.655037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 562
| 0.316263
|
a528fb1f9441de07bad65e4dc6932a2f3895273e
| 3,720
|
py
|
Python
|
gpg_reaper.py
|
kacperszurek/gpg_reaper
|
8fd0de32944900c813f8dbb5b83bb83abcea037f
|
[
"MIT"
] | 95
|
2018-03-05T18:20:00.000Z
|
2021-09-28T18:51:40.000Z
|
gpg_reaper.py
|
n0ncetonic/gpg_reaper
|
8fd0de32944900c813f8dbb5b83bb83abcea037f
|
[
"MIT"
] | null | null | null |
gpg_reaper.py
|
n0ncetonic/gpg_reaper
|
8fd0de32944900c813f8dbb5b83bb83abcea037f
|
[
"MIT"
] | 15
|
2018-03-13T01:44:00.000Z
|
2021-12-20T09:59:26.000Z
|
# GPG Reaper
#
# MIT License
#
# Copyright (c) 2018 Kacper Szurek
# https://security.szurek.pl/
from pgpy.packet.fields import MPI, RSAPriv
from pgpy.constants import PubKeyAlgorithm, KeyFlags, HashAlgorithm, SymmetricKeyAlgorithm, CompressionAlgorithm
from pgpy import PGPKey
from pgpy.packet.packets import PrivKeyV4
import json
import codecs
import sys
import os
begin_block = '--START_GPG_REAPER--'
end_block = '--END_GPG_REAPER--'
if len(sys.argv) != 2:
print "Usage: " + __file__ + " output.txt"
os._exit(0)
file_path = sys.argv[1]
if not os.path.isfile(file_path):
print "[-] File not exist"
os._exit(0)
try:
def detect_by_bom(path,default=None):
with open(path, 'rb') as f:
raw = f.read(4)
for enc,boms in \
('utf-8-sig',(codecs.BOM_UTF8,)),\
('utf-16',(codecs.BOM_UTF16_LE,codecs.BOM_UTF16_BE)),\
('utf-32',(codecs.BOM_UTF32_LE,codecs.BOM_UTF32_BE)):
if any(raw.startswith(bom) for bom in boms): return enc
return default
file_encoding = detect_by_bom(file_path)
content = open(file_path).read()
if file_encoding:
content = content.decode(file_encoding)
begin_find = content.find(begin_block)
end_find = content.find(end_block)
if begin_find != -1 and end_find != -1:
data = json.loads(content[begin_find+len(begin_block):end_find])
if type(data) is not list:
data = [data]
for gpg in data:
try:
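                # Editor's note (added commentary): rebuild the RSA private key
                # from the dumped hex components (e, n, d, p, q, u), wrap it in a
                # version-4 private-key packet, then re-attach the user ID and
                # creation date taken from the exported public key.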
rsa_priv = RSAPriv()
rsa_priv.e = MPI(int(gpg['e'], 16))
rsa_priv.n = MPI(int(gpg['n'], 16))
rsa_priv.d = MPI(int(gpg['d'], 16))
rsa_priv.p = MPI(int(gpg['p'], 16))
rsa_priv.q = MPI(int(gpg['q'], 16))
rsa_priv.u = MPI(int(gpg['u'], 16))
rsa_priv._compute_chksum()
restored_priv_key = PrivKeyV4()
restored_priv_key.pkalg = PubKeyAlgorithm.RSAEncryptOrSign
restored_priv_key.keymaterial = rsa_priv
restored_priv_key.update_hlen()
pgp_key = PGPKey()
pgp_key._key = restored_priv_key
public_key, _ = PGPKey.from_blob(gpg['public'])
                # fingerprint contains creation date so we need to copy this one explicitly
pgp_key._key.created = public_key._key.created
pgp_key.add_uid(
public_key.userids[0],
usage={
KeyFlags.Sign,
KeyFlags.EncryptCommunications,
KeyFlags.EncryptStorage
},
hashes=[
HashAlgorithm.SHA256,
HashAlgorithm.SHA384,
HashAlgorithm.SHA512,
HashAlgorithm.SHA224],
ciphers=[
SymmetricKeyAlgorithm.AES256,
SymmetricKeyAlgorithm.AES192,
SymmetricKeyAlgorithm.AES128],
compression=[
CompressionAlgorithm.ZLIB,
CompressionAlgorithm.BZ2,
CompressionAlgorithm.ZIP,
CompressionAlgorithm.Uncompressed])
# print pgp_key
key_fingeprint = pgp_key.fingerprint.replace(" ", "")
print "[+] Dump {} - {}".format(key_fingeprint, public_key.userids[0])
open(key_fingeprint+".key", "w").write(str(pgp_key))
except Exception as e:
print "[-] Error: "+str(e)
else:
print "[-] No info"
except Exception as e:
print "[-] Error: "+str(e)
| 35.09434
| 112
| 0.552419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 387
| 0.104032
|
a52c5d9d6fb9e5755519e9da6cf0e9e7b0ba2f4d
| 221
|
py
|
Python
|
db.py
|
HoolaBoola/tsoha_article_library
|
9c1d79eb06811a97c6984d4c970ee71a18724df7
|
[
"MIT"
] | null | null | null |
db.py
|
HoolaBoola/tsoha_article_library
|
9c1d79eb06811a97c6984d4c970ee71a18724df7
|
[
"MIT"
] | 2
|
2021-04-26T18:19:39.000Z
|
2021-04-26T19:43:35.000Z
|
db.py
|
HoolaBoola/tsoha_article_library
|
9c1d79eb06811a97c6984d4c970ee71a18724df7
|
[
"MIT"
] | 1
|
2021-05-06T09:10:35.000Z
|
2021-05-06T09:10:35.000Z
|
from app import app
from flask_sqlalchemy import SQLAlchemy
from os import getenv
app.config["SQLALCHEMY_DATABASE_URI"] = getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
| 24.555556
| 62
| 0.809955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.321267
|
a52cc5e0156fbef790ecdf07862d92b75464ebf8
| 399
|
py
|
Python
|
classifier/nets/build.py
|
yidarvin/firstaid_classification
|
5cb1ec5a896766ec4670e0daca23014a879e6c14
|
[
"MIT"
] | null | null | null |
classifier/nets/build.py
|
yidarvin/firstaid_classification
|
5cb1ec5a896766ec4670e0daca23014a879e6c14
|
[
"MIT"
] | null | null | null |
classifier/nets/build.py
|
yidarvin/firstaid_classification
|
5cb1ec5a896766ec4670e0daca23014a879e6c14
|
[
"MIT"
] | null | null | null |
import torch
from os.path import join
from fvcore.common.registry import Registry
ARCHITECTURE_REGISTRY = Registry("ARCHITECTURE")
def build_model(cfg):
arch = cfg.MODEL.ARCHITECTURE
model = ARCHITECTURE_REGISTRY.get(arch)(cfg)
if cfg.SAVE.MODELPATH and cfg.MODEL.LOADPREV:
model.load_state_dict(torch.load(join(cfg.SAVE.MODELPATH, cfg.NAME + '_best.pth')))
return model
| 26.6
| 91
| 0.749373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.062657
|
a52e756102241b8ea4824f9de3490cd248e22558
| 14,034
|
py
|
Python
|
graphGenerator.py
|
carlklier/flappai-bird
|
ea640005494eaf70abc22c41e502593a8aff436f
|
[
"Apache-2.0"
] | null | null | null |
graphGenerator.py
|
carlklier/flappai-bird
|
ea640005494eaf70abc22c41e502593a8aff436f
|
[
"Apache-2.0"
] | null | null | null |
graphGenerator.py
|
carlklier/flappai-bird
|
ea640005494eaf70abc22c41e502593a8aff436f
|
[
"Apache-2.0"
] | null | null | null |
#%%
import base64
import matplotlib.pyplot as plt
import numpy as np
import json
from ast import literal_eval
data1encoded = 'eyJkZGFFbmFibGVkIjpmYWxzZSwiZGF0YSI6W3sic3RhcnRUaW1lIjoxNjE5MTM3MjUxMzg1LCJkdXJhdGlvbiI6NjQ1Miwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxMzcyNjMwMDYsImR1cmF0aW9uIjo3NTYwLCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzI3NTI0NywiZHVyYXRpb24iOjEyNzQ4LCJzY29yZSI6NCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzI5ODc5OSwiZHVyYXRpb24iOjczOTcsInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3MzEwNzQxLCJkdXJhdGlvbiI6MTUyNTAsInNjb3JlIjo1LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3MzMxMDg0LCJkdXJhdGlvbiI6MjYyNjgsInNjb3JlIjoxMiwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjEyfSx7InN0YXJ0VGltZSI6MTYxOTEzNzM2MTc0NiwiZHVyYXRpb24iOjkxNzAsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3Mzc1Mjg1LCJkdXJhdGlvbiI6MTI2MzEsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3MzkyMzM1LCJkdXJhdGlvbiI6MjA0MjcsInNjb3JlIjo3LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3NDE3MTMyLCJkdXJhdGlvbiI6OTQwNSwic2NvcmUiOjEsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc0MzA3MjgsImR1cmF0aW9uIjoxNjAxNiwic2NvcmUiOjUsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxMzc0NjEwNDUsImR1cmF0aW9uIjo4MTU2LCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzU1Njk1NCwiZHVyYXRpb24iOjg2NzIsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3NTY5NzA4LCJkdXJhdGlvbiI6MTIwNDAsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3NTg2MjU1LCJkdXJhdGlvbiI6MTI3NTIsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTM3NjAzNzE5LCJkdXJhdGlvbiI6OTM1MCwic2NvcmUiOjEsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc2MTY5OTcsImR1cmF0aW9uIjoxNzczMSwic2NvcmUiOjcsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc2MzkwODMsImR1cmF0aW9uIjo4Nzk3LCJzY29yZSI6MSwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTEzNzY1MjMyNywiZHVyYXRpb24iOjc4OTMsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3NjY1MTMxLCJkdXJhdGlvbiI6Njg1Miwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxMzc2NzYzODcsImR1cmF0aW9uIjo4ODg5LCJzY29yZSI6MSwiZ3J
hdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjEyfSx7InN0YXJ0VGltZSI6MTYxOTEzNzY4OTEwMiwiZHVyYXRpb24iOjcwMjAsInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTM3Njk5Nzk3LCJkdXJhdGlvbiI6ODcxNSwic2NvcmUiOjEsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn1dfQ=='
data1bytes = base64.b64decode(data1encoded)
data1 = json.loads(data1bytes.decode('utf8'))
data2encoded = 'eyJkZGFFbmFibGVkIjpmYWxzZSwiZGF0YSI6W3sic3RhcnRUaW1lIjoxNjE5MTg0NTQ1Nzk0LCJkdXJhdGlvbiI6NjMyMiwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxODQ1NzMyMDgsImR1cmF0aW9uIjo2NTQ1LCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NDU4NzE3NSwiZHVyYXRpb24iOjY5NjEsInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg0NjI2ODk3LCJkdXJhdGlvbiI6MTIzODYsInNjb3JlIjo0LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg0NjY5OTE3LCJkdXJhdGlvbiI6MzA4NjUsInNjb3JlIjoxOCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NDk5NTc0NSwiZHVyYXRpb24iOjc1MjAsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1MDA4MTAzLCJkdXJhdGlvbiI6MTc0NTYsInNjb3JlIjo4LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1Mzg2ODE4LCJkdXJhdGlvbiI6MTA4ODIsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1NDk1NTE3LCJkdXJhdGlvbiI6MjA1NzcsInNjb3JlIjoxMCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NTUyMzk3MCwiZHVyYXRpb24iOjE0MjczLCJzY29yZSI6NiwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjEyfSx7InN0YXJ0VGltZSI6MTYxOTE4NTU0NTAwOSwiZHVyYXRpb24iOjY2MDksInNjb3JlIjowLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NTU2OTg2LCJkdXJhdGlvbiI6MTAwMTgsInNjb3JlIjoyLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1NTcxMDg4LCJkdXJhdGlvbiI6MTA3MzcsInNjb3JlIjozLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NTg5MTEyLCJkdXJhdGlvbiI6NjIxMCwic2NvcmUiOjAsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxODU1OTk4MjQsImR1cmF0aW9uIjo3MjAxLCJzY29yZSI6MCwiZ3Jhdml0eSI6MC4yNSwicGlwZUludGVydmFsIjoxNDAwLCJwaXBlaGVpZ2h0Ijo5MCwiY29sbGlzaW9uUG9zaXRpb24iOjExfSx7InN0YXJ0VGltZSI6MTYxOTE4NTYxMTY3MywiZHVyYXRpb24iOjgxMTMsInNjb3JlIjoxLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1NjI0MDAxLCJkdXJhdGlvbiI6ODc4NSwic2NvcmUiOjIsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxODU2MzY4MjMsImR1cmF0aW9uIjoxNTI2NSwic2NvcmUiOjYsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX0seyJzdGFydFRpbWUiOjE2MTkxODU2NTYzNjgsImR1cmF0aW9uIjoyMjg4MSwic2NvcmUiOjEyLCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NjgzNjM3LCJkdXJhdGlvbiI6MTIxNDQsInNjb3JlIjo0LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTJ9LHsic3RhcnRUaW1lIjoxNjE5MTg1Njk5NTgyLCJkdXJhdGlvbiI6MTQyNzMsInNjb3JlIjo
2LCJncmF2aXR5IjowLjI1LCJwaXBlSW50ZXJ2YWwiOjE0MDAsInBpcGVoZWlnaHQiOjkwLCJjb2xsaXNpb25Qb3NpdGlvbiI6MTF9LHsic3RhcnRUaW1lIjoxNjE5MTg1NzIwOTkwLCJkdXJhdGlvbiI6ODgzMywic2NvcmUiOjIsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMn0seyJzdGFydFRpbWUiOjE2MTkxODU3MzY1MzAsImR1cmF0aW9uIjoxMDczNywic2NvcmUiOjMsImdyYXZpdHkiOjAuMjUsInBpcGVJbnRlcnZhbCI6MTQwMCwicGlwZWhlaWdodCI6OTAsImNvbGxpc2lvblBvc2l0aW9uIjoxMX1dfQ=='
data2bytes = base64.b64decode(data2encoded)
data2 = json.loads(data2bytes.decode('utf8'))
scores1 = []
gravity1 = []
pipeInterval1 = []
pipeHeight1 = []
for data in data1["data"]:
scores1.append(data['score'])
gravity1.append(data['gravity'])
pipeInterval1.append(data['pipeInterval'])
pipeHeight1.append(data['pipeheight'])
scores2 = []
gravity2 = []
pipeInterval2 = []
pipeHeight2 = []
for data in data2["data"]:
scores2.append(data['score'])
gravity2.append(data['gravity'])
pipeInterval2.append(data['pipeInterval'])
pipeHeight2.append(data['pipeheight'])
x = np.arange(1, 24, 1)
fig, ax = plt.subplots() # Create a figure containing a single axes.
ax.plot(x, scores1, label='JOE')
ax.plot(x, scores2, label='DALTON')
plt.xticks(x)
ax.set_xlabel('Playthrough')
ax.set_ylabel('Score')
ax.set_title('Scores over the playthroughs')
ax.legend()
fig2, ax2 = plt.subplots() # Create a figure containing a single axes.
ax2.plot(x, gravity1, label='player1')
ax2.plot(x, gravity2, label='player2')
plt.xticks(x)
ax2.set_xlabel('Playthrough')
ax2.set_ylabel('Gravity')
ax2.set_title('Gravity strength over 10 playthroughs')
ax2.legend()
fig3, ax3 = plt.subplots() # Create a figure containing a single axes.
ax3.plot(x, pipeInterval1, label='player1')
ax3.plot(x, pipeInterval2, label='player2')
plt.xticks(x)
ax3.set_xlabel('Playthrough')
ax3.set_ylabel('Pipe Interval Distance')
ax3.set_title('Pipe interval distance over 10 playthroughs')
ax3.legend()
fig4, ax4 = plt.subplots() # Create a figure containing a single axes.
ax4.plot(x, pipeHeight1, label='player1')
ax4.plot(x, pipeHeight2, label='player2')
plt.xticks(x)
ax4.set_xlabel('Playthrough')
ax4.set_ylabel('Pipe Height')
ax4.set_title('Pipe height over 10 playthroughs')
ax4.legend()
# %%
import csv
import base64
import matplotlib.pyplot as plt
import numpy as np
import json
data = []
with open('notddaEnabledData.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
data.append(row[0])
allData = []
for testerData in data:
dataBytes = base64.b64decode(testerData)
jsonData = json.loads(dataBytes.decode('utf8'))
scores = []
pipeInterval = []
pipeHeight = []
i = 0
for run in jsonData["data"]:
if(i != 0 and run['pipeInterval'] == 1410.5):
break
scores.append(run['score'])
pipeInterval.append(run['pipeInterval'])
pipeHeight.append(run['pipeheight'])
i = i + 1
xvals = np.arange(1, len(scores) + 1, 1)
playerData = {"xvals":xvals, "scores":scores, "pipeInterval":pipeInterval, "pipeHeight":pipeHeight}
allData.append(playerData)
fig, ax = plt.subplots()
ax.set_xlabel('Playthrough')
ax.set_ylabel('Score')
ax.set_title('DDA Enabled Scores')
for i in range(len(allData)):
ax.plot(allData[i]["xvals"], allData[i]["scores"], label='player' + str(i))
ax.legend()
fig, ax = plt.subplots()
ax.set_xlabel('Playthrough')
ax.set_ylabel('pipeInterval')
ax.set_title('Baseline pipeInterval Distances')
for i in range(len(allData)):
ax.plot(allData[i]["xvals"], allData[i]["pipeInterval"], label='player' + str(i))
ax.legend()
fig, ax = plt.subplots()
ax.set_xlabel('Playthrough')
ax.set_ylabel('pipeHeight')
ax.set_title('Baseline pipeHeight Distances')
for i in range(len(allData)):
ax.plot(allData[i]["xvals"], allData[i]["pipeHeight"], label='player' + str(i))
ax.legend()
# %%
import csv
import base64
import matplotlib.pyplot as plt
import numpy as np
import json
dataEnabled = []
with open('ddaEnabledData.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
dataEnabled.append(row[0])
dataNotEnabled = []
with open('notddaEnabledData.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
dataNotEnabled.append(row[0])
allDataEnabled = []
for testerData in dataEnabled:
dataBytes = base64.b64decode(testerData)
jsonData = json.loads(dataBytes.decode('utf8'))
scores = []
pipeInterval = []
pipeHeight = []
i = 0
for run in jsonData["data"]:
if(i != 0 and run['pipeInterval'] == 1410.5):
break
scores.append(run['score'])
pipeInterval.append(run['pipeInterval'])
pipeHeight.append(run['pipeheight'])
i = i + 1
xvals = np.arange(1, len(scores) + 1, 1)
playerData = {"xvals":xvals, "scores":scores, "pipeInterval":pipeInterval, "pipeHeight":pipeHeight}
allDataEnabled.append(playerData)
allDataNotEnabled = []
for testerData in dataNotEnabled:
dataBytes = base64.b64decode(testerData)
jsonData = json.loads(dataBytes.decode('utf8'))
scores = []
pipeInterval = []
pipeHeight = []
i = 0
for run in jsonData["data"]:
if(i != 0 and run['pipeInterval'] == 1410.5):
break
scores.append(run['score'])
pipeInterval.append(run['pipeInterval'])
pipeHeight.append(run['pipeheight'])
i = i + 1
xvals = np.arange(1, len(scores) + 1, 1)
playerData = {"xvals":xvals, "scores":scores, "pipeInterval":pipeInterval, "pipeHeight":pipeHeight}
allDataNotEnabled.append(playerData)
fig, ax = plt.subplots()
#plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
ax.set_xlabel('Playthrough')
ax.set_ylabel('Score')
ax.set_title('DDA vs. Baseline Scores')
for i in range(len(allDataEnabled)):
ax.plot(allDataEnabled[i]["xvals"], allDataEnabled[i]["scores"], label='DDA_Enabled', color='green')
for i in range(len(allDataNotEnabled)):
ax.plot(allDataNotEnabled[i]["xvals"], allDataNotEnabled[i]["scores"], label='Baseline', color='red')
handles, labels = plt.gca().get_legend_handles_labels()
labels, ids = np.unique(labels, return_index=True)
handles = [handles[i] for i in ids]
plt.legend(handles, labels, loc='best')
#ax.legend()
# %%
| 64.972222
| 4,005
| 0.862619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,370
| 0.667664
|
a5320d08df77982d660989950f89ae694eb0d00c
| 2,870
|
py
|
Python
|
C45Tree/apply.py
|
ManuelFreytag/Algorithm_implementation
|
380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8
|
[
"MIT"
] | 1
|
2018-07-31T08:29:11.000Z
|
2018-07-31T08:29:11.000Z
|
C45Tree/apply.py
|
ManuelFreytag/Algorithm_implementation
|
380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8
|
[
"MIT"
] | null | null | null |
C45Tree/apply.py
|
ManuelFreytag/Algorithm_implementation
|
380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 16:08:04 2016
@author: Manuel
"""
from C45Tree_own import split
import pandas as pa
def apply(X, tree):
results = []
for x in range(0,len(X.index)):
temp_tree = tree.copy()
example = X.loc[x,:]
        while True:
#Search for the correct next value
for i in range(0,len(temp_tree)):
node = searchNextNode(temp_tree[i])
#Check for numeric attributes
try:
if(X[node[0]].str.isnumeric().loc[0] == True):
#Phrase the first part and cast the second part
#check the what portion of the string needs to be removed
example = checkAndCompose(example, node)
except AttributeError:
if(split.typeCheck(X[node[0]].loc[0].dtype)=="numeric"):
example = checkAndCompose(example, node)
if(example.loc[node[0]] == node[1]):
#Cut the correct subtree
temp_tree = temp_tree[i]
break
#Check if we already have a classification solution
if(isinstance(temp_tree[0], list) == True):
#No solution, new cut
temp_tree = temp_tree[1]
else:
#Solution, add the result to array
results = results + [temp_tree[2]]
break
return results
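# Editor's note (added commentary, based on reading the code below): `node` is
# assumed to be an (attribute_name, threshold_string) pair where the threshold
# string looks like "<=4.5" or ">4.5". checkAndCompose rewrites the example's
# raw numeric value into that threshold string whenever the comparison holds,
# so that the equality check in apply() can match the node label directly.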
def checkAndCompose(example, node):
pa.options.mode.chained_assignment = None
if(node[1][0:2] == "<="):
if(float(example.loc[node[0]]) <= float(node[1][2:])):
example.loc[node[0]] = node[1]
# example.loc.__setitem__((node[0]), node[1])
if(node[1][0] == ">"):
if(float(example.loc[node[0]]) > float(node[1][1:])):
example.loc[node[0]] = node[1]
# example.loc.__setitem__((node[0]), node[1])
pa.options.mode.chained_assignment = 'warn'
return example
def searchInTree(tree, path):
temp_path = path.copy()
temp_tree = tree.copy()
while(isinstance(temp_tree, list) == True):
#move one dimension in
if(len(temp_path) > 0):
temp_tree = temp_tree[temp_path[0]]
#Remove done path part
temp_path.pop(0)
else:
#If all parts of the path are used, we search for the very first item
temp_tree = temp_tree[0]
return temp_tree
def searchNextNode(tree):
temp_tree = tree.copy()
while(isinstance(temp_tree[0], list) == True):
temp_tree = temp_tree[0]
return temp_tree
| 32.247191
| 81
| 0.501394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 635
| 0.221254
|
a5323fdb26fc504a82070a2ba96f6ac67837b9e8
| 1,545
|
py
|
Python
|
application_form/migrations/0005_occupancy_id_to_int.py
|
frwickst/apartment-application-service
|
40387327a0f82ba01bfcb6ab8532ea4aec40d37a
|
[
"MIT"
] | 1
|
2021-03-15T11:29:12.000Z
|
2021-03-15T11:29:12.000Z
|
application_form/migrations/0005_occupancy_id_to_int.py
|
frwickst/apartment-application-service
|
40387327a0f82ba01bfcb6ab8532ea4aec40d37a
|
[
"MIT"
] | 130
|
2020-09-07T08:30:29.000Z
|
2022-03-29T11:49:27.000Z
|
application_form/migrations/0005_occupancy_id_to_int.py
|
frwickst/apartment-application-service
|
40387327a0f82ba01bfcb6ab8532ea4aec40d37a
|
[
"MIT"
] | 4
|
2020-09-07T05:34:13.000Z
|
2021-11-07T12:51:21.000Z
|
# Generated by Django 2.2.16 on 2020-10-28 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("application_form", "0004_add_apartment"),
]
operations = [
migrations.AlterModelOptions(
name="hasoapartmentpriority",
options={
"permissions": [
("haso_create", "Can create new haso applications."),
("haso_update", "Can update the existing haso applications."),
(
"haso_delete",
"Can remove remove the existing haso applications.",
),
]
},
),
migrations.AlterModelOptions(
name="hasoapplication",
options={},
),
migrations.AlterModelOptions(
name="hitasapplication",
options={
"permissions": [
("hitas_create", "Can create new hitas applications."),
("hitas_update", "Can update the existing hitas applications."),
(
"hitas_delete",
"Can remove remove the existing hitas applications.",
),
]
},
),
migrations.AlterField(
model_name="hasoapplication",
name="right_of_occupancy_id",
field=models.IntegerField(verbose_name="right of occupancy ID"),
),
]
| 31.530612
| 84
| 0.487379
| 1,451
| 0.939159
| 0
| 0
| 0
| 0
| 0
| 0
| 577
| 0.373463
|
a5344db9359a04a85d968f45541c787bd80db9a0
| 579
|
py
|
Python
|
sample.py
|
aakibqureshi/goibibo_python
|
fb8b43eb85bad9baf016e701051a3196ac84a23d
|
[
"MIT"
] | null | null | null |
sample.py
|
aakibqureshi/goibibo_python
|
fb8b43eb85bad9baf016e701051a3196ac84a23d
|
[
"MIT"
] | null | null | null |
sample.py
|
aakibqureshi/goibibo_python
|
fb8b43eb85bad9baf016e701051a3196ac84a23d
|
[
"MIT"
] | null | null | null |
"""
Sample Example
"""
from goibibo import goibiboAPI
GO = goibiboAPI("ae51f09b", "da2d83a905110d15a51795b018605026")
print GO.FlightSearch("BLR", "HYD", 20141028)
print GO.MinimumFare("BLR", "HYD", 20141028)
print GO.BusSearch("bangalore", "hyderabad", 20141028)
print GO.BusSeatMap("vJ52KC0ymd0635qTD9bDDy9GHBkGl5FJMJje0aFX\
_GQTyev_4N9Y62TTfrmS-Re3dCHl0-UxLq4AsoQ%3D")
print GO.SearchHotelsByCity(6771549831164675055)
print GO.GetHotelData([1017089108070373346, 6085103403340214927])
print GO.GetHotelPriceByCity(6771549831164675055, 20141101, 20141102)
| 36.1875
| 69
| 0.789292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.369603
|
a534ca263cb0f9b6e64a0efc7bf4cd98c4b776a8
| 1,237
|
py
|
Python
|
aiogithub/objects/rate_limit.py
|
flying-sheep/aiogithub
|
566252cac036d9abe2b1eb2acb268547e01ca63e
|
[
"BSD-3-Clause"
] | 10
|
2016-09-13T15:50:10.000Z
|
2021-05-27T15:36:58.000Z
|
aiogithub/objects/rate_limit.py
|
flying-sheep/aiogithub
|
566252cac036d9abe2b1eb2acb268547e01ca63e
|
[
"BSD-3-Clause"
] | 5
|
2017-03-26T13:51:40.000Z
|
2020-04-22T19:46:36.000Z
|
aiogithub/objects/rate_limit.py
|
flying-sheep/aiogithub
|
566252cac036d9abe2b1eb2acb268547e01ca63e
|
[
"BSD-3-Clause"
] | 2
|
2020-03-05T06:07:18.000Z
|
2022-02-11T14:23:46.000Z
|
from datetime import datetime
from dateutil.tz import tzutc
from aiogithub.objects.response import BaseResponseObject
from aiogithub.utils import return_key
class RateLimitDetail(BaseResponseObject):
def _normalise_key(self, document, key):
if key == 'reset' and not isinstance(document['reset'], datetime):
document['reset'] = datetime.fromtimestamp(document['reset'],
tz=tzutc())
@property
@return_key
def limit(self) -> int:
pass
@property
@return_key
def reset(self) -> datetime:
pass
@property
@return_key
def remaining(self) -> int:
pass
class RateLimit(BaseResponseObject):
_url = 'rate_limit'
@staticmethod
def _get_key_mappings():
return {
'core': RateLimitDetail,
'search': RateLimitDetail
}
def _normalise_document(self, document):
if document:
document = document['resources']
return super()._normalise_document(document)
@property
@return_key
def core(self) -> RateLimitDetail:
pass
@property
@return_key
def search(self) -> RateLimitDetail:
pass
| 22.089286
| 74
| 0.611964
| 1,072
| 0.866613
| 0
| 0
| 507
| 0.409863
| 0
| 0
| 65
| 0.052546
|
a5357ab347b4e01cb284298a5ffef482b143a7cb
| 736
|
py
|
Python
|
python/basic_opertions.py
|
runningforlife/CodingExamples
|
808b12cdb996390225d40a687bf6215c4b7d1822
|
[
"Apache-2.0"
] | null | null | null |
python/basic_opertions.py
|
runningforlife/CodingExamples
|
808b12cdb996390225d40a687bf6215c4b7d1822
|
[
"Apache-2.0"
] | null | null | null |
python/basic_opertions.py
|
runningforlife/CodingExamples
|
808b12cdb996390225d40a687bf6215c4b7d1822
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""how to user basic math operations in python """
def math_operations():
a = 5 + 3
assert a == 8
b = 5 - 3
assert b == 2
c = 5 * 3
assert c == 15
assert isinstance(c, int)
assert 5 / 3 == 1.666666666666666666666667
assert 8 / 2 == 4
assert 5 % 3 == 2
assert 5 ** 3 == 125
assert 2 ** 3 == 8
assert 5 // 3 == 1
assert 8 // 4 == 2
print("math operation done")
def do_bitwise_operations():
"""bitwise operations"""
assert 5 & 3 == 1
assert ~4 == -5
assert 5 ^ 3 == 6
assert 5 >> 1 == 2
assert 5 << 2 == 20
print("bitwise operations done")
if __name__ == "__main__":
math_operations()
do_bitwise_operations()
| 16
| 50
| 0.539402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.199728
|
a5379fd45bcc411d7e294e71572901a73fd67651
| 8,204
|
py
|
Python
|
cogs/original_command.py
|
RT-Team/rt-bot
|
39698efb6b2465de1e84063cba9d207a5bf07fa5
|
[
"BSD-4-Clause"
] | 26
|
2021-11-30T02:48:16.000Z
|
2022-03-26T04:47:25.000Z
|
cogs/original_command.py
|
RT-Team/rt-bot
|
39698efb6b2465de1e84063cba9d207a5bf07fa5
|
[
"BSD-4-Clause"
] | 143
|
2021-11-04T07:47:53.000Z
|
2022-03-31T23:13:33.000Z
|
cogs/original_command.py
|
RT-Team/rt-bot
|
39698efb6b2465de1e84063cba9d207a5bf07fa5
|
[
"BSD-4-Clause"
] | 14
|
2021-11-12T15:32:27.000Z
|
2022-03-28T04:04:44.000Z
|
# RT - Original Command
from __future__ import annotations
from discord.ext import commands
import discord
from aiomysql import Pool, Cursor
from rtutil import DatabaseManager
class DataManager(DatabaseManager):
TABLE = "OriginalCommand"
def __init__(self, pool: Pool):
self.pool = pool
async def _prepare_table(self, cursor: Cursor = None) -> None:
await cursor.execute(
"""CREATE TABLE IF NOT EXISTS OriginalCommand (
GuildID BIGINT, Command TEXT,
Content TEXT, Reply TINYINT
);"""
)
async def _exists(self, cursor, guild_id: int, command: str) -> tuple[bool, str]:
        # Check whether the command already exists.
condition = "GuildID = %s AND Command = %s"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE {condition};",
(guild_id, command)
)
return bool(await cursor.fetchone()), condition
async def write(
self, guild_id: int, command: str,
content: str, reply: bool, cursor: Cursor = None
) -> None:
"書き込みます。"
if (c := await self._exists(cursor, guild_id, command))[0]:
await cursor.execute(
f"UPDATE {self.TABLE} SET Content = %s, Reply = %s WHERE {c[1]};",
(content, reply, guild_id, command)
)
else:
await cursor.execute(
f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s, %s);",
(guild_id, command, content, reply)
)
async def delete(self, guild_id: int, command: str, cursor: Cursor = None) -> None:
"データを削除します"
if (c := await self._exists(cursor, guild_id, command))[0]:
await cursor.execute(
f"DELETE FROM {self.TABLE} WHERE GuildID = %s AND Command = %s;",
(guild_id, command)
)
else:
raise KeyError("そのコマンドが見つかりませんでした。")
async def read(self, guild_id: int, cursor: Cursor = None) -> list:
"データを読み込みます。"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
(guild_id,)
)
return await cursor.fetchall()
async def read_all(self, cursor: Cursor = None) -> list:
"全てのデータを読み込みます。"
await cursor.execute(f"SELECT * FROM {self.TABLE};")
return await cursor.fetchall()
class OriginalCommand(commands.Cog, DataManager):
def __init__(self, bot):
self.bot = bot
self.data = {}
self.bot.loop.create_task(self.on_ready())
async def on_ready(self):
super(commands.Cog, self).__init__(self.bot.mysql.pool)
await self._prepare_table()
await self.update_cache()
async def update_cache(self):
self.data = {}
for row in await self.read_all():
if row:
if row[0] not in self.data:
self.data[row[0]] = {}
self.data[row[0]][row[1]] = {
"content": row[2],
"reply": row[3]
}
LIST_MES = {
"ja": ("自動返信一覧", "部分一致"),
"en": ("AutoReply", "Partially consistent")
}
@commands.group(
aliases=["cmd", "コマンド", "こまんど"],
extras={
"headding": {
"ja": "自動返信、オリジナルコマンド機能",
"en": "Auto reply, Original Command."
}, "parent": "ServerUseful"
}
)
async def command(self, ctx):
"""!lang ja
--------
自動返信、オリジナルコマンド機能です。
`rt!command`で登録されているコマンドの確認が可能です。
Aliases
-------
cmd, こまんど, コマンド
!lang en
--------
Auto reply, original command.
You can do `rt!command` to see commands which has registered.
Aliases
-------
cmd"""
if not ctx.invoked_subcommand:
if (data := self.data.get(ctx.guild.id)):
lang = self.bot.cogs["Language"].get(ctx.author.id)
embed = discord.Embed(
title=self.LIST_MES[lang][0],
description="\n".join(
(f"{cmd}:{data[cmd]['content']}\n "
f"{self.LIST_MES[lang][1]}:{bool(data[cmd]['reply'])}")
for cmd in data
),
color=self.bot.colors["normal"]
)
await ctx.reply(embed=embed)
else:
await ctx.reply(
{"ja": "自動返信はまだ登録されていません。",
"en": "AutoReplies has not registered anything yet."}
)
@command.command("set", aliases=["せっと"])
@commands.has_permissions(manage_messages=True)
@commands.cooldown(1, 7, commands.BucketType.guild)
async def set_command(self, ctx, command, auto_reply: bool, *, content):
"""!lang ja
--------
オリジナルコマンドを登録します。
Parameters
----------
command : str
コマンド名です。
auto_reply : bool
部分一致で返信をするかどうかです。
これをonにするとcommandがメッセージに含まれているだけで反応します。
offにするとcommandがメッセージに完全一致しないと反応しなくなります。
content : str
返信内容です。
Examples
--------
`rt!command set ようこそ off ようこそ!RTサーバーへ!!`
`rt!command set そうだよ on そうだよ(便乗)`
Aliases
-------
せっと
!lang en
--------
Register original command.
Parameters
----------
command : str
Command name.
auto_reply : bool
This is whether or not to reply with a partial match.
If you turn this on, it will respond only if the command is included in the message.
If you turn it off, it will not respond unless the command is an exact match to the message.
content : str
The content of the reply.
Examples
--------
`rt!command set Welcome! off Welcome to RT Server!!`
`rt!command set Yes on Yes (free ride)`"""
await ctx.trigger_typing()
if len(self.data.get(ctx.guild.id, ())) == 50:
await ctx.reply(
{"ja": "五十個より多くは登録できません。",
"en": "You cannot register more than 50."}
)
else:
await self.write(ctx.guild.id, command, content, auto_reply)
await self.update_cache()
await ctx.reply("Ok")
@command.command("delete", aliases=["del", "rm", "さくじょ", "削除"])
@commands.has_permissions(manage_messages=True)
@commands.cooldown(1, 7, commands.BucketType.guild)
async def delete_command(self, ctx, *, command):
"""!lang ja
--------
コマンドを削除します。
Parameters
----------
command : str
削除するコマンドの名前です。
Aliases
-------
del, rm, さくじょ, 削除
!lang en
--------
Delete command.
Parameters
----------
command : str
Target command name.
Aliases
-------
del, rm"""
await ctx.trigger_typing()
try:
await self.delete(ctx.guild.id, command)
except KeyError:
await ctx.reply(
{"ja": "そのコマンドが見つかりませんでした。",
"en": "The command is not found."}
)
else:
await self.update_cache()
await ctx.reply("Ok")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
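        # Editor's note (added commentary): only guild messages are handled;
        # messages from the bot itself or ones starting with a command prefix
        # are ignored, and at most three registered auto-replies are sent per
        # message.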
if not message.guild:
return
if ((data := self.data.get(message.guild.id))
and message.author.id != self.bot.user.id
and not message.content.startswith(
tuple(self.bot.command_prefix))):
count = 0
for command in data:
if ((data[command]["reply"] and command in message.content)
or command == message.content):
await message.reply(data[command]["content"])
count += 1
if count == 3:
break
def setup(bot):
bot.add_cog(OriginalCommand(bot))
| 30.385185
| 104
| 0.50902
| 8,740
| 0.973057
| 0
| 0
| 5,537
| 0.616455
| 7,554
| 0.841015
| 3,862
| 0.429971
|
a53a2c90ed2f68c611f75caaa74a581e8ab0f1b5
| 12,626
|
py
|
Python
|
cli_stats/get_data/api_scraper/api_scraper.py
|
timoudas/premier_league_api
|
2b850466ed1c910ee901c68e660706d55f53df61
|
[
"MIT"
] | 2
|
2020-02-13T12:30:47.000Z
|
2020-03-21T16:32:47.000Z
|
cli_stats/get_data/api_scraper/api_scraper.py
|
timoudas/premier_league_api
|
2b850466ed1c910ee901c68e660706d55f53df61
|
[
"MIT"
] | 2
|
2021-04-06T18:27:57.000Z
|
2021-06-02T03:51:47.000Z
|
cli_stats/get_data/api_scraper/api_scraper.py
|
timoudas/premier_league_api
|
2b850466ed1c910ee901c68e660706d55f53df61
|
[
"MIT"
] | null | null | null |
import re
import requests
import sys
sys.path.append('cli_stats')
from directory import Directory
from pprint import pprint
from storage_config import StorageConfig
from tqdm import tqdm
session = requests.Session()
#TODO
"""
*Program is not scaling well
"""
"""***HOW TO USE***
1. Create an instance of Football, this initiates the leagues dict which holds
all the leagueIDs.
fb = Football()
2. To get all the seasons for all leagues, first run the method
fb.load_leagues()
this fills the leagues dict with the necessary info to make further queries.
To get season values the league abbreviation has to be passed like below:
fb.leagues['EN_PR'].load_seasons()
This selects the key 'EN_PR' which is the parent key in leagues and loads
the seasons for that league by running the method load_seasons() which is in
the class League(). This returns a dict seasons holding the following:
1992/93': {'competition': 1, 'id': 1, 'label': '1992/93'}
Where the '1992/93' is the key containing that seasons information.
***WHAT IS NEEDED FOR ARBITRARY QUERIES***
League abbreviation
Season label
Team name
"""
def load_raw_data(url):
"""Retreives Ids for different pages on the API"""
page = 0
data_temp = []
while True:
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://www.premierleague.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
params = (('pageSize', '100'),
('page', str(page),))
# request to obtain the team info
try:
response = session.get(url, headers=headers, params=params).json()
if url.endswith('staff'):
data = response['players']
return data
elif 'fixtures' in url:
data = response["content"]
#loop to get info for each game
data_temp.extend(data)
else:
data = response['content']
# note: bit of a hack, for some reason 'id' is a float, but everywhere it's referenced, it's an int
for d in data:
d['id'] = int(d['id'])
return data
except Exception as e:
print(e, 'Something went wrong with the request')
return {}
page += 1
if page >= response["pageInfo"]["numPages"]:
break
for d in data_temp:
d['id'] = int(d['id'])
return data_temp
class TeamPlayers(dict):
_players = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load_players_for_team(self, team, season):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/teams/{team}/compseasons/{season}/staff')
self._players.clear()
self.clear()
for d in ds:
if d:
self._players[d['id']] = d
self[d['id']] = self._players[d['id']]
return self._players
class FixtureInfo(dict):
_fixtures = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load_info_for_fixture(self, season):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/fixtures?compSeasons={season}')
self.clear()
for d in ds:
self._fixtures[d['id']] = d
self[d['id']] = self._fixtures[d['id']]
return self._fixtures
class SeasonTeams(dict):
"""Creates an object for a team given a season """
_teams = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Team(dict):
"""Creates an object for a team in a competion and specific season
Args:
competition (str): Competition abbreviation
"""
def __init__(self, competition, *args, **kwargs):
super().__init__(*args, **kwargs)
self['competition'] = competition
self.players = TeamPlayers()#Returns Ids and info for every player on a team
def load_players(self):
"""returns info for all the players given their id and a season _id"""
return self.players.load_players_for_team(self['id'], self['competition'])
def load_teams_for_season(self, season, comp):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/teams?comps={comp}&compSeasons={season}')
self.clear()
self._teams.clear()
for d in ds:
d['competition'] = comp
self._teams[d['id']] = self.Team(season, d)
self[d['shortName']] = self._teams[d['id']]
return self._teams
# NO IDEA HOW THIS WORKS - REPLICATE SeasonTeams
class SeasonFixtures(dict):
"""Creates an object for all fixtures in a given a season """
_fixtures = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Fixture(dict):
"""Creates an object for a fixture in a competion and specific season"""
def __init__(self, competition, *args, **kwargs):
super().__init__(*args, **kwargs)
self['competition'] = competition
self.fixture = FixtureInfo()#Returns Ids and info for every player on a team
def load_fixture(self):
"""returns info for a fixture given it's Id"""
self.fixture.load_info_for_fixture(self['id'])
def load_fixture_for_season(self, season):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/fixtures?compSeasons={season}')
self.clear()
for d in ds:
d['competition'] = season
self._fixtures[d['id']] = self.Fixture(season, d)
self[d['status']] = self._fixtures[d['id']]
return self._fixtures
class Season(dict):
all_teams = SeasonTeams()
def __init__(self, competition, *args, **kwargs):
super().__init__(*args, **kwargs)
self['competition'] = competition
self.teams = SeasonTeams()
self.fixtures = SeasonFixtures()
def load_teams(self):
return self.teams.load_teams_for_season(self['id'], self['competition'])
def load_played_fixtures(self):
return self.fixtures.load_fixture_for_season(self['id'])
def load_unplayed_fixtures(self):
pass
def load_all_fixtures(self):
pass
class League(dict):
"""Gets Season_ids, returns a dict"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.seasons = {}  # Initiates a dictionary to hold season IDs
def season_label(self, label):
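        # Editor's note (added commentary): normalises API season labels; e.g.
        # a label such as "2019/20" becomes "2019/2020", while labels already
        # in the "2019/2020" form are returned unchanged by the first branch.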
try:
return re.search( r'(\d{4}/\d{4})', label).group()
except:
label = re.search( r'(\d{4}/\d{2})', label).group()
return re.sub(r'(\d{4}/)', r'\g<1>20', label)
def load_seasons(self):
"""Returns a dict with season label as key and season id as value"""
ds = load_raw_data(f'https://footballapi.pulselive.com/football/competitions/{self["id"]}/compseasons')
self.seasons = {self.season_label(d['label']): Season(self['id'], d) for d in ds}
return self.seasons
class Football:
"""Gets Competition_abbreviation, returns a dict"""
def __init__(self):
        self.leagues = {}  # Initiates a dictionary to hold league IDs
def load_leagues(self):
"""Returns a dict with league abbreviation as key and league id as value"""
ds = load_raw_data('https://footballapi.pulselive.com/football/competitions')
self.leagues = {d['abbreviation']: League(d) for d in ds}
return self.leagues
class ValidateParams():
"""Checks if all needed information exist on api for a league by season.
Input: A leagueID to check
Output: Console output with True/False values if information exist
**How the class checks if data exists**:
User provides a known leagueID, a request is made with the ID to see which seasons
exist.
    If no seasonIDs exist, it stops; otherwise it takes all the seasonIDs and stores them.
    For each seasonID it checks if fixtures exist; if they do, it stores them and
    uses them to see if fixture stats exist.
    If fixture stats exist, it requests all teams in
"""
dir = Directory()
fb = Football()
def __init__(self, league_file='league_params.json', team_seasons_file='teams_params.json' ):
self.leagues = self.import_id(league_file)
self.team_seasons = self.import_id(team_seasons_file)
self.league_file = league_file
def import_id(self, file):
"""Imports a json file in read mode
Args:
file(str): Name of file
"""
return self.dir.load_json(file , StorageConfig.PARAMS_DIR)
def make_request(self, url):
"""Makes a GET request
Args:
            url (str): URL to the website
"""
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://www.premierleague.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
params = (('pageSize', '100'),)
response = requests.get(url, params = params, headers=headers)
return response.status_code
def check_current_season(self):
"""
Checks if request gives response code 200
"""
failed = {}
league = self.leagues
print('Checking leagues..')
for league_name, league_id in tqdm(league.items()):
status = self.make_request(f'https://footballapi.pulselive.com/football/competitions/{league_id}/compseasons/current')
if status != 200:
failed.update({league_name:league_id})
print(failed)
return failed
def remove_failed_leagues(self, failed_leagues):
"""Removes failed leagues from .json file
Args:
failed_leagues (dict): dict with leagues existing in initial file
"""
league = self.import_id('season_params.json')
deleted = []
print('Deleting failed leagues..')
for failed in failed_leagues.keys():
if failed in league:
del league[failed]
deleted.append(failed)
print("Below leagues have been removed from", self.league_file)
print("\n".join(deleted))
self.dir.save_json('season_params', league, StorageConfig.PARAMS_DIR)
def check_stats_urls(self):
failed = {}
self.fb.load_leagues()
#loads league and their seasons from season_params.json
league_season_info = self.dir.load_json('season_params.json', StorageConfig.PARAMS_DIR)
#Iterates over league-season in league_season_info
for league, season in league_season_info.items():
seasons = self.fb.leagues[str(league)].load_seasons()
#Iterates over season_label and ID in seasons
for season_label, season_id in seasons.items():
s_id = season_id['id']
#Gets teams for a specific season
league_teams = self.fb.leagues[str(league)].seasons[str(season_label)].load_teams()
for team in league_teams.keys():
status = self.make_request(
f'https://footballapi.pulselive.com/football/teams/{team}/compseasons/{s_id}/staff')
if status != 200 and league not in failed:
failed.update({s_id:league})
print(failed)
return failed
def main(self):
return self.remove_failed_leagues(self.check_current_season())
if __name__ == '__main__':
# ValidateParams().main()
# Dir = Directory()
fb = Football()
# lg = League()
# fx = FixtureInfo()
fb.load_leagues()
pprint(fb.leagues['EN_PR'].load_seasons())
pprint(fb.leagues['EN_PR'].seasons['2019/2020'].load_teams())
# pprint(fb.leagues['EN_PR'].seasons['2016/2017'].teams['Arsenal'].load_players())
# ds = fb.leagues['EU_CL'].load_seasons()
# fb.leagues['EU_CL'].seasons['2016/2017'].load_teams()
# pprint(fb.leagues['EU_CL'].seasons['2016/2017'].teams['Atlético'].load_players())
| 34.497268
| 165
| 0.606368
| 9,286
| 0.735408
| 0
| 0
| 0
| 0
| 0
| 0
| 5,358
| 0.424329
|
a53ba70350cca6563c1076848345c71f8f783379
| 826
|
py
|
Python
|
web/checkout/models.py
|
Arvind-4/E-Commerce
|
d7d2f395a4e64a683dd73fed29c627a2210f479a
|
[
"MIT"
] | null | null | null |
web/checkout/models.py
|
Arvind-4/E-Commerce
|
d7d2f395a4e64a683dd73fed29c627a2210f479a
|
[
"MIT"
] | null | null | null |
web/checkout/models.py
|
Arvind-4/E-Commerce
|
d7d2f395a4e64a683dd73fed29c627a2210f479a
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth import get_user_model
from django_countries import countries
COUNTRY_CHOICES = tuple(countries)
User = get_user_model()
# Create your models here.
class Checkout(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
email = models.EmailField()
address = models.CharField(max_length=100)
city = models.CharField(max_length=100)
state = models.CharField(max_length=100)
number = models.IntegerField()
zip_code = models.IntegerField()
country = models.CharField(choices=COUNTRY_CHOICES, max_length=100)
updated = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f'{self.name}|{self.email}'
| 30.592593
| 71
| 0.746973
| 620
| 0.750605
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 0.064165
|
a53bcdd38f44a14806e05907ccae272513b9cf1c
| 1,787
|
py
|
Python
|
archive/least_squares_BCES.py
|
Alexander-Serov/abortive-initiation-analysis
|
2a036a5186459b79e7cdbd84aa8a7b130226b5e1
|
[
"MIT"
] | null | null | null |
archive/least_squares_BCES.py
|
Alexander-Serov/abortive-initiation-analysis
|
2a036a5186459b79e7cdbd84aa8a7b130226b5e1
|
[
"MIT"
] | null | null | null |
archive/least_squares_BCES.py
|
Alexander-Serov/abortive-initiation-analysis
|
2a036a5186459b79e7cdbd84aa8a7b130226b5e1
|
[
"MIT"
] | null | null | null |
import numpy as np
def least_squares_BCES(Y1, Y2, V11, V22, V12=0, origin=False):
"""
    Make a least-squares fit to the non-NaN values, taking into account the errors in both the rho and J variables. The implementation follows the BCES estimator of the Akritas1996 article and generalizes ordinary least squares. The variance of the slope is also calculated. The intercept is checked to be 0; otherwise a warning is issued.
The fit is performed for the model
X2i = alpha + beta * X1i + ei
Yki = Xki + eki
alpha = 0
so the slope is for X2(X1) function and not the inverse.
    If origin == True, no intercept is assumed. This doesn't change the least-squares slope, but it changes its error estimate.
Input:
vectors of data points and errors corresponding to different embryos and ncs.
Output:
(beta, beta_V, alpha, alpha_V)
"""
# Find and drop nans
inds_not_nan = list(set(np.flatnonzero(~np.isnan(Y1))) & set(
np.flatnonzero(~np.isnan(Y2))))
Y1, Y2, V11, V22 = [v[inds_not_nan] for v in (Y1, Y2, V11, V22)]
Y1m = Y1.mean()
Y2m = Y2.mean()
n = len(Y1)
    # Estimates for the slope (beta) and intercept (alpha)
beta = (
np.sum((Y1 - Y1m) * (Y2 - Y2m) - V12) /
np.sum((Y1 - Y1m)**2 - V11)
)
if not origin:
alpha = (Y2m - beta * Y1m)
else:
alpha = 0
# Error on the estimates
ksi = ((Y1 - Y1m) * (Y2 - beta * Y1 - alpha) + beta * V11 - V12) / (Y1.var() - V11.mean())
zeta = Y2 - beta * Y1 - Y1m * ksi
beta_V = ksi.var() / n
alpha_V = zeta.var() / n
# T, _, _, _ = np.linalg.lstsq(slopes[:, np.newaxis], Ns, rcond=None)
# print(beta, np.sqrt(beta_V), alpha, np.sqrt(alpha_V))
# print('Finished!')
return (beta, beta_V, alpha, alpha_V)
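# Minimal usage sketch (not part of the original module): recover a known slope
# from synthetic data with homoscedastic measurement errors. All values below
# (sample size, noise variances, seed) are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    n_points = 200
    true_beta = 2.0
    X1 = np.random.uniform(1, 10, n_points)
    V11 = np.full(n_points, 0.05)  # variances of the errors on Y1
    V22 = np.full(n_points, 0.05)  # variances of the errors on Y2
    Y1 = X1 + np.random.normal(0, np.sqrt(V11))
    Y2 = true_beta * X1 + np.random.normal(0, np.sqrt(V22))
    beta, beta_V, alpha, alpha_V = least_squares_BCES(Y1, Y2, V11, V22)
    print("beta = %.3f +- %.3f, alpha = %.3f +- %.3f"
          % (beta, np.sqrt(beta_V), alpha, np.sqrt(alpha_V)))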
| 33.716981
| 323
| 0.613318
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,016
| 0.568551
|
a53c77391ca18888fe3d4f6374d65264bcebc717
| 7,696
|
py
|
Python
|
tests/test_face.py
|
andfranklin/ErnosCube
|
a9dd7feda4bc0e9162cd884cd450f47c6b19c350
|
[
"MIT"
] | null | null | null |
tests/test_face.py
|
andfranklin/ErnosCube
|
a9dd7feda4bc0e9162cd884cd450f47c6b19c350
|
[
"MIT"
] | 4
|
2020-10-28T19:27:47.000Z
|
2020-11-04T00:12:25.000Z
|
tests/test_face.py
|
andfranklin/ErnosCube
|
a9dd7feda4bc0e9162cd884cd450f47c6b19c350
|
[
"MIT"
] | null | null | null |
from ErnosCube.face_enum import FaceEnum
from ErnosCube.orient_enum import OrientEnum
from ErnosCube.sticker import Sticker
from ErnosCube.face import Face
from ErnosCube.face import RowFaceSlice, ColFaceSlice
from plane_rotatable_tests import PlaneRotatableTests
from hypothesis import given
from strategies import sticker_matrices
from strategies_face import faces, faces_minus_c2, faces_minus_c4
from utils import N_and_flatten
from copy import deepcopy
from pytest import mark, fixture
class TestFace(PlaneRotatableTests):
"""Collection of all tests run on instances of the Face Class."""
objs = faces
objs_minus_c2 = faces_minus_c2
objs_minus_c4 = faces_minus_c4
@given(sticker_matrices)
def construction_test(self, sticker_matrix):
Face(*N_and_flatten(sticker_matrix))
@fixture
def front_face(self):
sticker_matrix = []
for i in range(3):
row = [Sticker(FaceEnum.FRONT, OrientEnum.UP) for _ in range(3)]
sticker_matrix.append(row)
return Face(*N_and_flatten(sticker_matrix))
@mark.dependency(depends=["construction"])
@given(faces)
def test_str(self, face):
gold = f"Face(N={face.N})"
assert str(face) == gold
@mark.dependency(depends=["construction"])
def test_repr(self, front_face):
gold = "\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b"
gold += "[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\n\x1b[7m\x1b[1m\x1b[32m ↑"
gold += " \x1b[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b[1m\x1b"
gold += "[32m ↑ \x1b[0m\n\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b"
gold += "[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m"
err_str = f"{repr(front_face)}: {repr(repr(front_face))}"
assert repr(front_face) == gold, err_str
@mark.dependency(depends=["construction"])
def test_get_raw_repr_size(self, front_face):
assert front_face.get_raw_repr_size() == 9
def rotate_cw_test(self):
sticker_mat = []
s00 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s01 = Sticker(FaceEnum.RIGHT, OrientEnum.RIGHT)
s02 = Sticker(FaceEnum.BACK, OrientEnum.DOWN)
sticker_mat.append([s00, s01, s02])
s10 = Sticker(FaceEnum.LEFT, OrientEnum.LEFT)
s11 = Sticker(FaceEnum.UP, OrientEnum.UP)
s12 = Sticker(FaceEnum.DOWN, OrientEnum.RIGHT)
sticker_mat.append([s10, s11, s12])
s20 = Sticker(FaceEnum.FRONT, OrientEnum.DOWN)
s21 = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
s22 = Sticker(FaceEnum.BACK, OrientEnum.UP)
sticker_mat.append([s20, s21, s22])
comp_face = Face(*N_and_flatten(sticker_mat))
cw_sticker_mat = []
sticker_row = [s20, s10, s00]
cw_sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s21, s11, s01]
cw_sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s22, s12, s02]
cw_sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
cw_comp_face = Face(*N_and_flatten(cw_sticker_mat))
assert (
comp_face.rotate_cw() == cw_comp_face
), f"failed for {str(comp_face)}\n{repr(comp_face)}"
def rotate_ccw_test(self):
ccw_sticker_mat = []
s00 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s01 = Sticker(FaceEnum.RIGHT, OrientEnum.RIGHT)
s02 = Sticker(FaceEnum.BACK, OrientEnum.DOWN)
ccw_sticker_mat.append([s00, s01, s02])
s10 = Sticker(FaceEnum.LEFT, OrientEnum.LEFT)
s11 = Sticker(FaceEnum.UP, OrientEnum.UP)
s12 = Sticker(FaceEnum.DOWN, OrientEnum.RIGHT)
ccw_sticker_mat.append([s10, s11, s12])
s20 = Sticker(FaceEnum.FRONT, OrientEnum.DOWN)
s21 = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
s22 = Sticker(FaceEnum.BACK, OrientEnum.UP)
ccw_sticker_mat.append([s20, s21, s22])
ccw_comp_face = Face(*N_and_flatten(ccw_sticker_mat))
sticker_mat = []
sticker_row = [s20, s10, s00]
sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s21, s11, s01]
sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s22, s12, s02]
sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
comp_face = Face(*N_and_flatten(sticker_mat))
assert (
comp_face.rotate_ccw() == ccw_comp_face
), f"failed for {str(comp_face)}\n{repr(comp_face)}"
def rotate_ht_test(self):
sticker_mat = []
s00 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s01 = Sticker(FaceEnum.RIGHT, OrientEnum.RIGHT)
s02 = Sticker(FaceEnum.BACK, OrientEnum.DOWN)
sticker_mat.append([s00, s01, s02])
s10 = Sticker(FaceEnum.LEFT, OrientEnum.LEFT)
s11 = Sticker(FaceEnum.UP, OrientEnum.UP)
s12 = Sticker(FaceEnum.DOWN, OrientEnum.RIGHT)
sticker_mat.append([s10, s11, s12])
s20 = Sticker(FaceEnum.FRONT, OrientEnum.DOWN)
s21 = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
s22 = Sticker(FaceEnum.BACK, OrientEnum.UP)
sticker_mat.append([s20, s21, s22])
comp_face = Face(*N_and_flatten(sticker_mat))
ht_sticker_mat = []
sticker_row = [s22, s21, s20]
ht_sticker_mat.append([deepcopy(s).rotate_ht() for s in sticker_row])
sticker_row = [s12, s11, s10]
ht_sticker_mat.append([deepcopy(s).rotate_ht() for s in sticker_row])
sticker_row = [s02, s01, s00]
ht_sticker_mat.append([deepcopy(s).rotate_ht() for s in sticker_row])
ht_comp_face = Face(*N_and_flatten(ht_sticker_mat))
assert (
comp_face.rotate_ht() == ht_comp_face
), f"failed for {str(comp_face)}\n{repr(comp_face)}"
def stickers_and_face(self):
s1 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s2 = Sticker(FaceEnum.BACK, OrientEnum.RIGHT)
s3 = Sticker(FaceEnum.LEFT, OrientEnum.DOWN)
stickers = [s1, s2, s3]
cs = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
face_stickers = []
face_stickers.append([cs, s1, cs])
face_stickers.append([s1, s2, s3])
face_stickers.append([cs, s3, cs])
return stickers, Face(*N_and_flatten(face_stickers))
@mark.dependency(name="get_row_slice", depends=["construction"])
def test_get_row_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_row_slice(1)
assert isinstance(face_slice, RowFaceSlice)
assert all(a == b for a, b in zip(face_slice.stickers, stickers))
@mark.dependency(name="get_col_slice", depends=["construction"])
def test_get_col_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_col_slice(1)
assert isinstance(face_slice, ColFaceSlice)
assert all(a == b for a, b in zip(face_slice.stickers, stickers))
@mark.dependency(depends=["get_row_slice"])
def test_apply_row_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_row_slice(1)
face.apply_slice(face_slice, 0)
for col_indx in range(face.N):
assert face[0, col_indx] == stickers[col_indx], f"\n{repr(face)}"
@mark.dependency(depends=["get_col_slice"])
def test_apply_col_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_col_slice(1)
face.apply_slice(face_slice, 0)
for row_indx in range(face.N):
assert face[row_indx, 0] == stickers[row_indx], f"\n{repr(face)}"
| 37
| 79
| 0.652677
| 7,220
| 0.935961
| 0
| 0
| 2,566
| 0.332642
| 0
| 0
| 762
| 0.098781
|
a53cb8a72414679c109b52c99f7c00abcac934ad
| 19,752
|
py
|
Python
|
tests/test_djangoes.py
|
Exirel/djangoes
|
7fee0ec0383077fc8ac5da8515c51a0b304f84be
|
[
"CC0-1.0"
] | 4
|
2015-01-05T21:04:20.000Z
|
2015-09-16T12:56:47.000Z
|
tests/test_djangoes.py
|
Exirel/djangoes
|
7fee0ec0383077fc8ac5da8515c51a0b304f84be
|
[
"CC0-1.0"
] | 15
|
2015-01-14T10:08:01.000Z
|
2021-06-02T07:09:49.000Z
|
tests/test_djangoes.py
|
Exirel/djangoes
|
7fee0ec0383077fc8ac5da8515c51a0b304f84be
|
[
"CC0-1.0"
] | 2
|
2015-02-17T11:11:31.000Z
|
2016-05-06T07:11:24.000Z
|
from unittest.case import TestCase
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from djangoes import (ConnectionHandler,
IndexDoesNotExist,
ConnectionDoesNotExist,
load_backend)
from djangoes.backends.abstracts import Base
from djangoes.backends import elasticsearch
class TestConnectionHandler(TestCase):
"""Test the ConnectionHandler class.
The ConnectionHandler is a major entry point for a good integration of
ElasticSearch in a Django project. It must ensure appropriate default
values, settings conformity, and prepare tests settings.
"""
# Test behavior with the default and/or empty values
# ==================================================
# Makes assertions about the default behavior when nothing is configured,
    # or when very little information is given. Using djangoes should be as
# transparent as possible, in particular with the default behavior.
def test_empty(self):
"""Assert an empty configuration fallback on default values."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
# A default alias appear in servers, while nothing changed in indices.
assert handler.servers == {'default': {}}
assert handler.indices == indices
def test_empty_with_default(self):
"""Assert the ensured default configuration is acceptable as input."""
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend',
'HOSTS': [],
'PARAMS': {},
'INDICES': []
}
}
indices = {
'index': {
'NAME': 'index',
'ALIASES': []
}
}
handler = ConnectionHandler(servers, indices)
# Both must be equal, without changes.
assert handler.servers == servers
assert handler.indices == indices
def test_empty_with_default_fallback(self):
"""Assert the fallback configuration is acceptable as input."""
servers = {
'default': {}
}
indices = {}
handler = ConnectionHandler(servers, indices)
assert handler.servers == {'default': {}}
assert handler.indices == {}
# Test with django project settings
# =================================
def test_project_settings_by_default(self):
"""Assert values come from the django project settings if not given."""
servers = {
'default': {},
'by_settings': {}
}
indices = {
'index_by_settings': {}
}
with override_settings(ES_SERVERS=servers, ES_INDICES=indices):
# No argument
handler = ConnectionHandler()
# Servers and indices are the one set in django settings.
assert handler.servers == servers
assert handler.indices == indices
# Test improperly configured behaviors
# ====================================
def test_improperly_configured_servers(self):
"""Assert raise when settings are not empty but without `default`."""
servers = {
'not_default': {}
}
handler = ConnectionHandler(servers, {})
with self.assertRaises(ImproperlyConfigured) as raised:
# A simple call to servers must raise.
handler.servers
assert str(raised.exception) == "You must define a 'default' ElasticSearch server"
# Test ensure default values
# ==========================
# Server
def test_empty_ensure_server_defaults(self):
"""Assert default values are set properly on an empty server."""
handler = ConnectionHandler({}, {})
handler.ensure_server_defaults('default')
default_server = handler.servers['default']
expected_server = {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend',
'HOSTS': [],
'PARAMS': {},
'INDICES': []
}
assert default_server == expected_server
def test_ensure_server_defaults_not_exists(self):
"""Assert raise when the argument given is not a configured server."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(ConnectionDoesNotExist) as raised:
handler.ensure_server_defaults('index')
assert str(raised.exception) == '%r' % 'index'
# Index
def test_empty_ensure_index_defaults(self):
"""Assert default values are set properly on an empty index."""
indices = {
'index': {}
}
handler = ConnectionHandler({}, indices)
handler.ensure_index_defaults('index')
index = handler.indices['index']
expected_index = {
'NAME': 'index',
'ALIASES': [],
'SETTINGS': None,
}
assert index == expected_index
def test_ensure_index_defaults_not_exists(self):
"""Assert raise when the argument given is not a configured index."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(IndexDoesNotExist) as raised:
handler.ensure_index_defaults('index')
assert str(raised.exception) == '%r' % 'index'
# Test prepare test settings
# ==========================
# Prepare server
def test_empty_prepare_server_test_settings(self):
"""Assert prepare adds a TEST key in the defaul server's settings."""
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend'
}
}
handler = ConnectionHandler(servers, {})
handler.prepare_server_test_settings('default')
default_server = handler.servers['default']
expected_test_server = {
'INDICES': []
}
assert 'TEST' in default_server
assert default_server['TEST'] == expected_test_server
def test_prepare_server_test_settings_not_exists(self):
"""Assert raise when the argument given is not a configured server."""
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(ConnectionDoesNotExist) as raised:
handler.prepare_server_test_settings('index')
assert str(raised.exception) == '%r' % 'index'
# Prepare index
def test_empty_prepare_index_test_settings(self):
indices = {
'index': {}
}
handler = ConnectionHandler({}, indices)
handler.ensure_index_defaults('index')
handler.prepare_index_test_settings('index')
index = handler.indices['index']
expected_test_index = {
'NAME': 'index_test',
'ALIASES': [],
'SETTINGS': None,
}
assert 'TEST' in index
assert index['TEST'] == expected_test_index
def test_prepare_index_test_settings_not_exists(self):
"""Assert raise when the argument given is not a configured index."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(IndexDoesNotExist) as raised:
handler.prepare_index_test_settings('index')
assert str(raised.exception) == '%r' % 'index'
def test_prepare_index_test_settings_use_alias_not_index_name(self):
"""Assert raise even if the index NAME is given as argument.
The prepare_index_test_settings method expects an index alias as used
in the indices dict, not its NAME (nor any of its ALIASES).
"""
servers = {}
indices = {
'index': {
'NAME': 'not_this_index',
'ALIASES': ['not_this_index']
}
}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(IndexDoesNotExist) as raised:
handler.prepare_index_test_settings('not_this_index')
assert str(raised.exception) == '%r' % 'not_this_index'
def test_prepare_index_test_settings_name_improperly_configured(self):
"""Assert raise when name and test name are the same."""
servers = {}
indices = {
'index': {
'NAME': 'index_production_name',
'ALIASES': [],
'TEST': {
'NAME': 'index_production_name',
'ALIASES': [],
}
}
}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(ImproperlyConfigured) as raised:
# A simple call to servers must raise.
handler.prepare_index_test_settings('index')
assert str(raised.exception) == (
'Index \'index\' uses improperly the same NAME and TEST\'s NAME '
'settings: \'index_production_name\'.'
)
def test_prepare_index_test_settings_aliases_improperly_configured(self):
"""Assert raise when name and test name are the same."""
servers = {}
indices = {
'index': {
'NAME': 'index',
'ALIASES': ['alias_prod', 'alias_prod_2'],
'TEST': {
'NAME': 'index_valid_test_name',
'ALIASES': ['alias_prod', 'alias_test']
}
}
}
handler = ConnectionHandler(servers, indices)
handler.ensure_index_defaults('index')
with self.assertRaises(ImproperlyConfigured) as raised:
# A simple call to servers must raise.
handler.prepare_index_test_settings('index')
assert str(raised.exception) == (
'Index \'index\' uses improperly the same index alias in ALIASES '
'and in TEST\'s ALIASES settings: \'alias_prod\'.'
)
# Test get server indices
# =======================
def test_empty_get_server_indices(self):
"""Assert there is no index by default, ie. `_all` will be used.
ElasticSearch allows query on all indices. It is not safe for testing
purposes, but it does not have to be checked in the connection handler.
"""
handler = ConnectionHandler({}, {})
# Yes, it is acceptable to get indices from a non-configured servers.
# The purpose of get_server_indices is not to validate the input.
test_server = {
'INDICES': []
}
indices = handler.get_server_indices(test_server)
assert indices == {}
def test_get_server_indices(self):
"""Assert indices are found for a given server."""
servers = {}
indices = {
'used': {},
'not_used': {}
}
handler = ConnectionHandler(servers, indices)
test_server = {
'INDICES': ['used'],
}
indices = handler.get_server_indices(test_server)
expected_indices = {
'used': {
'NAME': 'used',
'ALIASES': [],
'SETTINGS': None,
'TEST': {
'NAME': 'used_test',
'ALIASES': [],
'SETTINGS': None,
}
}
}
assert indices == expected_indices
# Test backend loading
# ====================
# Backend loading takes the given settings to import a module and
# instantiate a subclass of djangoes.backends.Base.
def test_function_load_backend(self):
"""Assert load_backend function imports and returns the given path.
An external function is used to import a module attribute from an
import path: it extracts the module import path and the attribute name,
        then it imports the module and gets its attribute, catching
``ImportError`` and ``AttributeError`` to raise a djangoes custom error
instead of basic errors.
"""
datetime_class = load_backend('datetime.datetime')
assert hasattr(datetime_class, 'now')
isfile_function = load_backend('os.path.isfile')
assert type(isfile_function) == type(lambda x: x)
with self.assertRaises(ImproperlyConfigured) as raised:
load_backend('module.does.not.exist')
assert str(raised.exception) == '\n'.join(
["'module.does.not.exist' isn't an available ElasticSearch backend.",
"Error was: No module named 'module'"])
with self.assertRaises(ImproperlyConfigured) as raised:
load_backend('os.path.not_exist')
assert str(raised.exception) == '\n'.join(
["'os.path.not_exist' isn't an available ElasticSearch backend.",
"Error was: 'module' object has no attribute 'not_exist'"])
def test_load_backend(self):
"""Assert load_backend method loads the configured server engine."""
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert isinstance(result, Base)
assert result.alias == 'default'
assert result.indices == []
assert result.index_names == []
assert result.alias_names == []
def test_load_backend_with_index(self):
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
'INDICES': ['index_1'],
}
}
indices = {
'index_1': {
'NAME': 'index_1',
'ALIASES': ['alias_1', 'alias_2'],
}
}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert sorted(result.indices) == ['alias_1', 'alias_2']
assert result.index_names == ['index_1']
assert sorted(result.alias_names) == ['alias_1', 'alias_2']
def test_load_backend_with_indices(self):
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
'INDICES': ['index_1', 'index_2'],
}
}
indices = {
'index_1': {
'NAME': 'index_1',
'ALIASES': ['alias_1', 'alias_2'],
},
'index_2': {
'NAME': 'index_2_name',
}
}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert sorted(result.indices) == ['alias_1', 'alias_2', 'index_2_name']
assert sorted(result.index_names) == ['index_1', 'index_2_name']
assert sorted(result.alias_names) == ['alias_1', 'alias_2']
# Test loading of backends.elasticsearch
# ======================================
def test_loading_elasticsearch(self):
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert isinstance(result, elasticsearch.SimpleHttpBackend)
# Test object and attributes manipulation
# =======================================
def test_iterable(self):
"""Assertions about list behavior of ConnectionHandler."""
servers = {
'default': {},
'task': {},
}
indices = {}
handler = ConnectionHandler(servers, indices)
assert sorted(list(handler)) == ['default', 'task']
def test_items(self):
"""Assertions about key:value behavior of ConnectionHandler."""
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
'INDICES': ['index_1'],
},
}
indices = {
'index_1': {},
'index_2': {}
}
handler = ConnectionHandler(servers, indices)
# Get the connection wrapper
wrapper = handler['default']
assert wrapper.indices == ['index_1']
# Change handler settings
handler.servers['default']['INDICES'] = ['index_2']
# The wrapper is not updated
wrapper = handler['default']
assert wrapper.indices == ['index_1']
# Delete the `default` connection
del handler['default']
# The new wrapper now use the new index
wrapper = handler['default']
assert wrapper.indices == ['index_2']
# Also, set item works without control
handler['something'] = 'else'
assert handler['something'] == 'else'
def test_all(self):
"""Assert all connection wrappers are returned."""
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
},
'task': {
'ENGINE': 'tests.backend.ConnectionWrapper'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
all_connections = handler.all()
assert len(all_connections) == 2
assert isinstance(all_connections[0], Base)
assert isinstance(all_connections[1], Base)
assert sorted([c.alias for c in all_connections]) == ['default', 'task']
def test_check_for_multiprocess(self):
"""Assert method will reset connections with a different PID.
.. note::
We don't really test "multi-processing" behavior. We are only
messing with a flag here to test connections reset.
"""
servers = {
'default': {
'HOSTS': ['localhost']
}
}
handler = ConnectionHandler(servers, {})
conn = handler['default']
conn_again = handler['default']
assert conn is conn_again
assert id(conn) == id(conn_again)
# Changing the PID to "reset" connections.
handler._pid = 1
conn_again = handler['default']
assert conn is not conn_again
assert id(conn) != id(conn_again)
class TestProxyConnectionHandler(TestCase):
def test_attributes(self):
# Local import to manipulate elements
from djangoes import connections, connection
connections._servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper'
}
}
connections._indices = {}
# Existing attribute.
assert connection.alias == 'default'
# New attribute.
assert not hasattr(connection, 'new_attribute')
connections['default'].new_attribute = 'test_value'
assert hasattr(connection, 'new_attribute')
assert connection.new_attribute == 'test_value'
del connection.new_attribute
assert not hasattr(connection, 'new_attribute')
assert not hasattr(connections['default'], 'new_attribute')
connection.new_attribute = 'test_new_attribute_again'
assert hasattr(connection, 'new_attribute')
assert hasattr(connections['default'], 'new_attribute')
assert connection == connections['default']
assert not (connection != connections['default'])
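# Minimal configuration sketch inferred from the tests above (not part of the
# test suite): the ES_SERVERS / ES_INDICES names are the settings patched with
# override_settings earlier in this file; the host value is illustrative.
ES_SERVERS_EXAMPLE = {
    'default': {
        'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend',
        'HOSTS': ['localhost'],
        'PARAMS': {},
        'INDICES': ['index'],
    }
}
ES_INDICES_EXAMPLE = {
    'index': {
        'NAME': 'index',
        'ALIASES': [],
    }
}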
| 31.552716
| 90
| 0.571132
| 19,352
| 0.979749
| 0
| 0
| 0
| 0
| 0
| 0
| 7,200
| 0.36452
|
a53ce607d2484b47e38e0b6a97b11b56e4d3bb58
| 8,497
|
py
|
Python
|
bin/yap_conflict_check.py
|
Novartis/yap
|
8399e87e6083e6394d1f9340e308a01751465a03
|
[
"Apache-2.0"
] | 23
|
2015-01-14T21:32:11.000Z
|
2021-07-19T12:59:10.000Z
|
bin/yap_conflict_check.py
|
Novartis/yap
|
8399e87e6083e6394d1f9340e308a01751465a03
|
[
"Apache-2.0"
] | 1
|
2017-06-30T10:54:57.000Z
|
2017-06-30T10:54:57.000Z
|
bin/yap_conflict_check.py
|
Novartis/yap
|
8399e87e6083e6394d1f9340e308a01751465a03
|
[
"Apache-2.0"
] | 9
|
2015-09-02T17:44:24.000Z
|
2021-07-05T18:59:16.000Z
|
#!/usr/bin/env python
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
class yap_conflict_check:
"""
Provides methods to perform file-file, file-sample, file-group and
sample-group comparisons and find conflicts.
"""
def __init__(self, input_files):
self.input_files = map(self.translate_path, input_files)
self.filename_dict = \
self.generate_filename_dict(self.input_files)
def translate_path(self, path):
"""
Given a path,
        Returns the path after expanding environment variables and the user
        home directory, and converting relative paths to absolute paths
"""
path = os.path.expandvars(path) # expand environment variables
path = os.path.expanduser(path) # expand user's home directory
# don't convert to absolute if just filename
if len(os.path.dirname(path)) == 0 and (path not in ['.', ".."]):
return path
path = os.path.abspath(path) # convert relative path to absolute
return path # return output
def translate_paired_end_paths(self, paired_end_files):
'''
Given a list of paired end files
Returns a new list of paired end files with each file translated
using translate path function
'''
if len(paired_end_files) <= 0:
return [] # return empty o/p
paired_end_files_out = [] # output variable
        for paired_list in paired_end_files: # translate each pair of paths
            paired_list_out = map(self.translate_path, paired_list)
            paired_end_files_out.append(paired_list_out) # append the translated pair to o/p
return paired_end_files_out # return output
def get_paths(self, name):
'''
Given a name,
        Returns the list of paths stored under a key that exactly matches
        or starts with the given name, or None if nothing matches
'''
if len(name) <= 0:
return None # return null for empty input
# return if an exact match is found
if name in self.filename_dict:
return self.filename_dict[name]
# return all values for a partial match
matches = []
for key in self.filename_dict:
if key.find(name) == 0:
new_paths = self.find_new_items(matches,
self.filename_dict[key])
# extend only if a unique match is found
if len(new_paths) > 0:
matches.extend(new_paths)
if len(matches) == 0:
return None # return null if no matches
else:
return matches # return output
def find_new_items(self, current_list, new_list):
'''
Given two lists,
        Returns the items of the new list which are not in the current list;
        returns an empty list if no such items are found
'''
if len(current_list) == 0:
return new_list # all paths are new
# select an items not in current list and return list
return filter((lambda item: item not in current_list),
new_list)
def validate_names_and_find_duplicates(self, names):
'''
Given list of filenames,
Calls validate_names_and_find_duplicates_with_finder with
get_paths as finder and returns the result
'''
return self.validate_names_and_find_duplicates_with_finder(
names,
self.get_paths)
def validate_names_and_find_duplicates_with_finder(self, filenames,
finder):
"""
Input:
        --filenames: a list of filenames occurring in the contaminant file
        Check that all filenames exist among the input file names and that
        there are no duplicate filenames.
        Return values:
        --match_list: all resolved paths for filenames that were found
        --error_list: all filenames which do not exist in the input file names
        --duplicate_dict: [key:value]
            -key: filename for which a duplicate happens
            -value: all paths in which this filename occurs
"""
match_list = []
error_list = []
duplicate_dict = {}
# translate all filenames paths to complete paths
filenames = map(self.strip_space_tab_newline, filenames)
filenames = map(self.translate_path, filenames)
for fn in filenames:
if fn in self.input_files:
# filename exist in self.input_files
match_list.append(fn)
else:
# treat fn as basename
paths = finder(fn)
if paths is not None:
# basename exists
if len(paths) > 1:
# duplicate happens
duplicate_dict[fn] = paths
else:
# no duplicate
match_list.extend(paths)
else:
# basename not exists
error_list.append(fn)
return match_list, error_list, duplicate_dict
def generate_filename_dict(self, paths):
"""
Given a list of complete filepaths,
Returns a dictionary, with keys as filenames and values as list of
all paths that contain the corresponding key
Invariant: Paths contain filenames complete with extension.
"""
output = {} # output variable
if len(paths) <= 0:
return output # return empty output for empty input
for path in paths:
output[path] = [path] # add each path as key also.
basename = os.path.basename(path) # get filename from path
if len(basename) <= 0:
continue # skip if no filename in path
# get name without extension
basename_no_ext = os.path.splitext(basename)[0]
# create a new entry if it does not exist, append otherwise
if basename in output:
output[basename].append(path)
else:
output[basename] = [path]
# include a name with filename without extension also
if len(basename_no_ext) <= 0:
                continue # skip if name is empty
if basename_no_ext != basename: # add an entry for just filename
if basename_no_ext in output:
output[basename_no_ext].append(path)
else:
output[basename_no_ext] = [path]
return output # return dict
def find_duplicates_in_list(self, input):
"""
Given a list,
Returns a dictionary of all duplicates in the list,
Return empty dictionary if no duplicate entries are found.
"""
output = {} # output variable
if len(input) <= 0:
return output # return empty output for empty input
for item in input:
if item not in output: # check only if item not seen earlier
item_count = input.count(item) # count items
# add to output if item occurs more than once in list
if item_count > 1:
output[item] = item_count
return output
def list_to_sentence(self, list):
"""
Translate the given list to a string.
"""
sentence = ""
for i in range(0, len(list)):
if i == len(list) - 1:
sentence += "'" + list[i] + "'"
else:
sentence += "'" + list[i] + "' and "
return sentence
def strip_space_tab_newline(self, input):
'''
Given a string,
        Returns the string after removing leading and trailing spaces,
        tabs and newline characters
'''
if len(input) <= 0:
return '' # empty o/p for empty i/p
input = input.strip()
input = input.strip('\n')
input = input.strip('\t')
return input
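# Minimal usage sketch (hypothetical paths, not part of the original module;
# note that this module targets Python 2, where map/filter return lists):
# build the checker from the pipeline's input files, then validate names taken
# e.g. from a contaminant file.
if __name__ == '__main__':
    checker = yap_conflict_check(['/data/sample_a.fastq', '/data/sample_b.fastq'])
    matches, errors, duplicates = checker.validate_names_and_find_duplicates(
        ['sample_a', 'missing_sample'])
    print(matches)     # ['/data/sample_a.fastq']
    print(errors)      # ['missing_sample']
    print(duplicates)  # {}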
| 37.933036
| 79
| 0.578086
| 7,870
| 0.926209
| 0
| 0
| 0
| 0
| 0
| 0
| 3,894
| 0.458279
|
a53d6a324052f390797cf713195803de6c9fa43f
| 1,148
|
py
|
Python
|
PS4/ps4a.py
|
PanPapag/MIT-OCW-Introduction-to-Computer-Science-and-Programming-in-Python-6.0001
|
f9aeb55c1473920a7d283bfc09726bdef5614331
|
[
"MIT"
] | 3
|
2019-05-20T19:37:49.000Z
|
2020-05-16T08:57:04.000Z
|
PS4/ps4a.py
|
PanPapag/MIT-OCW-6.0001
|
f9aeb55c1473920a7d283bfc09726bdef5614331
|
[
"MIT"
] | null | null | null |
PS4/ps4a.py
|
PanPapag/MIT-OCW-6.0001
|
f9aeb55c1473920a7d283bfc09726bdef5614331
|
[
"MIT"
] | null | null | null |
def get_permutations(sequence):
'''
Enumerate all permutations of a given string
sequence (string): an arbitrary string to permute. Assume that it is a
non-empty string.
You MUST use recursion for this part. Non-recursive solutions will not be
accepted.
Returns: a list of all permutations of sequence
Example:
>>> get_permutations('abc')
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: depending on your implementation, you may return the permutations in
a different order than what is listed here.
'''
if len(sequence) == 0 or len(sequence) == 1:
result = [sequence]
else:
x = sequence[0]
permutations = get_permutations(sequence[1:])
result = []
for p in permutations:
for i in range(len(p) + 1):
result.append(p[:i] + x + p[i:])
return result
if __name__ == '__main__':
example_input = 'abc'
print('Input:', example_input)
print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
print('Actual Output:', get_permutations(example_input))
| 30.210526
| 79
| 0.595819
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 629
| 0.547909
|
a53fd665444f0740f577cb5726aba9622f38b8eb
| 3,246
|
py
|
Python
|
streambox/test/regression.py
|
chenzongxiong/streambox
|
76f95780d1bf6c02731e39d8ac73937cea352b95
|
[
"Unlicense"
] | 3
|
2019-07-03T14:03:31.000Z
|
2021-12-19T10:18:49.000Z
|
streambox/test/regression.py
|
chenzongxiong/streambox
|
76f95780d1bf6c02731e39d8ac73937cea352b95
|
[
"Unlicense"
] | 6
|
2020-02-17T12:01:30.000Z
|
2021-12-09T22:02:33.000Z
|
streambox/test/regression.py
|
chenzongxiong/streambox
|
76f95780d1bf6c02731e39d8ac73937cea352b95
|
[
"Unlicense"
] | 2
|
2020-12-03T04:41:18.000Z
|
2021-01-11T21:44:42.000Z
|
all_tests = [
{
"name" : "wc-fast",
"exec" : "./test-wc.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 100,
"target_ms" : 1000,
"input_file" : "/ssd/1g.txt",
# --- optional: soft delay --- #
#"softdelay_maxbad_ratio" : 0.1, # okay if aonmaly delay % is less than this in a window
#"softdelay_maxbad_ms" : 2000, # upper bound of anonmaly delay
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 5200, # used to be compared with the test results
"tput_hint" : 5000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # skip the test
},
{
"name" : "wingrep-fast",
"exec" : "./test-wingrep.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 1000,
"target_ms" : 1000,
"input_file" : "/ssd/9g.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 38500, # used to be compared with the test results
"tput_hint" : 37000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "test-join-2-fast",
"exec" : "./test-join-2.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 8, #sizeof(long)
"target_ms" : 1000,
"input_file" : "/ssd/test-digit.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 5200, # used to be compared with the test results
"tput_hint" : 5000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "test-distinct-fast",
"exec" : "./test-distinct.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 100,
"target_ms" : 1000,
"input_file" : "/ssd/train-out.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 2000, # xzl: can do 2000? used to be compared with the test results
"tput_hint" : 2000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "networklatency-fast",
"exec" : "./networklatency.bin",
"records" : 500 * 1000, # records per epoch
"record_size" : 40, #sizeof(struct srcdst_rtt)
"target_ms" : 1000,
"input_file" : "/ssd/region_Raw_PingmeshData.result",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 1000, # xzl: 878
"tput_hint" : 800, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
{
"name" : "test-tweet-fast",
"exec" : "./test-tweet.bin",
"records" : 1000 * 1000, # records per epoch
"record_size" : 200,
"target_ms" : 1000,
"input_file" : "/ssd/twitter_download/filtered_tweets.txt",
# --- optional --- #
# "cores" : 54, # if unspecified, fall back to app default
"tput_baseline" : 5000, # used to be compared with the test results
"tput_hint" : 4000, # the throughput value that test should try first
# --- control --- #
# "disable" : True # XXX skip the test
},
]
| 33.8125
| 90
| 0.608133
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,566
| 0.790511
|
a5411aefaed2b9a42a7bcdc0b02b6093311e2594
| 277
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/vowels-of-all-substrings.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/vowels-of-all-substrings.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/vowels-of-all-substrings.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def countVowels(self, word):
"""
:type word: str
:rtype: int
"""
VOWELS = set("aeiou")
return sum((i-0+1) * ((len(word)-1)-i+1) for i, c in enumerate(word) if c in VOWELS)
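# Quick sanity check (not part of the original solution): a vowel at index i
# appears in (i + 1) * (len(word) - i) substrings, which is exactly the term
# summed above, so the whole answer is computed in one linear pass.
if __name__ == '__main__':
    assert Solution().countVowels("aba") == 6   # a, ab, aba, b, ba, a -> 1+1+2+0+1+1
    assert Solution().countVowels("ltcd") == 0  # no vowels at all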
| 23.083333
| 92
| 0.505415
| 247
| 0.891697
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.33213
|
a541ad6227bc2976b930cd5ee28105b474b1a9e3
| 1,350
|
py
|
Python
|
flash_test/utils/log.py
|
nikolas-hermanns/flash-test
|
dda642e96f76113b42a7d64415eb3d8cdc03fca5
|
[
"Apache-2.0"
] | null | null | null |
flash_test/utils/log.py
|
nikolas-hermanns/flash-test
|
dda642e96f76113b42a7d64415eb3d8cdc03fca5
|
[
"Apache-2.0"
] | null | null | null |
flash_test/utils/log.py
|
nikolas-hermanns/flash-test
|
dda642e96f76113b42a7d64415eb3d8cdc03fca5
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Jan 16, 2016
@author: enikher
'''
import logging
import datetime
LOG = logging.getLogger(__name__)
LOG_LEVEL = logging.DEBUG
LOG_PATH = "./dlService.log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
filename=LOG_PATH,
                    datefmt='%Y-%m-%dT%H:%M:%S', level=LOG_LEVEL)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console.setFormatter(formatter)
LOG.addHandler(console)
def log_enter_exit(func):
def inner(self, *args, **kwargs):
LOG.debug(("Entering %(cls)s.%(method)s "
"args: %(args)s, kwargs: %(kwargs)s") %
{'cls': self.__class__.__name__,
'method': func.__name__,
'args': args,
'kwargs': kwargs})
start = datetime.datetime.now()
ret = func(self, *args, **kwargs)
end = datetime.datetime.now()
LOG.debug(("Exiting %(cls)s.%(method)s. "
"Spent %(duration)s sec. "
"Return %(return)s") %
{'cls': self.__class__.__name__,
'duration': end - start,
'method': func.__name__,
'return': ret})
return ret
return inner
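# Minimal usage sketch (the class and method below are illustrative, not part of
# the original module): the decorator wraps instance methods and logs the
# arguments, the return value and the time spent in the call.
class _Demo(object):
    @log_enter_exit
    def add(self, a, b):
        return a + b
if __name__ == '__main__':
    _Demo().add(1, b=2)  # logs "Entering _Demo.add ..." then "Exiting _Demo.add ..."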
| 31.395349
| 71
| 0.545926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 365
| 0.27037
|
a544d907f7886fe6cd4c85cf8051e8844cf738ac
| 54
|
py
|
Python
|
flask_api/app/common/__init__.py
|
brennanhfredericks/network-monitor-server
|
7c811d7851aee5d069569306c46dff39d8d52400
|
[
"MIT"
] | null | null | null |
flask_api/app/common/__init__.py
|
brennanhfredericks/network-monitor-server
|
7c811d7851aee5d069569306c46dff39d8d52400
|
[
"MIT"
] | null | null | null |
flask_api/app/common/__init__.py
|
brennanhfredericks/network-monitor-server
|
7c811d7851aee5d069569306c46dff39d8d52400
|
[
"MIT"
] | null | null | null |
from .appifaceprog import api
from .database import db
| 27
| 29
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a546651f1dcad01340583064244d142fb1215fd5
| 1,061
|
py
|
Python
|
EasyPortfolioExplorer/app/utils/resource_loader.py
|
jblemoine/EasyPortfolioExplorer
|
88484a1acb8f41f7497129ffefc89608af2d34d5
|
[
"MIT"
] | null | null | null |
EasyPortfolioExplorer/app/utils/resource_loader.py
|
jblemoine/EasyPortfolioExplorer
|
88484a1acb8f41f7497129ffefc89608af2d34d5
|
[
"MIT"
] | null | null | null |
EasyPortfolioExplorer/app/utils/resource_loader.py
|
jblemoine/EasyPortfolioExplorer
|
88484a1acb8f41f7497129ffefc89608af2d34d5
|
[
"MIT"
] | 1
|
2018-05-07T23:44:40.000Z
|
2018-05-07T23:44:40.000Z
|
from EasyPortfolioExplorer.app.easy.base import EasyBase
class ResourceLoader(EasyBase):
"""
    Class for adding external resources such as CSS and JS files.
    The current version is based on Bootstrap 3.3.7.
"""
def __init__(self, **kwargs):
super(ResourceLoader, self).__init__(**kwargs)
self._css_urls = [
'https://cdn.rawgit.com/jblemoine/EasyPortfolioExplorer/117125bb/EasyPortfolioExplorer/app/static/extra.css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css',
]
self._js_urls = [
'https://code.jquery.com/'
'jquery-3.1.1.slim.min.js',
'https://maxcdn.bootstrapcdn.com/'
'bootstrap/3.3.7/js/bootstrap.min.js',
'/static/extra.js'
]
def load_resources(self):
for url in self._css_urls:
self.app.css.append_css({'external_url': url})
for url in self._js_urls:
self.app.scripts.append_script({'external_url': url})
| 33.15625
| 122
| 0.600377
| 999
| 0.941565
| 0
| 0
| 0
| 0
| 0
| 0
| 481
| 0.453346
|
a5477960eb696e3d1bcdbdddd2a93a52054fb340
| 11,884
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_firmware_rpc/dad_status/output/dad_status_entries/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_firmware_rpc/dad_status/output/dad_status_entries/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_firmware_rpc/dad_status/output/dad_status_entries/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class dad_status_entries(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-firmware - based on the path /brocade_firmware_rpc/dad-status/output/dad-status-entries. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__index','__date_and_time_info','__message',)
_yang_name = 'dad-status-entries'
_rest_name = 'dad-status-entries'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="index", rest_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Sequence number for the message'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='uint32', is_config=True)
self.__message = YANGDynClass(base=unicode, is_leaf=True, yang_name="message", rest_name="message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Textual description of the status'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)
self.__date_and_time_info = YANGDynClass(base=unicode, is_leaf=True, yang_name="date-and-time-info", rest_name="date-and-time-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Date and time of the message. The format is YYYY-MM-DD/HH:MM:SS.SSSS (micro seconds)'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_firmware_rpc', u'dad-status', u'output', u'dad-status-entries']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'dad-status', u'output', u'dad-status-entries']
def _get_index(self):
"""
Getter method for index, mapped from YANG variable /brocade_firmware_rpc/dad_status/output/dad_status_entries/index (uint32)
"""
return self.__index
def _set_index(self, v, load=False):
"""
Setter method for index, mapped from YANG variable /brocade_firmware_rpc/dad_status/output/dad_status_entries/index (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_index is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_index() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="index", rest_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Sequence number for the message'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """index must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="index", rest_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Sequence number for the message'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='uint32', is_config=True)""",
})
self.__index = t
if hasattr(self, '_set'):
self._set()
def _unset_index(self):
self.__index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="index", rest_name="index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Sequence number for the message'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='uint32', is_config=True)
def _get_date_and_time_info(self):
"""
Getter method for date_and_time_info, mapped from YANG variable /brocade_firmware_rpc/dad_status/output/dad_status_entries/date_and_time_info (string)
"""
return self.__date_and_time_info
def _set_date_and_time_info(self, v, load=False):
"""
Setter method for date_and_time_info, mapped from YANG variable /brocade_firmware_rpc/dad_status/output/dad_status_entries/date_and_time_info (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_date_and_time_info is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_date_and_time_info() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="date-and-time-info", rest_name="date-and-time-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Date and time of the message. The format is YYYY-MM-DD/HH:MM:SS.SSSS (micro seconds)'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """date_and_time_info must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="date-and-time-info", rest_name="date-and-time-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Date and time of the message. The format is YYYY-MM-DD/HH:MM:SS.SSSS (micro seconds)'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)""",
})
self.__date_and_time_info = t
if hasattr(self, '_set'):
self._set()
def _unset_date_and_time_info(self):
self.__date_and_time_info = YANGDynClass(base=unicode, is_leaf=True, yang_name="date-and-time-info", rest_name="date-and-time-info", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Date and time of the message. The format is YYYY-MM-DD/HH:MM:SS.SSSS (micro seconds)'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)
def _get_message(self):
"""
Getter method for message, mapped from YANG variable /brocade_firmware_rpc/dad_status/output/dad_status_entries/message (string)
"""
return self.__message
def _set_message(self, v, load=False):
"""
Setter method for message, mapped from YANG variable /brocade_firmware_rpc/dad_status/output/dad_status_entries/message (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_message is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_message() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="message", rest_name="message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Textual description of the status'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """message must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="message", rest_name="message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Textual description of the status'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)""",
})
self.__message = t
if hasattr(self, '_set'):
self._set()
def _unset_message(self):
self.__message = YANGDynClass(base=unicode, is_leaf=True, yang_name="message", rest_name="message", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Textual description of the status'}}, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='string', is_config=True)
index = __builtin__.property(_get_index, _set_index)
date_and_time_info = __builtin__.property(_get_date_and_time_info, _set_date_and_time_info)
message = __builtin__.property(_get_message, _set_message)
_pyangbind_elements = {'index': index, 'date_and_time_info': date_and_time_info, 'message': message, }
| 61.57513
| 496
| 0.73073
| 11,490
| 0.966846
| 0
| 0
| 0
| 0
| 0
| 0
| 5,670
| 0.477112
|
a548314328afb7d3cb5f380d9d16cde6403fb2e0
| 1,275
|
py
|
Python
|
lintcode/Easy/085_Insert_Node_in_a_Binary_Search_Tree.py
|
Rhadow/leetcode
|
43209626720321113dbfbac67b3841e6efb4fab3
|
[
"MIT"
] | 3
|
2017-04-03T12:18:24.000Z
|
2018-06-25T08:31:04.000Z
|
lintcode/Easy/085_Insert_Node_in_a_Binary_Search_Tree.py
|
Rhadow/leetcode
|
43209626720321113dbfbac67b3841e6efb4fab3
|
[
"MIT"
] | null | null | null |
lintcode/Easy/085_Insert_Node_in_a_Binary_Search_Tree.py
|
Rhadow/leetcode
|
43209626720321113dbfbac67b3841e6efb4fab3
|
[
"MIT"
] | null | null | null |
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of the binary search tree.
@param node: insert this node into the binary search tree.
@return: The root of the new binary search tree.
"""
def insertNode(self, root, node):
# write your code here
# Iteration
if (root is None):
return node
parent = None
target = root
dir = None
while (target):
parent = target
if (target.val < node.val):
target = target.right
dir = 'right'
else:
target = target.left
dir = 'left'
if (target is None):
if (dir == 'left'):
parent.left = node
else:
parent.right = node
break
        return root
# Recursion
# if (root is None):
# return node
# if (root.val > node.val):
# root.left = self.insertNode(root.left, node)
# else:
# root.right = self.insertNode(root.right, node)
# return root
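# Small usage sketch (not part of the original solution; TreeNode is defined
# exactly as in the header comment above).
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None
if __name__ == '__main__':
    root = Solution().insertNode(None, TreeNode(2))
    Solution().insertNode(root, TreeNode(1))
    Solution().insertNode(root, TreeNode(3))
    print(root.left.val, root.val, root.right.val)  # 1 2 3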
| 27.12766
| 62
| 0.481569
| 1,131
| 0.887059
| 0
| 0
| 0
| 0
| 0
| 0
| 571
| 0.447843
|
a5488a57c13d79bfc459f46fd458c1c896f8b4d3
| 1,268
|
py
|
Python
|
Python/1289.MatrixSpiral.py
|
nizD/LeetCode-Solutions
|
7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349
|
[
"MIT"
] | 263
|
2020-10-05T18:47:29.000Z
|
2022-03-31T19:44:46.000Z
|
Python/1289.MatrixSpiral.py
|
nizD/LeetCode-Solutions
|
7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349
|
[
"MIT"
] | 1,264
|
2020-10-05T18:13:05.000Z
|
2022-03-31T23:16:35.000Z
|
Python/1289.MatrixSpiral.py
|
nizD/LeetCode-Solutions
|
7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349
|
[
"MIT"
] | 760
|
2020-10-05T18:22:51.000Z
|
2022-03-29T06:06:20.000Z
|
"""This program takes a matrix of size mxn as input, and prints the matrix in a spiral format
for example: input ->> [[1,2,3],
[4,5,6],
[7,8,9],
[10,11,12]]
output ->> 1 2 3 6 9 12 11 10 7 4 5 8"""
class Solution:
def matrix_spiral(self, matrix):
"""
:type matrix: list[list[]]
"""
starting_row = 0
ending_row = len(matrix)
starting_col = 0
ending_col = len(matrix[0])
while starting_row < ending_row and starting_col < ending_col:
for k in range(starting_col, ending_col):
print(matrix[starting_row][k], end=" ")
starting_row += 1
for k in range(starting_row, ending_row):
print(matrix[k][ending_col-1], end=" ")
ending_col -= 1
if starting_row < ending_row:
for k in range(ending_col-1, starting_col-1, -1):
print(matrix[ending_row-1][k], end=" ")
ending_row -= 1
if starting_col < ending_col:
for k in range(ending_row-1, starting_row-1, -1):
print(matrix[k][starting_col], end=" ")
starting_col += 1
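# Illustrative usage sketch (added for clarity; not part of the original
# solution): feed the docstring's example matrix through matrix_spiral.
# Expected output: 1 2 3 6 9 12 11 10 7 4 5 8
if __name__ == '__main__':
    example = [[1, 2, 3],
               [4, 5, 6],
               [7, 8, 9],
               [10, 11, 12]]
    Solution().matrix_spiral(example)
    print()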
| 37.294118
| 93
| 0.502366
| 982
| 0.774448
| 0
| 0
| 0
| 0
| 0
| 0
| 344
| 0.271293
|
a54a675c308dee0b53b78a00aef279613875fd2d
| 4,694
|
py
|
Python
|
lib/sde.py
|
NCIA-Diffusion/ScoreSDE
|
b5a562908daf66e6dcf0b791beb83f1fcb61174b
|
[
"MIT"
] | 2
|
2022-03-02T06:54:28.000Z
|
2022-03-02T06:56:45.000Z
|
lib/sde.py
|
NCIA-Diffusion/ScoreSDE
|
b5a562908daf66e6dcf0b791beb83f1fcb61174b
|
[
"MIT"
] | null | null | null |
lib/sde.py
|
NCIA-Diffusion/ScoreSDE
|
b5a562908daf66e6dcf0b791beb83f1fcb61174b
|
[
"MIT"
] | 2
|
2022-02-23T11:49:15.000Z
|
2022-03-02T06:56:46.000Z
|
import abc
import numpy as np
import torch
import torch.nn as nn
class AbstractSDE(abc.ABC):
def __init__(self):
super().__init__()
self.N = 1000
@property
@abc.abstractmethod
def T(self):
"""End time of the SDE."""
raise NotImplementedError
@abc.abstractmethod
def sde(self, x_t, t):
"""Compute the drift/diffusion of the forward SDE
dx = b(x_t, t)dt + s(x_t, t)dW
"""
raise NotImplementedError
@abc.abstractmethod
def marginal_prob(self, x_0, t):
"""Compute the mean/std of the transitional kernel
p(x_t | x_0).
"""
raise NotImplementedError
@abc.abstractmethod
def prior_logp(self, z):
"""Compute log-density of the prior distribution."""
raise NotImplementedError
@abc.abstractmethod
def scale_start_to_noise(self, t):
"""Compute the scale of conversion
from the original image estimation loss, i.e, || x_0 - x_0_pred ||
to the noise prediction loss, i.e, || e - e_pred ||.
Denoting the output of this function by C,
C * || x_0 - x_0_pred || = || e - e_pred || holds.
"""
raise NotImplementedError
# @abc.abstractmethod
# def proposal_distribution(self):
# raise NotImplementedError
def reverse(self, model, model_pred_type='noise'):
"""The reverse-time SDE."""
sde_fn = self.sde
marginal_fn = self.marginal_prob
class RSDE(self.__class__):
def __init__(self):
pass
def score_fn(self, x_t, t):
if model_pred_type == 'noise':
x_noise_pred = model(x_t, t)
_, x_std = marginal_fn(
torch.zeros_like(x_t),
t,
)
score = -x_noise_pred / x_std
elif model_pred_type == 'original':
x_0_pred = model(x_t, t)
x_mean, x_std = marginal_fn(
x_0_pred,
t
)
                    score = (x_mean - x_t) / (x_std ** 2)  # Gaussian score: (mean - x) / variance, matching the noise branch above
return score
def sde(self, x_t, t):
# Get score function values
score = self.score_fn(x_t, t)
# Forward SDE's drift & diffusion
drift, diffusion = sde_fn(x_t, t)
# Reverse SDE's drift & diffusion (Anderson, 1982)
drift = drift - diffusion ** 2 * score
return drift, diffusion
return RSDE()
class VPSDE(AbstractSDE):
def __init__(self, beta_min=0.1, beta_max=20, N=1000):
super().__init__()
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1. - self.discrete_betas
# self.IS_dist, self.norm_const = self.proposal_distribution()
@property
def T(self):
return 1
def sde(self, x_t, t):
beta_t = (self.beta_0 + t * (self.beta_1 - self.beta_0))[:, None, None, None]
drift = -0.5 * beta_t * x_t
diffusion = torch.sqrt(beta_t)
return drift, diffusion
def marginal_prob(self, x_0, t):
log_mean_coeff = (
-0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
)[:, None, None, None]
marginal_mean = torch.exp(log_mean_coeff) * x_0
marginal_std = torch.sqrt(1. - torch.exp(2. * log_mean_coeff))
return marginal_mean, marginal_std
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logps = - N / 2. * np.log(2 * np.pi) - torch.sum(z ** 2, dim=(1, 2, 3)) / 2.
return logps
def scale_start_to_noise(self, t):
log_mean_coeff = (
-0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
)[:, None, None, None]
marginal_coeff = torch.exp(log_mean_coeff)
marginal_std = torch.sqrt(1. - torch.exp(2. * log_mean_coeff))
scale = marginal_coeff / (marginal_std + 1e-12)
return scale
# def proposal_distribution(self):
# def g2(t):
# return self.beta_0 + t * (self.beta_1 - self.beta_0)
# def a2(t):
# log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) \
# - 0.5 * t * self.beta_0
# return 1. - torch.exp(2. * log_mean_coeff)
# t = torch.arange(1, 1001) / 1000
# p = g2(t) / a2(t)
# normalizing_const = p.sum()
# return p, normalizing_const
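# Illustrative usage sketch (added for clarity; not part of the original
# module): draw a sample from the VP-SDE transition kernel p(x_t | x_0) and
# query the forward drift/diffusion. Batch size and image shape are arbitrary.
if __name__ == '__main__':
    sde = VPSDE()
    x_0 = torch.randn(4, 3, 32, 32)           # pretend batch of images
    t = torch.rand(4) * sde.T                 # times in (0, T)
    mean, std = sde.marginal_prob(x_0, t)     # statistics of p(x_t | x_0)
    x_t = mean + std * torch.randn_like(x_0)  # reparameterized sample
    drift, diffusion = sde.sde(x_t, t)
    print(drift.shape, diffusion.shape)       # drift matches x_t; diffusion broadcasts over it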
| 31.503356
| 85
| 0.532169
| 4,620
| 0.984235
| 0
| 0
| 1,071
| 0.228164
| 0
| 0
| 1,281
| 0.272902
|
a54a95f758a5621c7d99991bc1935abca6851391
| 799
|
py
|
Python
|
tests/lanczos/build.py
|
weikengchen/Libra
|
7ad48800febee0d4426a6146d54906476b7acc5a
|
[
"Apache-2.0"
] | 28
|
2020-01-05T12:05:57.000Z
|
2021-11-23T16:18:40.000Z
|
tests/lanczos/build.py
|
weikengchen/Libra
|
7ad48800febee0d4426a6146d54906476b7acc5a
|
[
"Apache-2.0"
] | 1
|
2020-08-10T17:15:38.000Z
|
2020-08-11T16:14:46.000Z
|
tests/lanczos/build.py
|
weikengchen/Libra
|
7ad48800febee0d4426a6146d54906476b7acc5a
|
[
"Apache-2.0"
] | 13
|
2020-01-31T05:53:37.000Z
|
2021-08-02T14:05:43.000Z
|
import os
os.system('./build.sh')
os.system('g++ parser_sha_data_parallel.cpp -o psdp -O3')
os.system('./psdp lanczos2_16.pws lanczos2_16_112_N=16_rdl.pws lanczos2_112_N=16_circuit.txt lanczos2_112_N=16_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_176_N=64_rdl.pws lanczos2_176_N=64_circuit.txt lanczos2_176_N=64_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_304_N=256_rdl.pws lanczos2_304_N=256_circuit.txt lanczos2_304_N=256_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_560_N=1024_rdl.pws lanczos2_560_N=1024_circuit.txt lanczos2_560_N=1024_meta.txt')
os.system('./psdp lanczos2_16.pws lanczos2_16_1072_N=4096_rdl.pws lanczos2_1072_N=4096_circuit.txt lanczos2_1072_N=4096_meta.txt')
os.system('make -C ../.. linear_gkr_zk')
os.system('cp ../../bin/main_zk .')
| 61.461538
| 130
| 0.807259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 679
| 0.849812
|
a54b68b3a18c130ef71abef51b17c638d75ff918
| 1,166
|
py
|
Python
|
diagrams/seq-tables.py
|
PerFuchs/master-thesis
|
85386c266fecf72348114bcbafeeb896a9e74601
|
[
"MIT"
] | 1
|
2019-11-02T20:23:03.000Z
|
2019-11-02T20:23:03.000Z
|
diagrams/seq-tables.py
|
PerFuchs/master-thesis
|
85386c266fecf72348114bcbafeeb896a9e74601
|
[
"MIT"
] | null | null | null |
diagrams/seq-tables.py
|
PerFuchs/master-thesis
|
85386c266fecf72348114bcbafeeb896a9e74601
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
from diagrams.base import *
DATASET = DATASET_FOLDER + "ama0302.csv"
def tabulize_data(data_path, output_path):
data = pd.read_csv(data_path)
fix_count(data)
fix_neg(data, "copy")
data["total_time"] = data["End"] - data["Start"]
grouped = data.groupby(["partitioning_base", "Query", "Parallelism"])
data.to_latex(buf=open(output_path, "w"),
columns=["Query", "Count", "Time", "WCOJTime_wcoj", "setup", "ratio"],
header = ["Query", "\\# Result", "\\texttt{BroadcastHashJoin}", "\\texttt{seq}", "setup", "Speedup"],
column_format="lr||r|rr||r",
formatters = {
"ratio": lambda r: str(round(r, 1)),
"Count": lambda c: "{:,}".format(c),
},
escape=False,
index=False
)
tabulize_data(DATASET_FOLDER + "ama0302.csv", GENERATED_PATH + "seq-table-ama0302.tex")
tabulize_data(DATASET_FOLDER + "ama0601.csv", GENERATED_PATH + "seq-table-ama0601.tex")
tabulize_data(DATASET_FOLDER + "snb-sf1.csv", GENERATED_PATH + "seq-table-snb-sf1.tex")
| 32.388889
| 117
| 0.596913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 354
| 0.303602
|
a54b6dc0f255b7a92415a48a23ac09a9d0e01321
| 1,513
|
py
|
Python
|
instance-segmentation/detectron_train_PointRend.py
|
diwgan32/IKEA_ASM_Dataset
|
8f41c15c4a7fb47f53235d2292d0eff8136ae492
|
[
"MIT"
] | null | null | null |
instance-segmentation/detectron_train_PointRend.py
|
diwgan32/IKEA_ASM_Dataset
|
8f41c15c4a7fb47f53235d2292d0eff8136ae492
|
[
"MIT"
] | null | null | null |
instance-segmentation/detectron_train_PointRend.py
|
diwgan32/IKEA_ASM_Dataset
|
8f41c15c4a7fb47f53235d2292d0eff8136ae492
|
[
"MIT"
] | null | null | null |
# Run training with PointRend head
# uses default configuration from detectron2
# The model is initialized via pre-trained coco models from detectron2 model zoo
#
# Fatemeh Saleh <fatemehsadat.saleh@anu.edu.au>
import os
from detectron2.config import get_cfg
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultTrainer
import sys; sys.path.insert(1, "projects/PointRend")
import point_rend
from detectron2.utils.logger import setup_logger
setup_logger()
if __name__=='__main__':
register_coco_instances("ikea_train", {}, "path/to/annotation/train_manual_coco_format.json", "/path/to/images/")
cfg = get_cfg()
point_rend.add_pointrend_config(cfg)
cfg.merge_from_file("projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml")
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 7
cfg.DATASETS.TRAIN = ("ikea_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
# initialize training
cfg.MODEL.WEIGHTS = "detectron2://PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl"
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.0025 # pick a good LR
cfg.SOLVER.MAX_ITER = 60000
cfg.SOLVER.STEPS = (20000, 40000)
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 7
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
| 36.902439
| 134
| 0.769993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 561
| 0.370787
|
a54c32d9df76ea887a0b0bac9c4f21cd01fe50ff
| 3,991
|
py
|
Python
|
verify-local-sendnsca-client/update_nagios_cfg.py
|
raychorn/svn_jenkins_projects
|
93d22c28735c9fe6cb4ac632b6e79d89530e3bfb
|
[
"CC0-1.0"
] | null | null | null |
verify-local-sendnsca-client/update_nagios_cfg.py
|
raychorn/svn_jenkins_projects
|
93d22c28735c9fe6cb4ac632b6e79d89530e3bfb
|
[
"CC0-1.0"
] | null | null | null |
verify-local-sendnsca-client/update_nagios_cfg.py
|
raychorn/svn_jenkins_projects
|
93d22c28735c9fe6cb4ac632b6e79d89530e3bfb
|
[
"CC0-1.0"
] | null | null | null |
import re, os, sys
isBeingDebugged = False if (not os.environ.has_key('WINGDB_ACTIVE')) else int(os.environ['WINGDB_ACTIVE']) == 1
__re__ = re.compile("(?P<commented>(#|))cfg_file=(?P<cfg_file>.*)", re.DOTALL | re.MULTILINE)
def find_nagios_cfg(top,target):
print 'DEBUG: top=%s, target=%s' % (top,target)
for folder,dirs,files in os.walk(top):
if (any([f == target for f in files])):
return os.sep.join([folder,target])
print 'DEBUG: None found !!!'
return None
__top__ = '/usr' if (not isBeingDebugged) else r'J:\@11.1'
fpath = find_nagios_cfg(__top__, 'nagios.cfg')
fdest = sys.argv[-1]
print 'INFO(1): nagios.cfg is %s' % (fpath)
fdest_dir = os.path.dirname(fdest)
fdest_base = os.path.basename(fdest)
toks = fdest_base.split('_')
retirees = toks[1:-1]
if (len(retirees) > 0):
del toks[1:-1]
fdest_base = '_'.join(toks)
fdest = os.sep.join([fdest_dir,fdest_base])
print 'INFO(2): fdest is %s' % (fdest)
print 'INFO: nagios.cfg is %s' % (fpath)
if (os.path.exists(fdest)):
if (os.path.exists(fpath)):
fIn = open(fpath, 'r')
lines = fIn.readlines()
fIn.close()
__temp_path__ = os.path.dirname(fpath)
toks = __temp_path__.split(os.sep)
if (len(toks) > 1):
del toks[-1]
toks.append('tmp')
__temp_path__ = os.sep.join(toks)
if (not os.path.exists(__temp_path__)):
os.mkdir(__temp_path__)
__lines__ = []
__matches__ = []
first_time_used = -1
count = 0
__was__ = False
for l in lines:
__is__ = False
matches = __re__.search(l)
if (matches):
print 'FOUND: %s' % (matches.groupdict())
is_commented = len(matches.groupdict().get('commented','')) > 0
if (not is_commented):
cfg_file = matches.groupdict().get('cfg_file',None)
if (cfg_file):
cfg_file = str(cfg_file).rstrip()
if (cfg_file == fdest):
__was__ = True
__matches__.append(matches.groupdict())
if (first_time_used == -1):
first_time_used = count
else: # is a match but is commented so use the line.
__is__ = True
else: # not a match so use the line.
__is__ = True
if (__is__):
__lines__.append(str(l).rstrip())
count += 1
i = len(__lines__)-1
while (i > 2):
if (len(__lines__[i]) == 0) and (len(__lines__[i-1]) == 0) and (len(__lines__[i-2]) == 0):
del __lines__[i]
i -= 1
if (not __was__):
d = {'commented': '', 'cfg_file': fdest}
print 'APPEND: %s' % (d)
__matches__.append(d)
fOut = open(fpath+'.new', mode='w')
count = 0
for l in __lines__:
print >> fOut, str(l).rstrip()
if (count == first_time_used):
for m in __matches__:
is_commented = len(m.get('commented','')) > 0
comment = ''
if (is_commented):
comment = '#'
cfg_file = m.get('cfg_file',None)
print >> fOut, '%s%s' % (comment,'cfg_file=%s' % (cfg_file))
count += 1
fOut.flush()
fOut.close()
os.remove(fpath)
os.rename(fOut.name,fpath)
else:
print >> sys.stderr, 'WARNING: Cannot find "%s".' % (fpath)
else:
print >> sys.stderr, 'WARNING: Cannot find dest config file "%s"; make sure this file is mentioned on the command line as the 1st argument.' % (fdest)
| 35.954955
| 155
| 0.491606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 591
| 0.148083
|
a54c3694831528b032a63a41c9cef6f955e863a0
| 11,775
|
py
|
Python
|
dataviva/attrs/views.py
|
dogobox/datavivamaster
|
c89596778e2d8d01a2193b02ca5960bd17f4468d
|
[
"MIT"
] | null | null | null |
dataviva/attrs/views.py
|
dogobox/datavivamaster
|
c89596778e2d8d01a2193b02ca5960bd17f4468d
|
[
"MIT"
] | null | null | null |
dataviva/attrs/views.py
|
dogobox/datavivamaster
|
c89596778e2d8d01a2193b02ca5960bd17f4468d
|
[
"MIT"
] | null | null | null |
import urllib2
from sqlalchemy import func, distinct, asc, desc, and_, or_
from flask import Blueprint, request, jsonify, abort, g, render_template, make_response, redirect, url_for, flash
from dataviva import db, __latest_year__
from dataviva.attrs.models import Bra, Wld, Hs, Isic, Cbo, Yb
from dataviva.secex.models import Yp, Yw
from dataviva.rais.models import Yi, Yo
from dataviva.ask.models import Question
from dataviva.utils.gzip_data import gzip_data
from dataviva.utils.cached_query import cached_query
from dataviva.utils.exist_or_404 import exist_or_404
from dataviva.utils.title_case import title_case
mod = Blueprint('attrs', __name__, url_prefix='/attrs')
@mod.errorhandler(404)
def page_not_found(error):
return error, 404
def fix_name(attr, lang):
name_lang = "name_" + lang
desc_lang = "desc_" + lang
keywords_lang = "keywords_" + lang
if desc_lang in attr:
attr["desc"] = title_case(attr[desc_lang])
if "desc_en" in attr: del attr["desc_en"]
if "desc_pt" in attr: del attr["desc_pt"]
if name_lang in attr:
attr["name"] = title_case(attr[name_lang])
if "name_en" in attr: del attr["name_en"]
if "name_pt" in attr: del attr["name_pt"]
if keywords_lang in attr:
attr["keywords"] = title_case(attr[keywords_lang])
if "keywords_en" in attr: del attr["keywords_en"]
if "keywords_pt" in attr: del attr["keywords_pt"]
return attr
############################################################
# ----------------------------------------------------------
# All attribute views
#
############################################################
@mod.route('/<attr>/')
@mod.route('/<attr>/<Attr_id>/')
def attrs(attr="bra",Attr_id=None):
Attr = globals()[attr.title()]
Attr_weight_mergeid = "{0}_id".format(attr)
if attr == "bra":
Attr_weight_tbl = Yb
Attr_weight_col = "population"
elif attr == "isic":
Attr_weight_tbl = Yi
Attr_weight_col = "num_emp"
elif attr == "cbo":
Attr_weight_tbl = Yo
Attr_weight_col = "num_emp"
elif attr == "hs":
Attr_weight_tbl = Yp
Attr_weight_col = "val_usd"
elif attr == "wld":
Attr_weight_tbl = Yw
Attr_weight_col = "val_usd"
depths = {}
depths["bra"] = [2,4,7,8]
depths["isic"] = [1,3,5]
depths["cbo"] = [1,2,4]
depths["hs"] = [2,4,6]
depths["wld"] = [2,5]
depth = request.args.get('depth', None)
order = request.args.get('order', None)
offset = request.args.get('offset', None)
limit = request.args.get('limit', None)
if offset:
offset = float(offset)
limit = limit or 50
lang = request.args.get('lang', None) or g.locale
ret = {}
dataset = "rais"
if Attr == Cbo or Attr == Hs:
dataset = "secex"
latest_year = __latest_year__[dataset]
cache_id = request.path + lang
if depth:
cache_id = cache_id + "/" + depth
# first lets test if this query is cached
cached_q = cached_query(cache_id)
if cached_q and limit is None:
ret = make_response(cached_q)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
return ret
# if an ID is supplied only return that
if Attr_id:
# the '.show.' indicates that we are looking for a specific nesting
if ".show." in Attr_id:
this_attr, ret["nesting_level"] = Attr_id.split(".show.")
# filter table by requested nesting level
attrs = Attr.query \
.filter(Attr.id.startswith(this_attr)) \
.filter(func.char_length(Attr.id) == ret["nesting_level"]).all()
# the 'show.' indicates that we are looking for a specific nesting
elif "show." in Attr_id:
ret["nesting_level"] = Attr_id.split(".")[1]
# filter table by requested nesting level
attrs = Attr.query.filter(func.char_length(Attr.id) == ret["nesting_level"]).all()
# the '.' here means we want to see all attrs within a certain distance
elif "." in Attr_id:
this_attr, distance = Attr_id.split(".")
this_attr = Attr.query.get_or_404(this_attr)
attrs = this_attr.get_neighbors(distance)
else:
attrs = [Attr.query.get_or_404(Attr_id)]
ret["data"] = [fix_name(a.serialize(), lang) for a in attrs]
# an ID/filter was not provided
else:
query = db.session.query(Attr,Attr_weight_tbl) \
.outerjoin(Attr_weight_tbl, and_(getattr(Attr_weight_tbl,"{0}_id".format(attr)) == Attr.id, Attr_weight_tbl.year == latest_year))
if depth:
query = query.filter(func.char_length(Attr.id) == depth)
else:
query = query.filter(func.char_length(Attr.id).in_(depths[attr]))
if order:
direction = "asc"
if "." in order:
o, direction = order.split(".")
else:
o = order
if o == "name":
o = "name_{0}".format(lang)
if o == Attr_weight_col:
order_table = Attr_weight_tbl
else:
order_table = Attr
if direction == "asc":
query = query.order_by(asc(getattr(order_table,o)))
elif direction == "desc":
query = query.order_by(desc(getattr(order_table,o)))
if limit:
query = query.limit(limit).offset(offset)
attrs_all = query.all()
# just get items available in DB
attrs_w_data = None
if depth is None and limit is None:
attrs_w_data = db.session.query(Attr, Attr_weight_tbl) \
.filter(getattr(Attr_weight_tbl, Attr_weight_mergeid) == Attr.id) \
.group_by(Attr.id)
# raise Exception(attrs_w_data.all())
attrs_w_data = [a[0].id for a in attrs_w_data]
attrs = []
for i, a in enumerate(attrs_all):
b = a[0].serialize()
if a[1]:
b[Attr_weight_col] = a[1].serialize()[Attr_weight_col]
else:
b[Attr_weight_col] = 0
a = b
if attrs_w_data:
a["available"] = False
if a["id"] in attrs_w_data:
a["available"] = True
if Attr_weight_col == "population" and len(a["id"]) == 8 and a["id"][:2] == "mg":
plr = Bra.query.get_or_404(a["id"]).pr2.first()
if plr: a["plr"] = plr.id
if order:
a["rank"] = int(i+offset+1)
attrs.append(fix_name(a, lang))
ret["data"] = attrs
ret = jsonify(ret)
ret.data = gzip_data(ret.data)
if limit is None and cached_q is None:
cached_query(cache_id, ret.data)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
return ret
@mod.route('/table/<attr>/<depth>/')
def attrs_table(attr="bra",depth="2"):
g.page_type = "attrs"
data_url = "/attrs/{0}/?depth={1}".format(attr,depth)
return render_template("general/table.html", data_url=data_url)
@mod.route('/search/<term>/')
def attrs_search(term=None):
# Dictionary
bra_query = {}
cbo_query = {}
isic_query = {}
hs_query = {}
question_query = {}
wld = {}
lang = request.args.get('lang', None) or g.locale
result = []
bra = Bra.query.filter(or_(Bra.id == term, or_(Bra.name_pt.ilike("%"+term+"%"), Bra.name_en.ilike("%"+term+"%"))))
items = bra.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
bra_query = {}
bra_query["id"] = i["id"]
bra_query["name_pt"] = i["name_pt"]
if i["id"] == "bra":
icon = "all"
else:
icon = i["id"][0:2]
bra_query["icon"] = "/static/images/icons/bra/bra_" + icon
bra_query["name_en"] = i["name_en"]
bra_query["color"] = i["color"]
bra_query["content_type"] = "bra"
bra_query = fix_name(bra_query, lang)
result.append(bra_query)
if lang == "pt":
cbo = Cbo.query.filter(or_(Cbo.id == term, Cbo.name_pt.ilike("%"+term+"%")))
else:
cbo = Cbo.query.filter(or_(Cbo.id == term, Cbo.name_en.ilike("%"+term+"%")))
items = cbo.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
cbo_query = {}
cbo_query["id"] = i["id"]
cbo_query["name_pt"] = i["name_pt"]
cbo_query["name_en"] = i["name_en"]
cbo_query["color"] = i["color"]
cbo_query["content_type"] = "cbo"
cbo_query = fix_name(cbo_query, lang)
result.append(cbo_query)
isic_match = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u"]
if lang == "pt":
isic = Isic.query.filter(and_(Isic.name_pt.ilike("%"+term+"%"), Isic.id.in_(isic_match)))
else:
isic = Isic.query.filter(and_(Isic.name_en.ilike("%"+term+"%"), Isic.id.in_(isic_match)))
items = isic.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
isic_query = {}
isic_query["id"] = i["id"]
isic_query["name_pt"] = i["name_pt"]
isic_query["name_en"] = i["name_en"]
isic_query["color"] = i["color"]
isic_query["content_type"] = "isic"
isic_query = fix_name(isic_query, lang)
result.append(isic_query)
if lang == "pt":
hs = Hs.query.filter(or_(Hs.id.like("%"+term+"%"), Hs.name_pt.like("%"+term+"%")))
else:
hs = Hs.query.filter(or_(Hs.id.like("%"+term+"%"), Hs.name_en.ilike("%"+term+"%")))
items = hs.limit(50).all()
print(items)
items = [i.serialize() for i in items]
for i in items:
hs_query = {}
hs_query["id"] = i["id"]
hs_query["name_pt"] = i["name_pt"]
hs_query["name_en"] = i["name_en"]
hs_query["color"] = i["color"]
hs_query["content_type"] = "hs"
hs_query = fix_name(hs_query,lang)
result.append(hs_query)
if lang == "pt":
wld = Wld.query.filter(or_(Wld.id == term, Wld.name_pt.like("%"+term+"%")))
else:
wld = Wld.query.filter(or_(Wld.id == term, Wld.name_en.like("%"+term+"%")))
items = wld.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
wld_query = {}
wld_query["id"] = i["id"]
wld_query["name_pt"] = i["name_pt"]
wld_query["name_en"] = i["name_en"]
wld_query["color"] = i["color"]
wld_query["content_type"] = "wld"
wld_query = fix_name(wld_query, lang)
result.append(wld_query)
question = Question.query.filter(and_(Question.language == lang, or_(Question.question.ilike("%"+term+"%"), Question.body.ilike("%"+term+"%"))))
items = question.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
question_query = {}
question_query["id"] = i["slug"]
question_query["name"] = i["question"]
question_query["color"] = '#D67AB0'
question_query["content_type"] = "learnmore"
question_query = fix_name(question_query, lang)
result.append(question_query)
ret = jsonify({"activities":result})
return ret
| 34.429825
| 148
| 0.545563
| 0
| 0
| 0
| 0
| 10,173
| 0.863949
| 0
| 0
| 2,141
| 0.181826
|
a54cde621c4d8d9c2e11ad32222e88ab799ae414
| 701
|
py
|
Python
|
leetcode/easy/sort-array-by-parity.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/easy/sort-array-by-parity.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/easy/sort-array-by-parity.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
"""
Given an array A of non-negative integers, return an array consisting of all the even elements of A,
followed by all the odd elements of A.
You may return any answer array that satisfies this condition.
Example 1:
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Note:
1 <= A.length <= 5000
0 <= A[i] <= 5000
"""
class Solution:
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
return [element for element in A if not element % 2] + \
[element for element in A if element % 2]
result = Solution().sortArrayByParity([3,1,2,4])
print(result)
| 20.028571
| 100
| 0.617689
| 251
| 0.35806
| 0
| 0
| 0
| 0
| 0
| 0
| 449
| 0.640514
|
a54d9516f3cf42047c6d21bb9815568bd1e67161
| 2,922
|
py
|
Python
|
meerschaum/_internal/docs/index.py
|
bmeares/Meerschaum
|
37bd7a9923efce53e91c6a1d9c31f9533b9b4463
|
[
"Apache-2.0"
] | 32
|
2020-09-14T16:29:19.000Z
|
2022-03-08T00:51:28.000Z
|
meerschaum/_internal/docs/index.py
|
bmeares/Meerschaum
|
37bd7a9923efce53e91c6a1d9c31f9533b9b4463
|
[
"Apache-2.0"
] | 3
|
2020-10-04T20:03:30.000Z
|
2022-02-02T21:04:46.000Z
|
meerschaum/_internal/docs/index.py
|
bmeares/Meerschaum
|
37bd7a9923efce53e91c6a1d9c31f9533b9b4463
|
[
"Apache-2.0"
] | 5
|
2021-04-22T23:49:21.000Z
|
2022-02-02T12:59:08.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
<img src="https://meerschaum.io/assets/banner_1920x320.png" alt="Meerschaum banner">
| PyPI | GitHub | License |
| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
|  |  |  |
|  |  | |
# What is Meerschaum?
Meerschaum is a platform for quickly creating and managing time-series data streams called **pipes**. With Meerschaum, you can have a data visualization stack running in minutes.
The power of the Meerschaum system comes from projects like [pandas](https://pandas.pydata.org/), [sqlalchemy](https://www.sqlalchemy.org/), [fastapi](https://fastapi.tiangolo.com/), and more.
# Why Meerschaum?
If you've worked with time-series data, you know the headaches that come with ETL. Meerschaum is a system that makes consolidating and syncing data easy.
Don't rely on scripts that will silently break in a year. Meerschaum instead gives you better tools to define and sync your data streams. And don't worry — you can always incorporate Meerschaum into your existing scripts.
# Quick Start
For a more thorough setup guide, visit the [Getting Started](https://meerschaum.io/get-started/) page at [meerschaum.io](https://meerschaum.io).
## TL;DR
```bash
pip install -U --user meerschaum
mrsm stack up -d db grafana
mrsm bootstrap pipes
```
## Usage Documentation
Please visit [meerschaum.io](https://meerschaum.io) for setup, usage, and troubleshooting information. You can find technical documentation at [docs.meerschaum.io](https://docs.meerschaum.io).
## Plugins
Here is the [list of community plugins](https://meerschaum.io/reference/plugins/list-of-plugins/).
For details on installing, using, and writing plugins, check out the [plugins documentation](https://meerschaum.io/reference/plugins/types-of-plugins) at [meerschaum.io](https://meerschaum.io).
# Support Meerschaum's Development
I'm a full-time graduate student, and I work on Meerschaum in my free time. If you enjoy Meerschaum and want to support its development, you can [buy me a beer (or coffee)](https://www.buymeacoffee.com/bmeares).
"""
| 57.294118
| 292
| 0.647502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,919
| 0.99829
|
a54e64b95c67ef2ea40471b8b49ce1a8e5671cf2
| 1,065
|
py
|
Python
|
insights/parsers/tests/test_kpatch_patches.py
|
akshay196/insights-core
|
598865e6563119089c77152599300de38a77c72c
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/tests/test_kpatch_patches.py
|
akshay196/insights-core
|
598865e6563119089c77152599300de38a77c72c
|
[
"Apache-2.0"
] | null | null | null |
insights/parsers/tests/test_kpatch_patches.py
|
akshay196/insights-core
|
598865e6563119089c77152599300de38a77c72c
|
[
"Apache-2.0"
] | null | null | null |
from insights.parsers import kpatch_patches
from insights.tests import context_wrap
from insights.core.plugins import ContentException
import pytest
ASSORTED_KPATCHES = """
asdfasdfasdf_asdfasdfasdf-asdfasdfasdf_asdfasdfasdf.ko
asdfasdfasdf_asdfasdfasdf-asdfasdfasdf_asdfasdfasdf.ko.xz
foo-bar.ko
foo-bar.ko.xz
foo.ko
foo.ko.xz
test_klp_callbacks_demo.ko
test_klp_callbacks_demo.ko.xz
""".strip()
NO_KPATCH = """
/bin/ls: cannot access '/var/lib/kpatch/4.18.0-147.8.el8.x86_64': No such file or directory
""".strip()
# Try a bunch of random potential patch names
# Compare to expected module names
def test_assorted():
kp = kpatch_patches.KpatchPatches(context_wrap(ASSORTED_KPATCHES))
for patch in [
'asdfasdfasdf_asdfasdfasdf_asdfasdfasdf_asdfasdfasdf',
'foo_bar',
'foo',
'test_klp_callbacks_demo']:
assert patch in kp.patches
# Try the case of no patches installed
def test_no_kpatch():
with pytest.raises(ContentException):
kpatch_patches.KpatchPatches(context_wrap(NO_KPATCH))
| 27.307692
| 91
| 0.753991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 527
| 0.494836
|
a54eed00dc082ef6adf720e7a6dc2ace18221748
| 127
|
py
|
Python
|
tests/ev3dev/brick/battery.py
|
GianCann/pybricks-micropython
|
f23cdf7fdf9abd068e7e84ca54d6162b4fc5f72a
|
[
"MIT"
] | null | null | null |
tests/ev3dev/brick/battery.py
|
GianCann/pybricks-micropython
|
f23cdf7fdf9abd068e7e84ca54d6162b4fc5f72a
|
[
"MIT"
] | null | null | null |
tests/ev3dev/brick/battery.py
|
GianCann/pybricks-micropython
|
f23cdf7fdf9abd068e7e84ca54d6162b4fc5f72a
|
[
"MIT"
] | null | null | null |
from pybricks.hubs import EV3Brick
ev3 = EV3Brick()
print(ev3.battery.voltage()) # 7400
print(ev3.battery.current()) # 180
| 18.142857
| 36
| 0.724409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.086614
|
a5504cacd4d378cc9aecf50aa2070a23b003b4f8
| 3,878
|
py
|
Python
|
app/service/messages/dispatcher.py
|
ryan4yin/flypy-backend
|
7fcc2971ac27d3b44e352dfed73acd12e1913d65
|
[
"MIT"
] | 6
|
2019-03-14T02:39:17.000Z
|
2021-10-31T11:43:58.000Z
|
app/service/messages/dispatcher.py
|
ryan4yin/flypy-backend
|
7fcc2971ac27d3b44e352dfed73acd12e1913d65
|
[
"MIT"
] | null | null | null |
app/service/messages/dispatcher.py
|
ryan4yin/flypy-backend
|
7fcc2971ac27d3b44e352dfed73acd12e1913d65
|
[
"MIT"
] | 2
|
2020-02-04T07:44:37.000Z
|
2021-04-02T23:02:20.000Z
|
# -*- coding: utf-8 -*-
import copy
import logging
from operator import attrgetter
from typing import Dict
from app.service.messages.handler import Handler
logger = logging.getLogger(__name__)
class Dispatcher(object):
"""
    Message dispatcher; Notice events are ignored for now.
    platform: the platform, currently only qq; telegram and wechat may be added later.
    group_id: the group id, one of private (private chat only), group (group chat only), or a specific group number.
"""
def __init__(self):
self.handlers: Dict[str, Dict[str, list]] = {
"qq": dict(),
"telegram": dict(),
"wechat": dict(),
"default": {
"group": [],
"private": [],
},
}
        self.sort_key = attrgetter("weight")  # key used to sort the handlers
def get_handlers(self, data: dict):
"""根据消息的内容,返回对应的 handlers 列表"""
platform = data['platform']
message = data['message']
if message['type'] == 'group':
group_id = message['group']['id']
            handlers = self.handlers[platform].get(group_id)  # prefer the group's own custom handlers
            if not handlers:
                handlers = self.handlers["default"]['group']  # otherwise fall back to the default handlers (shared by all platforms)
        elif message['type'] == 'private':
            handlers = self.handlers['default']['private']  # likewise shared by all platforms
        else:
            logger.error("Cannot parse the message: invalid message format!")
return None
return handlers
def handle_update(self, data: dict):
"""处理消息"""
handlers = self.get_handlers(data)
        data_back = copy.deepcopy(data)  # dict used for the reply; a lightly modified copy of data is enough
        reply: dict = data_back['message']
        reply.update({"text": "", "images": []})  # clear the received message content first
if reply['type'] == "group":
reply['group'] = {'id': reply['group']['id']}
        # process the message
for handler in handlers:
match, res = handler.handle_update(data)
if match:
if reply['type'] == "group":
reply['group']['at_members'] = res.get("at_members")
reply['text'] = res.get('text')
reply['images'] = res.get('images')
            elif res is not None:  # parsing failed
                reply['text'] = res.get("message")  # return the error message
        if reply['text'] or reply['images']:  # there is something to reply with
            return data_back  # this dict is sent back to the qq/telegram front end
        else:
            return None  # nothing to reply with
def add_handler(self, handler, platform='default', group_id="group", extra_doc=None):
"""
注册消息处理器,default 表示该处理器为所有平台/群组所通用。
1. 对每条消息而言,只可能触发最多一个消息处理器。处理器之间按权重排序。
:param handler: 需要添加的 handler
:param platform: 有 qq telegram wechat, 和 default
:param group_id: group、private、或者群 id
:param extra_doc: 补充的 docstring,不同的命令,在不同环境下,效果也可能不同
:return:
"""
if not isinstance(handler, Handler):
raise TypeError('handlers is not an instance of {0}'.format(Handler.__name__))
if not isinstance(platform, str):
raise TypeError('platform is not str')
if not isinstance(group_id, str):
raise TypeError('group_id is not str')
        if extra_doc:  # attach the supplementary documentation
handler.extra_doc = extra_doc
if platform not in self.handlers:
self.handlers[platform] = {
group_id: [handler]
}
elif group_id not in self.handlers[platform]:
self.handlers[platform][group_id] = [handler]
else:
handlers_list = self.handlers[platform][group_id]
handlers_list.append(handler)
            handlers_list.sort(key=self.sort_key, reverse=True)  # higher weight first
def remove_handler(self, handler, platform='default', group_id="group"):
"""移除消息处理器"""
if platform in self.handlers \
and group_id in self.handlers[platform]:
self.handlers[platform][group_id].remove(handler)
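# Illustrative usage sketch (added for clarity; not part of the original
# module): route a minimal group message through the dispatcher. No handlers
# are registered here, so get_handlers falls back to the empty default group
# list; the platform and ids are arbitrary example values.
if __name__ == '__main__':
    dispatcher = Dispatcher()
    sample = {
        'platform': 'qq',
        'message': {'type': 'group', 'group': {'id': '12345'}, 'text': 'hello'},
    }
    print(dispatcher.get_handlers(sample))  # -> [] (default group handlers)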
| 34.318584
| 90
| 0.566787
| 4,285
| 0.95562
| 0
| 0
| 0
| 0
| 0
| 0
| 1,785
| 0.398082
|
a550d35d092e7a7f432cb53720ad0aa77be59cf6
| 1,247
|
py
|
Python
|
app/core/core_scripts/choices.py
|
Valentin-Golyonko/FlaskTestRPi
|
b9796a9acb2bb1c122301a3ef192f43c857eb27b
|
[
"Apache-2.0"
] | null | null | null |
app/core/core_scripts/choices.py
|
Valentin-Golyonko/FlaskTestRPi
|
b9796a9acb2bb1c122301a3ef192f43c857eb27b
|
[
"Apache-2.0"
] | null | null | null |
app/core/core_scripts/choices.py
|
Valentin-Golyonko/FlaskTestRPi
|
b9796a9acb2bb1c122301a3ef192f43c857eb27b
|
[
"Apache-2.0"
] | null | null | null |
class Choices:
DEVICE_TYPE_BAROMETER = 1
DEVICE_TYPE_LED_STRIP = 2
DEVICE_TYPE_CHOICES = [
(DEVICE_TYPE_BAROMETER, 'Barometer'),
(DEVICE_TYPE_LED_STRIP, 'LED Strip'),
]
DEVICE_SUB_TYPE_BME280 = 1
DEVICE_SUB_TYPE_RGB_STRIP_WITH_ALARM = 2
DEVICE_SUB_TYPE_CHOICES = [
(DEVICE_SUB_TYPE_BME280, 'BME280'),
(DEVICE_SUB_TYPE_RGB_STRIP_WITH_ALARM, 'RGB Strip with alarm'),
]
DEVICE_ADDRESS_TYPE_I2C = 1
DEVICE_ADDRESS_TYPE_ETH_WIFI = 2
DEVICE_ADDRESS_TYPE_BLUETOOTH = 3
DEVICE_ADDRESS_TYPE_CHOICES = [
(DEVICE_ADDRESS_TYPE_I2C, 'I2C'),
(DEVICE_ADDRESS_TYPE_ETH_WIFI, 'Ethernet/WiFi'),
(DEVICE_ADDRESS_TYPE_BLUETOOTH, 'Bluetooth'),
]
FORECAST_UNITS_STANDARD = 1
FORECAST_UNITS_METRIC = 2
FORECAST_UNITS_IMPERIAL = 3
FORECAST_UNITS_CHOICES = [
(FORECAST_UNITS_STANDARD, 'standard'),
(FORECAST_UNITS_METRIC, 'metric'),
(FORECAST_UNITS_IMPERIAL, 'imperial'),
]
    FORECAST_UPDATE_PERIOD = 10  # every 10 min
    BAROMETER_UPDATE_PERIOD = 10  # every 10 min
BAROMETER_DATA_LIMIT = int(60 / BAROMETER_UPDATE_PERIOD) * 24 * 7 # history for 7 days
    UPDATE_ALIVE_RGB_STRIP_PERIOD = 5  # every min
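# Illustrative usage sketch (added for clarity; not part of the original
# module): each *_CHOICES list pairs an integer code with a human-readable
# label, so a plain dict() gives a code-to-label lookup.
if __name__ == '__main__':
    device_labels = dict(Choices.DEVICE_TYPE_CHOICES)
    print(device_labels[Choices.DEVICE_TYPE_BAROMETER])  # Barometer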
| 31.175
| 91
| 0.694467
| 1,246
| 0.999198
| 0
| 0
| 0
| 0
| 0
| 0
| 170
| 0.136327
|
a55166529d4d734a528fe78b010050a25360e8b0
| 1,647
|
py
|
Python
|
StyleTransferTensorFlow/style.py
|
LordHarsh/Neural_Style_Transfer
|
a3e95cfe13ba89f0f4a529e5c45a7b365b1e27d4
|
[
"MIT"
] | 1
|
2021-01-31T06:41:29.000Z
|
2021-01-31T06:41:29.000Z
|
build/lib/StyleTransferTensorFlow/style.py
|
LordHarsh/Neural_Style_Transfer
|
a3e95cfe13ba89f0f4a529e5c45a7b365b1e27d4
|
[
"MIT"
] | null | null | null |
build/lib/StyleTransferTensorFlow/style.py
|
LordHarsh/Neural_Style_Transfer
|
a3e95cfe13ba89f0f4a529e5c45a7b365b1e27d4
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
from pytube import YouTube
import os
import cv2
from PIL import Image
import shutil
import glob
import ffmpy
def transfer(content_img_path, style_img_path, tfhub_module='https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'):
content_img = plt.imread(content_img_path)
style_img = plt.imread(style_img_path)
content_img = content_img.astype(np.float32)[np.newaxis, ...] / 255.0
style_img = style_img.astype(np.float32)[np.newaxis, ...] / 255.0
    style_img = tf.image.resize(style_img, (265, 265))
hub_module = hub.load(tfhub_module)
outputs = hub_module(tf.constant(content_img), tf.constant(style_img))
stylized_image = outputs[0]
return stylized_image
def StyleTransferVideo(list_content_img_path, style_img_path, tfhub_module='https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'):
content_img_list = [plt.imread(content_img_path) for content_img_path in list_content_img_path]
style_img = plt.imread(style_img_path)
content_img_list = [content_img.astype(np.float32)[np.newaxis, ...] / 255.0 for content_img in content_img_list]
style_img = style_img.astype(np.float32)[np.newaxis, ...] / 255.0
    style_img = tf.image.resize(style_img, (265, 265))
hub_module = hub.load(tfhub_module)
outputs = [hub_module(tf.constant(content_img), tf.constant(style_img)) for content_img in content_img_list]
print("-----------Style Transfer completed on all images-----------")
return outputs
if __name__ == "__main__":
import sys
transfer(sys.argv[1], sys.argv[2])
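    # Illustrative post-processing sketch (added for clarity; not part of the
    # original script): convert the returned tensor (batched float32 in [0, 1])
    # into an 8-bit image and save it. The output filename is an assumption.
    stylized = transfer(sys.argv[1], sys.argv[2])
    out_img = Image.fromarray((np.squeeze(stylized.numpy()) * 255.0).astype(np.uint8))
    out_img.save('stylized_output.png')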
| 37.431818
| 148
| 0.757134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.129933
|
a551e5731106adef0abaef205055eb2d9ca12152
| 15,493
|
py
|
Python
|
bfs/bfs.py
|
NordFk/bfs-soap-api-wrapper
|
f149e33db9a19f325e3ae335bb6682e15b667e6a
|
[
"Apache-2.0"
] | 2
|
2021-11-20T14:16:56.000Z
|
2021-12-15T10:33:01.000Z
|
bfs/bfs.py
|
NordFk/bfs-soap-api-wrapper
|
f149e33db9a19f325e3ae335bb6682e15b667e6a
|
[
"Apache-2.0"
] | null | null | null |
bfs/bfs.py
|
NordFk/bfs-soap-api-wrapper
|
f149e33db9a19f325e3ae335bb6682e15b667e6a
|
[
"Apache-2.0"
] | 2
|
2021-11-20T16:49:38.000Z
|
2021-11-20T21:26:16.000Z
|
from collections import OrderedDict
from zeep import Client
from zeep import xsd
import zeep.helpers
import zeep.exceptions
import logging.config
import re
from .constants import methods
class Bfs:
client = None
factory = None
credentials = None
identifier = None
methods = methods
def __init__(self, config, verbose: bool = False):
self.__init_logging(verbose)
self.__init_client(config)
    @staticmethod
    def __init_logging(verbose: bool = False):
if verbose:
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'zeep.transports': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console'],
},
}
})
else:
logging.getLogger('zeep').setLevel(logging.ERROR)
def __init_client(self, config: dict):
if self.client is None:
if not 'bricknode' in config:
raise ValueError('"bricknode" element missing from configuration')
if not 'wsdl' in config['bricknode']:
raise ValueError('"wsdl" element missing from "bricknode" configuration')
self.client = Client(config['bricknode']['wsdl'])
self.factory = self.client.type_factory('ns0')
self.credentials = self.factory.Credentials(UserName=config['bricknode']['credentials']['username'],
Password=config['bricknode']['credentials']['password'])
self.identifier = config['bricknode']['identifier']
def get_fields(self, method: str, default_value: bool = True):
"""
Gets fields object based on results object. Mitigates the plural form inconsistency present in the API
:param method:
:param default_value:
:return:
"""
try:
fields_method = getattr(self.factory, method + 'Fields')
except zeep.exceptions.LookupError:
fields_method = getattr(self.factory, method[:-1] + 'Fields')
fields = fields_method()
for key in fields:
fields[key] = default_value
return fields
def get_args(self, method: str):
"""
Gets args object based on results object. Mitigates the plural form inconsistency present in the API
:param method:
:return:
"""
try:
args_method = getattr(self.factory, method + 'Args')
except zeep.exceptions.LookupError:
args_method = getattr(self.factory, method[:-1] + 'Args')
return args_method()
@staticmethod
def get_entity_class_name(method: str):
"""
This method aligns the expected object names with the method that will use it. Eg. CreateAccount uses Account as
object, while the UpdateAccount method uses UpdateAccount objects and arrays thereof.
CreateMessage, on the other hand, uses CreateMessage as object.
:param method:
:return:
"""
# "Create" entities are not prefixed with "Create". Pattern changed for newer additions, omitted below.
method = re.sub('^%s' % 'Create', '', method) if method not in [
'CreateMessages',
'CreateNotes',
'CreateTasks',
'CreateTradingVenues',
'CreateWebhookSubscriptions'
] else method
# "Update" entities are always prefix with "Update". Unless, of course, it is UpdateAllocationProfiles
method = re.sub('^%s' % 'Update', '', method) if method in [
'UpdateAllocationProfiles'
] else method
# Casing anomalies
method = 'UpdateFundCompanies' if method == 'UpdateFundcompanies' else method
method = 'UpdateFundEntities' if method == 'UpdateFundentities' else method
# Inconsistent casing and plural form not at end
method = 'RecurringOrderTemplateAutoGiro' if method == 'RecurringOrderTemplatesAutogiro' else method
# Completely different entity type
method = 'FileInfoUpload' if method == 'File' else method
method = 'SuperTransactions' if method == 'BusinessTransactions' else method
return method
def _resolve_derived_class_from_abstract(self, class_name: str, entity: dict = None):
"""
Resolved any derived classes that we would rather use, based on the contents of the entity
:param class_name: The class name of the potential abstract class
:param entity: The entity used for evaluation
:return:
"""
if entity is None:
return
if class_name == 'CurrencyExchangeOrder':
if 'BuyAmount' in entity.keys():
return getattr(self.factory, 'CurrencyExchangeOrderBuy')
elif 'SellAmount' in entity.keys():
return getattr(self.factory, 'CurrencyExchangeOrderSell')
return None
def get_entity(self, class_name: str, entity: dict = None, skip_validation_for_empty_values: bool = False):
"""
Gets entity object based on method
:param class_name: The class name of the entity
:param entity: Optional entity object to convert
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:return:
"""
try:
entity_method = getattr(self.factory, class_name)
except zeep.exceptions.LookupError:
try:
entity_method = getattr(self.factory, class_name[:-1])
except zeep.exceptions.LookupError:
entity_method = getattr(self.factory, class_name[:-3] + "y")
derived_entity_method = self._resolve_derived_class_from_abstract(entity_method.name, entity)
if derived_entity_method is not None:
entity_method = derived_entity_method
_entity = entity_method()
if skip_validation_for_empty_values:
for key in [a for a in dir(_entity) if not a.startswith('__')]:
_entity[key] = xsd.SkipValue
if type(entity) is dict:
for key in entity.keys():
_entity[key] = entity[key]
return _entity
def get_entity_array(self, class_name: str, entities: list):
"""
Gets an entity array based on class_name
:param class_name:
:param entities:
:return:
"""
try:
entity_array_method = getattr(self.factory, "ArrayOf" + class_name)
except zeep.exceptions.LookupError:
entity_array_method = getattr(self.factory, "ArrayOf" + class_name[:-1])
return entity_array_method(entities)
def __argument_transform(self, value):
"""
Transforms the argument to suit the soap client
:param value:
:return:
"""
p = re.compile('^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$')
if isinstance(value, list) and len(value) > 0:
if p.match(value[0]):
return self.factory.ArrayOfGuid(value)
else:
return self.factory.ArrayOfString(value)
return value
def get(self, method: str, args: dict = None, fields: dict = None, raw_result: bool = False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param args: Any arguments you would like to pass (optional)
:param fields: Any field settings you would like to pass (optional)
:param raw_result: Set to True to get the raw result back (optional)
:return:
"""
_fields = self.get_fields(method)
if type(fields) is dict:
for key in fields.keys():
_fields[key] = fields[key]
_args = self.get_args(method)
if type(args) is dict:
for key in args.keys():
_args[key] = self.__argument_transform(args[key])
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'Args': _args,
'Fields': _fields
})
return result if raw_result \
else self.ordered_dict_to_object(self.get_response_rows(zeep.helpers.serialize_object(result), method))
def execute(self, method: str, entities: list = None, skip_validation_for_empty_values: bool = False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param entities: The entities we want to execute
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:return:
"""
return self.create(method=method, entities=entities,
skip_validation_for_empty_values=skip_validation_for_empty_values, raw_result=True)
def create(self, method: str, entities: list = None, skip_validation_for_empty_values: bool = False,
raw_result=False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param entities: The entities we want to create
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:param raw_result: Set to True to get the raw result back (optional)
:return:
"""
_entities = []
for entity in entities:
_entities.append(entity if type(entity) != dict
else self.get_entity(self.get_entity_class_name(method), entity, skip_validation_for_empty_values))
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'Entities': self.get_entity_array(self.get_entity_class_name(method), _entities)
})
return result if raw_result \
else self.ordered_dict_to_object(self.get_response_rows(zeep.helpers.serialize_object(result), method))
def update(self, method: str, entities: list = None, fields: dict = None,
skip_validation_for_empty_values: bool = False, raw_result=False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param entities: The entities we want to update
:param fields: Any field settings you would like to pass (optional)
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:param raw_result: Set to True to get the raw result back (optional)
:return:
"""
_fields = self.get_fields(method, False)
if type(fields) is dict:
for key in fields.keys():
_fields[key] = fields[key]
_entities = []
for entity in entities:
_entities.append(entity if type(entity) != dict
else self.get_entity(self.get_entity_class_name(method), entity, skip_validation_for_empty_values))
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'Entities': self.get_entity_array(self.get_entity_class_name(method), _entities),
'Fields': _fields
})
return result if raw_result \
else self.ordered_dict_to_object(self.get_response_rows(zeep.helpers.serialize_object(result), method))
def delete(self, method: str, brick_ids: list = None):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param brick_ids: The brickIds of the entities we want to delete
:return:
"""
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'BrickIds': self.__argument_transform(brick_ids)
})
return result
def cancel(self, method: str, entity: dict = None):
"""
Makes a call to the API using the entity as WorkflowTriggerDataEntity property
:param method: The Bricknode API method name
:param entity: The WorkflowTriggerDataEntity we want to supply
:return:
"""
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'WorkflowTriggerDataEntity': entity
})
return result
@staticmethod
def get_response_rows(result: dict, method: str):
"""
Gets response rows based on results object. Mitigates the plural form inconsistency present in the API
:param result:
:param method:
:return:
"""
if 'Result' in result.keys() and result['Result'] is not None:
response_field = method + 'ResponseRow' \
if method + 'ResponseRow' in result['Result'] \
else method[:-1] + 'ResponseRow'
if result['Result'][response_field] is not None:
return result['Result'][response_field]
if 'Entities' in result.keys() and result['Entities'] is not None:
class_name = Bfs.get_entity_class_name(method)
response_field = class_name \
if class_name in result['Entities'] \
else class_name[:-1]
if result['Entities'][response_field] is not None:
return result['Entities'][response_field]
@staticmethod
def ordered_dict_to_object(value: dict):
"""
Recursively gets an object based on an ordered dictionary that may contain lists
:param value:
:return:
"""
if isinstance(value, list):
a = []
for item in value:
a.append(Bfs.ordered_dict_to_object(item))
return a
if isinstance(value, OrderedDict):
o = {}
for key, value in value.items():
o[key] = Bfs.ordered_dict_to_object(value)
return o
return value
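# Illustrative usage sketch (added for clarity; not part of the original
# module). The configuration layout is inferred from __init_client above; the
# WSDL URL, credentials, identifier, API method name and argument key below are
# placeholder assumptions, not values documented by this wrapper.
if __name__ == '__main__':
    config = {
        'bricknode': {
            'wsdl': 'https://example.invalid/bfsapi.svc?wsdl',
            'credentials': {'username': 'api-user', 'password': 'secret'},
            'identifier': '00000000-0000-0000-0000-000000000000',
        }
    }
    bfs = Bfs(config)
    print(bfs.get('GetAccounts', args={'BrickIds': ['00000000-0000-0000-0000-000000000000']}))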
| 40.033592
| 128
| 0.606274
| 15,302
| 0.987672
| 0
| 0
| 4,105
| 0.264958
| 0
| 0
| 5,995
| 0.386949
|
a554983edfe142d8b785a94b5027ce1bfbe95b20
| 1,370
|
py
|
Python
|
booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py
|
7552-2020C2-grupo5/bookings-microservice
|
92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161
|
[
"Apache-2.0"
] | null | null | null |
booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py
|
7552-2020C2-grupo5/bookings-microservice
|
92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161
|
[
"Apache-2.0"
] | null | null | null |
booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py
|
7552-2020C2-grupo5/bookings-microservice
|
92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161
|
[
"Apache-2.0"
] | null | null | null |
"""booking_status
Revision ID: 7eb209b7ab1e
Revises: 0a95c6679356
Create Date: 2021-02-22 01:19:10.744915
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from booking_microservice.constants import BookingStatus
# revision identifiers, used by Alembic.
revision = '7eb209b7ab1e'
down_revision = '0a95c6679356'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
connection = op.get_bind()
if connection.dialect.name == "postgresql":
status_enum = postgresql.ENUM(
*[x.value for x in BookingStatus.__members__.values()],
name='booking_status'
)
else:
status_enum = sa.Enum(
*[x.value for x in BookingStatus.__members__.values()],
name='booking_status'
)
status_enum.create(op.get_bind())
op.add_column(
'booking',
sa.Column(
'booking_status',
status_enum,
nullable=False,
default=BookingStatus.PENDING.value,
server_default=BookingStatus.PENDING.value,
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('booking', 'booking_status')
# ### end Alembic commands ###
| 24.909091
| 67
| 0.642336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 455
| 0.332117
|
a555224273d739957311d97daec8970ec07b9037
| 669
|
py
|
Python
|
cookbookex/c01/3.2.3.py
|
fengchunhui/cookbookex
|
0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79
|
[
"Apache-2.0"
] | null | null | null |
cookbookex/c01/3.2.3.py
|
fengchunhui/cookbookex
|
0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79
|
[
"Apache-2.0"
] | null | null | null |
cookbookex/c01/3.2.3.py
|
fengchunhui/cookbookex
|
0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79
|
[
"Apache-2.0"
] | null | null | null |
records = [('foo', 1, 2), ('bar', 'hello'), ('foo', 3, 4)]
def do_foo(x, y):
print('foo', x, y)
def do_bar(s):
print('bar', s)
for tag, *args in records:
if tag == 'foo':
do_foo(*args)
elif tag == 'bar':
        do_bar(*args)  # didn't fully understand this example
line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/user/bin/flase'
uname, *fields, homedir, sh = line.split(':')
print(uname)
print(fields)
print(homedir)
print(sh)
record = ('ACME', 50, 123.45, (12, 18, 2017))
name, *_, (*_, year) = record
print(name)
print(year)
def sum(items):
head, *tail = items
return head + sum(tail) if tail else head
items = [1, 10, 7, 4, 5, 9]
print(sum(items))  # didn't fully understand
| 19.114286
| 68
| 0.578475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 141
| 0.20524
|
a5559dfa11b05a0a8b6fa50b10ff68e791bc3b1c
| 4,577
|
py
|
Python
|
mobilenet_v3_configs.py
|
1e100/mobilenet_v3
|
4c5058db6960741e849294c7701e7fddfa241a15
|
[
"BSD-3-Clause"
] | 8
|
2019-09-25T08:41:27.000Z
|
2020-07-26T12:49:41.000Z
|
mobilenet_v3_configs.py
|
1e100/mobilenet_v3
|
4c5058db6960741e849294c7701e7fddfa241a15
|
[
"BSD-3-Clause"
] | null | null | null |
mobilenet_v3_configs.py
|
1e100/mobilenet_v3
|
4c5058db6960741e849294c7701e7fddfa241a15
|
[
"BSD-3-Clause"
] | 4
|
2019-10-04T21:32:56.000Z
|
2020-08-05T12:36:52.000Z
|
""" Configurations shared between PyTorch and Keras. """
CONFIG = {
"large": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 1, 1, None, "relu"],
[16, 64, 24, 3, 2, 1, None, "relu"],
[24, 72, 24, 3, 1, 1, None, "relu"],
[24, 72, 40, 5, 2, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 240, 80, 3, 2, 1, None, "hardswish"],
[80, 200, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 480, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 160, 5, 2, 1, 0.25, "hardswish"],
[160, 960, 160, 5, 1, 1, 0.25, "hardswish"],
[160, 960, 160, 5, 1, 1, 0.25, "hardswish"],
],
"small": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 2, 1, 0.25, "relu"],
[16, 72, 24, 3, 2, 1, None, "relu"],
[24, 88, 24, 3, 1, 1, None, "relu"],
[24, 96, 40, 5, 2, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 120, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 144, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 96, 5, 2, 1, 0.25, "hardswish"],
[96, 576, 96, 5, 1, 1, 0.25, "hardswish"],
[96, 576, 96, 5, 1, 1, 0.25, "hardswish"],
],
"large_detection": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 1, 1, None, "relu"],
[16, 64, 24, 3, 2, 1, None, "relu"],
[24, 72, 24, 3, 1, 1, None, "relu"],
[24, 72, 40, 5, 2, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 240, 80, 3, 2, 1, None, "hardswish"],
[80, 200, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 480, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 80, 5, 2, 1, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 1, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 1, 0.25, "hardswish"],
],
"small_detection": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 2, 1, 0.25, "relu"],
[16, 72, 24, 3, 2, 1, None, "relu"],
[24, 88, 24, 3, 1, 1, None, "relu"],
[24, 96, 40, 5, 2, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 120, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 144, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 2, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 1, 0.25, "hardswish"],
],
# Stride 16, last 3 blocks dilated by 2.
"large_segmentation": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 1, 1, None, "relu"],
[16, 64, 24, 3, 2, 1, None, "relu"],
[24, 72, 24, 3, 1, 1, None, "relu"],
[24, 72, 40, 5, 2, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 120, 40, 5, 1, 1, 0.25, "relu"],
[40, 240, 80, 3, 2, 1, None, "hardswish"],
[80, 200, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 184, 80, 3, 1, 1, None, "hardswish"],
[80, 480, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 112, 3, 1, 1, 0.25, "hardswish"],
[112, 672, 80, 5, 1, 2, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 2, 0.25, "hardswish"],
[80, 480, 80, 5, 1, 2, 0.25, "hardswish"],
],
# Stride 16, last 3 blocks dilated by 2.
"small_segmentation": [
# in_ch, exp, out_ch, ks, stride, dilation, se, activation
[16, 16, 16, 3, 2, 1, 0.25, "relu"],
[16, 72, 24, 3, 2, 1, None, "relu"],
[24, 88, 24, 3, 1, 1, None, "relu"],
[24, 96, 40, 5, 2, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 240, 40, 5, 1, 1, 0.25, "hardswish"],
[40, 120, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 144, 48, 5, 1, 1, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 2, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 2, 0.25, "hardswish"],
[48, 288, 48, 5, 1, 2, 0.25, "hardswish"],
],
}
| 44.436893
| 66
| 0.444614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,295
| 0.282936
|
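The block tables above list, per inverted-residual block, the input channels, expansion size, output channels, kernel size, stride, dilation, optional squeeze-excitation ratio, and activation of MobileNetV3-style variants (the segmentation variants keep stride 16 by dilating the last three blocks by 2 instead of striding). As an illustration of how one such row could be turned into a layer, here is a minimal PyTorch sketch; the build_block helper and its structure are assumptions made for illustration, not the API of the source repository.

# Illustrative sketch only: turns one row of the tables above into an inverted-residual block.
# `build_block` is a hypothetical helper, not the source repository's API; the SE module is noted but omitted.
import torch.nn as nn

def build_block(in_ch, exp, out_ch, ks, stride, dilation, se_ratio, activation):
    act = nn.Hardswish if activation == "hardswish" else nn.ReLU
    layers = []
    if exp != in_ch:
        # 1x1 pointwise expansion
        layers += [nn.Conv2d(in_ch, exp, 1, bias=False), nn.BatchNorm2d(exp), act()]
    # depthwise convolution with the listed kernel size, stride and dilation
    padding = (ks - 1) // 2 * dilation
    layers += [
        nn.Conv2d(exp, exp, ks, stride, padding, dilation=dilation, groups=exp, bias=False),
        nn.BatchNorm2d(exp), act(),
    ]
    # a squeeze-excitation module would be inserted here when se_ratio is not None
    # 1x1 linear projection back to out_ch
    layers += [nn.Conv2d(exp, out_ch, 1, bias=False), nn.BatchNorm2d(out_ch)]
    return nn.Sequential(*layers)

# first row of the "large" table: [16, 16, 16, 3, 1, 1, None, "relu"]
block = build_block(16, 16, 16, 3, 1, 1, None, "relu")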
a555c2aabfb2fed9428a296a73e22048b9b84d87
| 14,288
|
py
|
Python
|
rotkehlchen/exchanges/iconomi.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/exchanges/iconomi.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/exchanges/iconomi.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
import base64
import hashlib
import hmac
import json
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.accounting.ledger_actions import LedgerAction
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import UNSUPPORTED_ICONOMI_ASSETS, asset_from_iconomi
from rotkehlchen.constants import ZERO
from rotkehlchen.constants.assets import A_AUST
from rotkehlchen.errors.asset import UnknownAsset, UnsupportedAsset
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Location,
MarginPosition,
Price,
Trade,
TradeType,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeQueryBalances
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_fee,
deserialize_fval,
)
from rotkehlchen.types import ApiKey, ApiSecret, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_iconomi(raw_trade: Dict) -> Trade:
"""Turn an iconomi trade entry to our own trade format
May raise:
- UnknownAsset
- DeserializationError
- KeyError
"""
timestamp = raw_trade['timestamp']
if raw_trade['type'] == 'buy_asset':
trade_type = TradeType.BUY
tx_asset = asset_from_iconomi(raw_trade['target_ticker'])
tx_amount = deserialize_asset_amount(raw_trade['target_amount'])
native_asset = asset_from_iconomi(raw_trade['source_ticker'])
native_amount = deserialize_asset_amount(raw_trade['source_amount'])
elif raw_trade['type'] == 'sell_asset':
trade_type = TradeType.SELL
tx_asset = asset_from_iconomi(raw_trade['source_ticker'])
tx_amount = deserialize_asset_amount(raw_trade['source_amount'])
native_amount = deserialize_asset_amount(raw_trade['target_amount'])
native_asset = asset_from_iconomi(raw_trade['target_ticker'])
amount = tx_amount
rate = Price(native_amount / tx_amount)
fee_amount = deserialize_fee(raw_trade['fee_amount'])
fee_asset = asset_from_iconomi(raw_trade['fee_ticker'])
return Trade(
timestamp=timestamp,
location=Location.ICONOMI,
base_asset=tx_asset,
quote_asset=native_asset,
trade_type=trade_type,
amount=amount,
rate=rate,
fee=fee_amount,
fee_currency=fee_asset,
link=str(raw_trade['transactionId']),
)
class Iconomi(ExchangeInterface): # lgtm[py/missing-call-to-init]
def __init__(
self,
name: str,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
):
super().__init__(
name=name,
location=Location.ICONOMI,
api_key=api_key,
secret=secret,
database=database,
)
self.uri = 'https://api.iconomi.com'
self.msg_aggregator = msg_aggregator
def edit_exchange_credentials(
self,
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
) -> bool:
changed = super().edit_exchange_credentials(api_key, api_secret, passphrase)
return changed
def _generate_signature(self, request_type: str, request_path: str, timestamp: str) -> str:
signed_data = ''.join([timestamp, request_type.upper(), request_path, '']).encode()
signature = hmac.new(
self.secret,
signed_data,
hashlib.sha512,
)
return base64.b64encode(signature.digest()).decode()
def _api_query(
self,
verb: Literal['get', 'post'],
path: str,
options: Optional[Dict] = None,
authenticated: bool = True,
) -> Any:
"""
Queries ICONOMI with the given verb for the given path and options
"""
assert verb in ('get', 'post'), (
'Given verb {} is not a valid HTTP verb'.format(verb)
)
request_path_no_args = '/v1/' + path
data = ''
if not options:
request_path = request_path_no_args
else:
request_path = request_path_no_args + '?' + urlencode(options)
timestamp = str(int(time.time() * 1000))
request_url = self.uri + request_path
headers = {}
if authenticated:
signature = self._generate_signature(
request_type=verb.upper(),
request_path=request_path_no_args,
timestamp=timestamp,
)
headers.update({
'ICN-SIGN': signature,
                # set the api key only here; sending it to a non-authenticated endpoint returns a 400
'ICN-API-KEY': self.api_key,
'ICN-TIMESTAMP': timestamp,
})
if data != '':
headers.update({
'Content-Type': 'application/json',
'Content-Length': str(len(data)),
})
log.debug('ICONOMI API Query', verb=verb, request_url=request_url)
try:
response = getattr(self.session, verb)(
request_url,
data=data,
timeout=30,
headers=headers,
)
except requests.exceptions.RequestException as e:
raise RemoteError(f'ICONOMI API request failed due to {str(e)}') from e
try:
json_ret = json.loads(response.text)
except JSONDecodeError as exc:
raise RemoteError('ICONOMI returned invalid JSON response') from exc
if response.status_code not in (200, 201):
if isinstance(json_ret, dict) and 'message' in json_ret:
raise RemoteError(json_ret['message'])
raise RemoteError(
'ICONOMI api request for {} failed with HTTP status code {}'.format(
response.url,
response.status_code,
),
)
return json_ret
def validate_api_key(self) -> Tuple[bool, str]:
"""
Validates that the ICONOMI API key is good for usage in rotki
"""
try:
self._api_query('get', 'user/balance')
return True, ""
except RemoteError:
return False, 'Provided API Key is invalid'
def query_balances(self, **kwargs: Any) -> ExchangeQueryBalances:
assets_balance: Dict[Asset, Balance] = {}
try:
resp_info = self._api_query('get', 'user/balance')
except RemoteError as e:
msg = (
'ICONOMI API request failed. Could not reach ICONOMI due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
if resp_info['currency'] != 'USD':
raise RemoteError('Iconomi API did not return values in USD')
for balance_info in resp_info['assetList']:
ticker = balance_info['ticker']
try:
asset = asset_from_iconomi(ticker)
try:
usd_value = deserialize_fval(balance_info['value'], 'usd_value', 'iconomi')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Skipping iconomi balance entry {balance_info} due to {msg}',
)
continue
try:
amount = deserialize_asset_amount(balance_info['balance'])
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Skipping iconomi balance entry {balance_info} due to {msg}',
)
continue
assets_balance[asset] = Balance(
amount=amount,
usd_value=usd_value,
)
except (UnknownAsset, UnsupportedAsset) as e:
asset_tag = 'unknown' if isinstance(e, UnknownAsset) else 'unsupported'
self.msg_aggregator.add_warning(
f'Found {asset_tag} ICONOMI asset {ticker}. '
f' Ignoring its balance query.',
)
continue
for balance_info in resp_info['daaList']:
ticker = balance_info['ticker']
if ticker == 'AUSTS':
# The AUSTS strategy is 'ICONOMI Earn'. We know that this strategy holds its
# value in Anchor UST (AUST). That's why we report the user balance for this
# strategy as usd_value / AUST price.
try:
aust_usd_price = Inquirer().find_usd_price(asset=A_AUST)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing ICONOMI balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
if aust_usd_price == ZERO:
self.msg_aggregator.add_error(
'Error processing ICONOMI balance entry because the USD price '
'for AUST was reported as 0. Skipping balance entry',
)
continue
try:
usd_value = deserialize_fval(balance_info['value'], 'usd_value', 'iconomi')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Skipping iconomi balance entry {balance_info} due to {msg}',
)
continue
assets_balance[A_AUST] = Balance(
amount=usd_value / aust_usd_price,
usd_value=usd_value,
)
else:
self.msg_aggregator.add_warning(
f'Found unsupported ICONOMI strategy {ticker}. '
f' Ignoring its balance query.',
)
return assets_balance, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Tuple[List[Trade], Tuple[Timestamp, Timestamp]]:
page = 0
all_transactions = []
while True:
resp = self._api_query('get', 'user/activity', {"pageNumber": str(page)})
if len(resp['transactions']) == 0:
break
all_transactions.extend(resp['transactions'])
page += 1
log.debug('ICONOMI trade history query', results_num=len(all_transactions))
trades = []
for tx in all_transactions:
timestamp = tx['timestamp']
if timestamp < start_ts:
continue
if timestamp > end_ts:
continue
if tx['type'] in ('buy_asset', 'sell_asset'):
try:
trades.append(trade_from_iconomi(tx))
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Ignoring an iconomi transaction because of unsupported '
f'asset {str(e)}')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing an iconomi transaction. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing an iconomi transaction',
error=msg,
trade=tx,
)
return trades, (start_ts, end_ts)
def query_supported_tickers(
self,
) -> List[str]:
tickers = []
resp = self._api_query('get', 'assets', authenticated=False)
for asset_info in resp:
if not asset_info['supported']:
continue
if asset_info['ticker'] in UNSUPPORTED_ICONOMI_ASSETS:
continue
tickers.append(asset_info['ticker'])
return tickers
def query_online_deposits_withdrawals(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[AssetMovement]:
return [] # noop for iconomi
def query_online_margin_history(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[MarginPosition]:
return [] # noop for iconomi
def query_online_income_loss_expense(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[LedgerAction]:
return [] # noop for iconomi
| 35.542289
| 95
| 0.568309
| 11,318
| 0.792133
| 0
| 0
| 0
| 0
| 0
| 0
| 2,917
| 0.204157
|
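In the Iconomi client above, _generate_signature builds the ICN-SIGN header by concatenating the millisecond timestamp, the upper-cased HTTP verb, the request path without query arguments, and an (empty) body, then computing an HMAC-SHA512 of that string with the API secret and base64-encoding the digest. A standalone sketch of the same signing steps, with placeholder credentials, would look like this:

# Standalone sketch of the request-signing steps used above; the key and secret values are placeholders.
import base64
import hashlib
import hmac
import time

def sign(secret: bytes, verb: str, request_path: str, body: str = '') -> tuple:
    timestamp = str(int(time.time() * 1000))
    payload = ''.join([timestamp, verb.upper(), request_path, body]).encode()
    digest = hmac.new(secret, payload, hashlib.sha512).digest()
    return timestamp, base64.b64encode(digest).decode()

timestamp, signature = sign(b'dummy-secret', 'get', '/v1/user/balance')
headers = {
    'ICN-API-KEY': 'dummy-key',
    'ICN-SIGN': signature,
    'ICN-TIMESTAMP': timestamp,
}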
a555df3734f412141194b24eeb0c8ebadcbf1a97
| 1,010
|
py
|
Python
|
doc/conf.py
|
rerobots/cli
|
e97a20d8aa4b1f118272ded9675b6c0747db321f
|
[
"Apache-2.0"
] | 1
|
2021-04-17T22:51:42.000Z
|
2021-04-17T22:51:42.000Z
|
doc/conf.py
|
rerobots/cli
|
e97a20d8aa4b1f118272ded9675b6c0747db321f
|
[
"Apache-2.0"
] | null | null | null |
doc/conf.py
|
rerobots/cli
|
e97a20d8aa4b1f118272ded9675b6c0747db321f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# parts of this were originally generated by sphinx-quickstart on Thu Aug 31 17:31:36 2017.
import os.path
import sys
sys.path.append(os.path.abspath('..'))
project = 'CLI'
copyright = '2021 rerobots, Inc | <a href="https://github.com/rerobots/cli">source code</a>'
author = 'rerobots, Inc.'
html_logo = '_static/logo.svg'
version = ''
release = ''
language = None
extensions = ['sphinx.ext.autodoc']
autoclass_content = 'init'
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = []
templates_path = ['_templates']
pygments_style = 'sphinx'
# read more about customization of this style at
# http://alabaster.readthedocs.io/en/stable/customization.html
html_theme = 'alabaster'
html_sidebars = {
}
html_theme_options = {
'show_powered_by': 'false'
}
# Prepare to build on hosts of https://readthedocs.org/
import os
if os.environ.get('READTHEDOCS', 'False') == 'True':
import subprocess
subprocess.check_call('./get-deps.sh')
| 21.489362
| 92
| 0.705941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 564
| 0.558416
|
a555e99a46c6efc7e9dda4b03dbc6e9937a3b54b
| 620
|
py
|
Python
|
pytorch-extension/pytorch_extension_official/cpp/perform_test.py
|
xdr940/utils
|
c4b7b1479956475a7feee90a723541904ec82306
|
[
"MIT"
] | null | null | null |
pytorch-extension/pytorch_extension_official/cpp/perform_test.py
|
xdr940/utils
|
c4b7b1479956475a7feee90a723541904ec82306
|
[
"MIT"
] | null | null | null |
pytorch-extension/pytorch_extension_official/cpp/perform_test.py
|
xdr940/utils
|
c4b7b1479956475a7feee90a723541904ec82306
|
[
"MIT"
] | null | null | null |
import time
from lltm.lltm import LLTM
import torch
batch_size = 16
input_features = 32
state_size = 128
X = torch.randn(batch_size, input_features)
h = torch.randn(batch_size, state_size)
C = torch.randn(batch_size, state_size)
rnn = LLTM(input_features, state_size)#net init
forward = 0
backward = 0
for _ in range(1000):
start = time.time()
new_h, new_C = rnn(X, (h, C))
forward += time.time() - start
start = time.time()
(new_h.sum() + new_C.sum()).backward()
backward += time.time() - start
print('Forward: {:.3f} us | Backward {:.3f} us'.format(forward * 1e6/1e3, backward * 1e6/1e3))
| 23.846154
| 94
| 0.675806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.080645
|
a55636a8a913811f2be1912dad1aedac22c6a849
| 1,980
|
py
|
Python
|
helper/create_functions_table.py
|
Abhisheknishant/iteration_utilities
|
b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc
|
[
"Apache-2.0"
] | 72
|
2016-09-12T03:01:02.000Z
|
2022-03-05T16:54:45.000Z
|
helper/create_functions_table.py
|
Abhisheknishant/iteration_utilities
|
b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc
|
[
"Apache-2.0"
] | 127
|
2016-09-14T02:07:33.000Z
|
2022-03-19T13:17:32.000Z
|
helper/create_functions_table.py
|
Abhisheknishant/iteration_utilities
|
b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc
|
[
"Apache-2.0"
] | 11
|
2017-02-22T20:40:37.000Z
|
2022-03-05T16:55:40.000Z
|
# Licensed under Apache License Version 2.0 - see LICENSE
"""This is a helper that prints the content of the function overview tables .
- docs/index.rst
- README.rst
Both contain a table of functions defined in iteration_utilities and
manually updating them is a pain. Therefore this file can be executed and the
contents can be copy pasted there. Just use::
>>> python helper/create_functions_table.py
Unfortunately the header lines of these tables have to be removed manually,
I haven't found a way to remove them programmatically using the
astropy.io.ascii.RST class.
It's actually important to call this helper from the main repo directory
so the file resolution works correctly.
"""
def _create_overview_table(repo_path, readme=False):
"""Creates an RST table to insert in the "Readme.rst" file for the
complete overview of the package.
Requires `astropy`!
"""
from iteration_utilities import Iterable
from astropy.table import Table
from astropy.io.ascii import RST
import pathlib
p = pathlib.Path(repo_path).joinpath('docs', 'generated')
funcs = sorted([file.name.split('.rst')[0] for file in p.glob('*.rst')],
key=str.lower)
if readme:
rtd_link = ('`{0} <https://iteration-utilities.readthedocs.io/'
'en/latest/generated/{0}.html>`_')
else:
rtd_link = ':py:func:`~iteration_utilities.{0}`'
it = (Iterable(funcs)
# Create a Sphinx link from function name and module
.map(rtd_link.format)
# Group into 4s so we get a 4 column Table
.grouper(4, fillvalue='')
# Convert to list because Table expects it.
.as_list())
print('\n'.join(RST().write(Table(rows=it))))
if __name__ == '__main__':
import pathlib
repo_path = pathlib.Path.cwd()
_create_overview_table(repo_path=repo_path, readme=False)
print('\n\n\n')
_create_overview_table(repo_path=repo_path, readme=True)
| 32.459016
| 77
| 0.685354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,144
| 0.577778
|
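The create_functions_table.py helper above builds one Sphinx/ReadTheDocs link per documented function and then groups the links four to a row before rendering an RST table. The grouping step can be reproduced with plain itertools and no astropy or iteration_utilities dependency; the function names below are just placeholders for illustration:

# Plain-itertools sketch of the four-column grouping used above; the names are placeholders.
from itertools import zip_longest

funcs = ['accumulate', 'all_equal', 'chained', 'deepflatten', 'duplicates']
links = [':py:func:`~iteration_utilities.{0}`'.format(name) for name in funcs]
rows = list(zip_longest(*[iter(links)] * 4, fillvalue=''))
# rows[0] holds the first four links, rows[1] holds the fifth link padded with empty strings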
a55746c92e9741f67f15dac2983e811ae99e916b
| 1,092
|
py
|
Python
|
Python Scripts/lesson_49_tuples.py
|
jessequinn/udemy_python_complete
|
b97e657dea2a8680557949f01ac80d3230c82c41
|
[
"MIT"
] | null | null | null |
Python Scripts/lesson_49_tuples.py
|
jessequinn/udemy_python_complete
|
b97e657dea2a8680557949f01ac80d3230c82c41
|
[
"MIT"
] | null | null | null |
Python Scripts/lesson_49_tuples.py
|
jessequinn/udemy_python_complete
|
b97e657dea2a8680557949f01ac80d3230c82c41
|
[
"MIT"
] | null | null | null |
# Given the tuple below that represents the Imelda May album "More Mayhem", write
# code to print the album details, followed by a listing of all the tracks in the album.
#
# Indent the tracks by a single tab stop when printing them (remember that you can pass
# more than one item to the print function, separating them with a comma).
# imelda = "More Mayhem", "Imelda May", 2011, (
# (1, "Pulling the Rug"), (2, "Psycho"), (3, "Mayhem"), (4, "Kentish Town Waltz"))
#
# print(imelda)
#
# title, artist, year, tracks = imelda
# print(title)
# print(artist)
# print(year)
# for song in tracks:
# track, title = song
# print("\tTrack number {}, Title: {}".format(track, title))
#
imelda = "More Mayhem", "Imelda May", 2011, (
[(1, "Pulling the Rug"), (2, "Psycho"), (3, "Mayhem"), (4, "Kentish Town Waltz")])
print(imelda)
imelda[3].append((5, "All For You"))
title, artist, year, tracks = imelda
tracks.append((6, "Eternity"))
print(title)
print(artist)
print(year)
for song in tracks:
track, title = song
print("\tTrack number {}, Title: {}".format(track, title))
| 31.2
| 88
| 0.659341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 803
| 0.735348
|
a557d896bbb7713624a8d9ae1db240388f2eb7f7
| 1,785
|
py
|
Python
|
MyWriter/testdragdrop.py
|
haha517/mywriter
|
8ddd5ce3b2f31491480dee9beb7367c8d6182282
|
[
"MIT"
] | null | null | null |
MyWriter/testdragdrop.py
|
haha517/mywriter
|
8ddd5ce3b2f31491480dee9beb7367c8d6182282
|
[
"MIT"
] | null | null | null |
MyWriter/testdragdrop.py
|
haha517/mywriter
|
8ddd5ce3b2f31491480dee9beb7367c8d6182282
|
[
"MIT"
] | null | null | null |
import sys
import os
from PyQt4 import QtGui, QtCore
class TestListView(QtGui.QListWidget):
def __init__(self, type, parent=None):
super(TestListView, self).__init__(parent)
self.setAcceptDrops(True)
self.setIconSize(QtCore.QSize(72, 72))
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
links = []
for url in event.mimeData().urls():
links.append(str(url.toLocalFile()))
self.emit(QtCore.SIGNAL("dropped"), links)
else:
event.ignore()
class MainForm(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainForm, self).__init__(parent)
self.view = TestListView(self)
self.connect(self.view, QtCore.SIGNAL("dropped"), self.pictureDropped)
self.setCentralWidget(self.view)
def pictureDropped(self, l):
for url in l:
if os.path.exists(url):
print(url)
icon = QtGui.QIcon(url)
pixmap = icon.pixmap(72, 72)
icon = QtGui.QIcon(pixmap)
item = QtGui.QListWidgetItem(url, self.view)
item.setIcon(icon)
item.setStatusTip(url)
def main():
app = QtGui.QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_()
if __name__ == '__main__':
main()
| 28.790323
| 78
| 0.576471
| 1,583
| 0.886835
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.015686
|
a5582803ca69b47af8a599a971fe68204b6f9492
| 3,392
|
py
|
Python
|
apps/ndn_demoapps_wldr.py
|
theuerse/emulation_lib
|
d9388202d7ec9283404f9ab4d2448ff19922b44f
|
[
"MIT"
] | 2
|
2018-12-11T10:02:06.000Z
|
2019-04-01T10:39:09.000Z
|
apps/ndn_demoapps_wldr.py
|
theuerse/emulation_lib
|
d9388202d7ec9283404f9ab4d2448ff19922b44f
|
[
"MIT"
] | null | null | null |
apps/ndn_demoapps_wldr.py
|
theuerse/emulation_lib
|
d9388202d7ec9283404f9ab4d2448ff19922b44f
|
[
"MIT"
] | null | null | null |
import os
from .. import constants
from . import application
class NDN_DemoAppsWLDR(application.Application):
def __init__(self, server, clients, gateways, start, duration, server_params, client_params, routingcmds):
self.server = server
self.clients = clients
self.gateways = gateways
self.startTime = start
self.duration = duration
self.server_params = server_params
self.client_params = client_params
def generateCommands(self, config):
server_exe = "dashproducer"
client_exe = "dashplayer_WLDR"
# (sudo chrt -o -p 0 $BASHPID && dashplayer_WLDR --name /Node1/BBB_first100.mpd -r 12000 -l 500 -a buffer -o /home/nfd/emulation/results/consumer.log &) &
wldr_daemon_cmd = "(sudo chrt -o -p 0 $BASHPID && wldrdaemon_udp -l /var/run/shm/nfd_packet_log/nfd_packet_log.csv"
# start new server instance
self.server.scheduleCmd(constants.SETUP_TIME,"sudo " + server_exe + " " + self.server_params.strip() + " &")
# explicitly stop server at end of emulation
self.server.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall " + server_exe)
wlans = {}
# add commands for clients
for i in range(0, len(self.clients)):
client = self.clients[i]
gateway = self.gateways[i]
client_accessPoint_ip = gateway.getEmuIP(config)
if gateway not in wlans:
wlans[gateway] = [client.getEmuIP(config)]
else:
wlans[gateway].append(client.getEmuIP(config))
# start new client instance at begin of emulation
output_path = os.path.join(config['REMOTE_RESULT_DIR'], "consumer.log")
client.scheduleCmd(constants.SETUP_TIME, "sudo killall wldrdaemon_udp")
client.scheduleCmd(constants.SETUP_TIME, "fuser -k 12345/udp") # kill all application occupying the TCP-port 12345
            # schedule the client-side wldr daemon to start
client.scheduleCmd(self.startTime, wldr_daemon_cmd + " -d " + client_accessPoint_ip + " > demonlog.txt 2>&1 &) & ")
client.addAppResult(output_path, os.path.join(config['RESULT_DIR'], "consumer_" + str(client.getId()) + ".log_%RUN%"))
client.scheduleCmd(self.startTime , "(sudo chrt -o -p 0 $BASHPID && " + client_exe + " " + self.client_params + " -o " + output_path
+ " > /home/nfd/dashplayerlog.txt 2>&1 &) &")
# explicitly stop client at end of emulation
client.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall " + client_exe)
client.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall wldrdaemon_udp")
client.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall tail")
for accessPoint in wlans:
client_str = " -i ".join(wlans[accessPoint])
accessPoint.scheduleCmd(constants.SETUP_TIME, "sudo killall wldrdaemon_udp")
accessPoint.scheduleCmd(constants.SETUP_TIME, "fuser -k 12345/udp ")
accessPoint.scheduleCmd(constants.SETUP_TIME, wldr_daemon_cmd + " -i " + client_str + " > demonlog.txt 2>&1 &) &")
accessPoint.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall wldrdaemon_udp")
accessPoint.scheduleCmd(float(config["EMU_DURATION"]), "sudo killall tail")
| 50.626866
| 163
| 0.645047
| 3,329
| 0.981427
| 0
| 0
| 0
| 0
| 0
| 0
| 1,118
| 0.329599
|
a55b04ba7921f1a3ec26bc5a38d932e27524c9ac
| 1,918
|
py
|
Python
|
catoclient/commands/scheduletasks.py
|
cloudsidekick/catoclient
|
26907127e38d01f56959618263f4bf61e60784ee
|
[
"Apache-2.0"
] | 1
|
2017-08-31T03:26:50.000Z
|
2017-08-31T03:26:50.000Z
|
catoclient/commands/scheduletasks.py
|
cloudsidekick/catoclient
|
26907127e38d01f56959618263f4bf61e60784ee
|
[
"Apache-2.0"
] | null | null | null |
catoclient/commands/scheduletasks.py
|
cloudsidekick/catoclient
|
26907127e38d01f56959618263f4bf61e60784ee
|
[
"Apache-2.0"
] | null | null | null |
#########################################################################
# Copyright 2011 Cloud Sidekick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
import catoclient.catocommand
from catoclient.param import Param
import json
class ScheduleTasks(catoclient.catocommand.CatoCommand):
Description = 'Schedules one or more Tasks using a json template file.'
API = 'schedule_tasks'
Examples = '''
cato-schedule-tasks -s ./schedule_template.json
'''
Options = [Param(name='schedulefile', short_name='s', long_name='schedulefile',
optional=False, ptype='string',
doc='''The path to a json formatted schedule definition file. See the schedule_tasks API documentation for the format of the file.''')
]
def main(self):
try:
# first, we need to load the schedule definition
self.tasks = None
if self.schedulefile:
import os
fn = os.path.expanduser(self.schedulefile)
with open(fn, 'r') as f_in:
if not f_in:
print("Unable to open file [%s]." % fn)
self.tasks = f_in.read()
results = self.call_api(self.API, ['tasks'])
print(results)
except Exception as ex:
raise ex
| 39.142857
| 155
| 0.588634
| 1,113
| 0.580292
| 0
| 0
| 0
| 0
| 0
| 0
| 1,095
| 0.570907
|
a55cd95076293cb8d38f62d5a86be378db28011c
| 7,589
|
py
|
Python
|
highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py
|
ethz-asl/high_level_planning
|
094a73e993a6a9924f6ed067dcdbee70d1ead80e
|
[
"BSD-3-Clause"
] | null | null | null |
highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py
|
ethz-asl/high_level_planning
|
094a73e993a6a9924f6ed067dcdbee70d1ead80e
|
[
"BSD-3-Clause"
] | null | null | null |
highlevel_planning_ros/src/highlevel_planning_py/skills/navigate.py
|
ethz-asl/high_level_planning
|
094a73e993a6a9924f6ed067dcdbee70d1ead80e
|
[
"BSD-3-Clause"
] | null | null | null |
import pybullet as p
import numpy as np
from icecream import ic
from scipy.spatial.transform import Rotation as R
from highlevel_planning_py.tools.util import (
homogenous_trafo,
invert_hom_trafo,
pos_and_orient_from_hom_trafo,
SkillExecutionError,
)
class SkillNavigate:
def __init__(self, scene, robot):
self.robot_ = robot
self.robot_uid_ = robot.model.uid
self.scene_ = scene
def _check_collisions(self):
for _, obj in self.scene_.objects.items():
temp = p.getClosestPoints(self.robot_uid_, obj.model.uid, distance=0.5)
for elem in temp:
contact_distance = elem[8]
if contact_distance < 0.0:
# print("There is a collision")
return True
return False
def _move(self, pos, orient):
p.resetBasePositionAndOrientation(
self.robot_uid_, pos.tolist(), orient.tolist()
)
def move_to_object(self, target_name, nav_min_dist=None):
target_id = self.scene_.objects[target_name].model.uid
# Get the object position
temp = p.getBasePositionAndOrientation(target_id)
target_pos = np.array(temp[0])
# Get valid nav angles
nav_angle = self.scene_.objects[target_name].nav_angle
if nav_min_dist is None:
nav_min_dist = self.scene_.objects[target_name].nav_min_dist
# Move there
return self.move_to_pos(target_pos, nav_angle, nav_min_dist)
def move_to_pos(self, target_pos, nav_angle=None, nav_min_dist=None):
assert len(target_pos) == 3
assert type(target_pos) is np.ndarray
self.robot_.to_start()
# Get robot position
temp = p.getBasePositionAndOrientation(self.robot_uid_)
robot_pos = np.array(temp[0])
robot_orient = R.from_quat(temp[1])
# Get position and orientation of any object in the robot hand w.r.t the robot base
object_in_hand_uid = self._find_object_in_hand()
T_rob_obj = self._get_object_relative_pose(
object_in_hand_uid, robot_pos, robot_orient
)
if nav_angle is None:
alphas = np.arange(0.0, 2.0 * np.pi, 2.0 * np.pi / 10.0)
else:
alphas = np.array([nav_angle])
if nav_min_dist is None:
radii = np.arange(0.4, 2.0, 0.05)
else:
radii = nav_min_dist + np.arange(0.4, 2.0, 0.05)
# Iterate through points on circles around the target
# First vary the radius
for r in radii:
# Then vary the angle
for alpha in alphas:
direction_vec = np.array([np.cos(alpha), np.sin(alpha), 0])
robot_pos[:2] = target_pos[:2] + r * direction_vec[:2]
rotation = R.from_euler("z", np.pi + alpha, degrees=False)
robot_orient = rotation.as_quat()
# Put robot into this position
self._move(robot_pos, robot_orient)
if not self._check_collisions():
# Move object into robot's hand
self._set_object_relative_pose(
object_in_hand_uid, robot_pos, robot_orient, T_rob_obj
)
return True
return False
def _find_object_in_hand(self):
# Determine which object is in the robot's hand
object_in_hand_uid = None
object_in_hand_name = "nothing"
for obj_name, obj in self.scene_.objects.items():
temp = p.getClosestPoints(
self.robot_uid_,
obj.model.uid,
distance=0.01,
linkIndexA=self.robot_.link_name_to_index["panda_leftfinger"],
)
if len(temp) > 0:
if object_in_hand_uid is not None:
ic("---")
ic(object_in_hand_name)
ic(obj_name)
raise SkillExecutionError(
"Don't know how to deal with more than one object in robot's hand"
)
object_in_hand_uid = obj.model.uid
object_in_hand_name = obj_name
return object_in_hand_uid
def _get_object_relative_pose(self, object_in_hand_uid, robot_pos, robot_orient):
T_rob_obj = None
if object_in_hand_uid is not None:
# Get object position
temp = p.getBasePositionAndOrientation(object_in_hand_uid)
held_object_pos = np.array(temp[0])
held_object_orient = R.from_quat(temp[1])
# Compute object pose relative to robot
r_O_O_obj = held_object_pos
C_O_obj = held_object_orient
T_O_obj = homogenous_trafo(r_O_O_obj, C_O_obj)
r_O_O_rob = robot_pos
C_O_rob = robot_orient
T_O_rob = homogenous_trafo(r_O_O_rob, C_O_rob)
T_rob_obj = np.matmul(invert_hom_trafo(T_O_rob), T_O_obj)
# Check result
T_test = np.matmul(T_O_rob, T_rob_obj)
assert np.all(T_test - T_O_obj < 1e-12)
return T_rob_obj
def _set_object_relative_pose(
self, object_in_hand_uid, robot_pos, robot_orient, T_rob_obj
):
if object_in_hand_uid is not None:
r_O_O_rob = robot_pos
C_O_rob = R.from_quat(robot_orient)
T_O_rob = homogenous_trafo(r_O_O_rob, C_O_rob)
T_O_obj = np.matmul(T_O_rob, T_rob_obj)
(held_object_pos, held_object_orient) = pos_and_orient_from_hom_trafo(
T_O_obj
)
p.resetBasePositionAndOrientation(
object_in_hand_uid,
held_object_pos.tolist(),
held_object_orient.tolist(),
)
def get_nav_in_reach_description():
action_name = "nav-in-reach"
action_params = [
["current_pos", "navgoal"],
["goal_pos", "navgoal"],
["gid", "grasp_id"],
["rob", "robot"],
]
action_preconditions = [
("at", True, ["current_pos", "rob"]),
("has-grasp", True, ["goal_pos", "gid"]),
]
action_effects = [
("in-reach", True, ["goal_pos", "rob"]),
("in-reach", False, ["current_pos", "rob"]),
("at", True, ["goal_pos", "rob"]),
("at", False, ["current_pos", "rob"]),
]
action_exec_ignore_effects = [
("at", False, ["current_pos", "rob"]),
("in-reach", False, ["current_pos", "rob"]),
]
return (
action_name,
{
"params": action_params,
"preconds": action_preconditions,
"effects": action_effects,
"exec_ignore_effects": action_exec_ignore_effects,
},
)
def get_nav_at_description():
action_name = "nav-at"
action_params = [
["current_pos", "navgoal"],
["goal_pos", "navgoal"],
["rob", "robot"],
]
action_preconditions = [("at", True, ["current_pos", "rob"])]
action_effects = [
("at", True, ["goal_pos", "rob"]),
("at", False, ["current_pos", "rob"]),
("in-reach", False, ["current_pos", "rob"]),
]
action_exec_ignore_effects = [
("at", False, ["current_pos", "rob"]),
("in-reach", False, ["current_pos", "rob"]),
]
return (
action_name,
{
"params": action_params,
"preconds": action_preconditions,
"effects": action_effects,
"exec_ignore_effects": action_exec_ignore_effects,
},
)
| 34.339367
| 91
| 0.573593
| 5,550
| 0.731322
| 0
| 0
| 0
| 0
| 0
| 0
| 1,145
| 0.150876
|
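_get_object_relative_pose above expresses a grasped object's pose in the robot's base frame by composing homogeneous transforms, T_rob_obj = inv(T_O_rob) * T_O_obj, so that after the base is teleported the object can be restored with T_O_obj' = T_O_rob' * T_rob_obj. A minimal numpy/scipy sketch of that relation, with made-up poses and without the project's helper functions, is:

# Minimal sketch of the relative-pose relation used above; the poses are made up for illustration.
import numpy as np
from scipy.spatial.transform import Rotation as R

def hom(pos, rot):
    T = np.eye(4)
    T[:3, :3] = rot.as_matrix()
    T[:3, 3] = pos
    return T

T_O_rob = hom(np.array([1.0, 0.0, 0.0]), R.from_euler('z', np.pi / 2))   # robot base in world frame
T_O_obj = hom(np.array([1.2, 0.1, 0.3]), R.from_euler('z', np.pi / 4))   # object in world frame
T_rob_obj = np.linalg.inv(T_O_rob) @ T_O_obj                             # object in robot frame
assert np.allclose(T_O_rob @ T_rob_obj, T_O_obj)                         # round trip recovers the world pose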
a55d8714cd1710f5fc46c5b77d8879d3591e23b3
| 39
|
py
|
Python
|
pynairus/actions/__init__.py
|
venairus/pynairus
|
76227072aa0f0f98a36a3a04eb6a436473cfd9a6
|
[
"MIT"
] | 2
|
2018-02-15T12:16:10.000Z
|
2018-09-11T12:05:12.000Z
|
pynairus/actions/__init__.py
|
venairus/pynairus
|
76227072aa0f0f98a36a3a04eb6a436473cfd9a6
|
[
"MIT"
] | null | null | null |
pynairus/actions/__init__.py
|
venairus/pynairus
|
76227072aa0f0f98a36a3a04eb6a436473cfd9a6
|
[
"MIT"
] | 1
|
2019-10-30T09:40:28.000Z
|
2019-10-30T09:40:28.000Z
|
# coding: utf-8
"""Actions package"""
| 9.75
| 21
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.923077
|
a55f46928d283fccaf2605f89dce3d22df548a5c
| 5,844
|
py
|
Python
|
Eager/elk-experiment/appserver/service_time_analyzer.py
|
UCSB-CS-RACELab/eager-appscale
|
d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d
|
[
"Apache-2.0"
] | 3
|
2016-06-12T01:18:49.000Z
|
2018-07-16T18:20:23.000Z
|
Eager/elk-experiment/appserver/service_time_analyzer.py
|
UCSB-CS-RACELab/eager-appscale
|
d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d
|
[
"Apache-2.0"
] | null | null | null |
Eager/elk-experiment/appserver/service_time_analyzer.py
|
UCSB-CS-RACELab/eager-appscale
|
d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d
|
[
"Apache-2.0"
] | 1
|
2020-05-25T02:59:15.000Z
|
2020-05-25T02:59:15.000Z
|
import argparse
import httplib
import json
import numpy
import re
import time
class RequestInfo:
def __init__(self, req, filtered=[]):
self.key = req['key']
self.timestamp = req['request_timestamp']['value_as_string']
self.api_calls = req['doc_count']
self.service_times = {}
services = req['group_by_service']['buckets']
for service in services:
name = service['key']
if name in filtered:
continue
value = service['service_time']['value']
self.service_times[name] = value
self.total_time = sum(self.service_times.values())
def get_digit(delta_str):
return int(delta_str[:len(delta_str)-1])
def parse_time_delta(delta_str):
pattern = re.compile('^(\d+[dhms]\s*)+$')
if pattern.match(delta_str):
segments = re.split('(\d+[dhms]\s*)', delta_str)
segments = map(lambda s: s.strip(), filter(lambda s: len(s) > 0, segments))
result = 0
for segment in segments:
if segment.endswith('s'):
result += get_digit(segment) * 1000
elif segment.endswith('m'):
result += get_digit(segment) * 1000 * 60
elif segment.endswith('h'):
result += get_digit(segment) * 1000 * 60 * 60
elif segment.endswith('d'):
result += get_digit(segment) * 1000 * 60 * 60 * 24
else:
raise ValueError('Invalid time delta string ' + segment)
return result
else:
raise ValueError('Invalid time delta string ' + delta_str)
def get_request_info(server, port, index, app, time_window, filtered):
start_time = long(time.time() * 1000) - time_window
filtered_query = {
'filtered' : {
'query' : { 'term' : { 'appId' : app }},
'filter' : { 'range' : { 'timestamp' : { 'gte' : start_time}}}
}
}
query = {
'query' : filtered_query,
'sort': { 'timestamp' : { 'order' : 'asc'}},
'aggs': {
'group_by_request' : {
'terms' : { 'field' : 'requestId.raw', 'size': 0, 'order': {'request_timestamp': 'asc'} },
'aggs': {
'request_timestamp': {
'min': { 'field': 'timestamp'}
},
'group_by_service': {
'terms': { 'field': 'service.raw' },
'aggs': {
'service_time': {
'sum': { 'field': 'elapsed' }
}
}
}
}
}
}
}
path = '/{0}/apicall/_search'.format(index)
conn = httplib.HTTPConnection(server, port)
conn.request('POST', path, json.dumps(query))
response = conn.getresponse()
data = response.read()
conn.close()
if response.status != 200:
error_message = 'Server returned unexpected status: {0}\n{1}'.format(response.status, data)
raise RuntimeError(error_message)
output = json.loads(data)
requests = output['aggregations']['group_by_request']['buckets']
result = []
for req in requests:
result.append(RequestInfo(req, filtered))
return result
def calculate_summary(requests, func):
values = filter(lambda val: val > 0, map(func, requests))
if values:
return numpy.mean(values), numpy.std(values), numpy.median(values), len(values)
else:
return 0, 0, 0, 0
def print_output(requests, order):
service_names = [ 'datastore_v3', 'memcache', 'urlfetch' ]
print 'requestId datastore_v3 (datastore_v3%) memcache (memcache%) urlfetch (urlfetch%) total_time api_calls'
sorted_requests = requests
if order:
sorted_requests = sorted(requests, key=lambda x: x.total_time)
for req in sorted_requests:
record = '{0} {1} '.format(req.timestamp, req.key)
for k in service_names:
value = req.service_times.get(k, 0.0)
record += '{0} ({1:.2f}) '.format(value, (value/req.total_time) * 100.0)
record += '{0} {1}'.format(req.total_time, req.api_calls)
print record
print
print 'Total requests: {0}'.format(len(requests))
print '[service] Name mean std median count'
print '[service] Datastore {0:.2f} {1:.2f} {2:.2f} {3}'.format(
*calculate_summary(requests, lambda req: req.service_times.get('datastore_v3', -1)))
print '[service] Memcache {0:.2f} {1:.2f} {2:.2f} {3}'.format(
*calculate_summary(requests, lambda req: req.service_times.get('memcache', -1)))
print '[service] URLFetch {0:.2f} {1:.2f} {2:.2f} {3}'.format(
*calculate_summary(requests, lambda req: req.service_times.get('urlfetch', -1)))
print '[service] TotalTime {0:.2f} {1:.2f} {2:.2f} {3}'.format(
*calculate_summary(requests, lambda req: req.total_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Analyzes execution time of cloud services.')
parser.add_argument('--server', '-s', dest='server', default='128.111.179.159')
parser.add_argument('--port', '-p', type=int, dest='port', default=9200)
parser.add_argument('--index', '-i', dest='index', default='appscale-internal')
parser.add_argument('--app', '-a', dest='app', default='watchtower')
parser.add_argument('--time_window', '-t', dest='time_window', default='1h')
parser.add_argument('--order', '-o', dest='order', action='store_true')
parser.add_argument('--filtered_services', '-fs', nargs='+', dest='filtered_services', default=[])
args = parser.parse_args()
time_window_ms = parse_time_delta(args.time_window)
requests = get_request_info(args.server, args.port, args.index, args.app, time_window_ms, args.filtered_services)
if requests:
print_output(requests, args.order)
else:
print 'No request information found'
| 41.15493
| 117
| 0.5936
| 569
| 0.097365
| 0
| 0
| 0
| 0
| 0
| 0
| 1,435
| 0.245551
|
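parse_time_delta above converts strings such as '1h 30m' into milliseconds by splitting out each digits-plus-unit segment and summing the per-unit contributions (s, m, h, d). Assuming the function from the script above is importable, a few worked values that follow directly from that logic are:

# Worked values for the parsing logic above (all results are milliseconds);
# assumes parse_time_delta from the script above is importable.
assert parse_time_delta('90s') == 90 * 1000                                # 90,000
assert parse_time_delta('1h 30m') == 90 * 60 * 1000                       # 5,400,000
assert parse_time_delta('1d 2h 5s') == (24 + 2) * 3600 * 1000 + 5 * 1000  # 93,605,000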
a55fff704388ce5d543d33e5e01d893ff1080816
| 523
|
py
|
Python
|
favorites/models.py
|
plegulluche/OPC-P11
|
9705d56bb77bb548495954c80af02d421dcbf3a2
|
[
"Unlicense"
] | null | null | null |
favorites/models.py
|
plegulluche/OPC-P11
|
9705d56bb77bb548495954c80af02d421dcbf3a2
|
[
"Unlicense"
] | null | null | null |
favorites/models.py
|
plegulluche/OPC-P11
|
9705d56bb77bb548495954c80af02d421dcbf3a2
|
[
"Unlicense"
] | null | null | null |
from django.db import models
from products.models import Product
from account.models import Account
class FavouriteProduct(models.Model):
user = models.ForeignKey(Account, on_delete=models.CASCADE, related_name="user")
product = models.ForeignKey(
Product, on_delete=models.CASCADE, related_name="product"
)
is_favourite = models.BooleanField(default=True)
def __str__(self):
return (
f'product {self.product.name} {"marked favourite"} by {self.user.username}'
)
| 29.055556
| 87
| 0.707457
| 419
| 0.801147
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.172084
|
a56370a9c455e4054cc211abc2c3f2c8a9e7a1f6
| 171
|
py
|
Python
|
tests/const.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | 1
|
2021-05-20T01:34:28.000Z
|
2021-05-20T01:34:28.000Z
|
tests/const.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | 44
|
2020-11-15T01:17:38.000Z
|
2021-07-20T13:45:12.000Z
|
tests/const.py
|
makotookamura/GmoCoin
|
025d3e68364bf52418dbc3445987ff21528db732
|
[
"Apache-2.0"
] | 1
|
2021-07-17T16:56:03.000Z
|
2021-07-17T16:56:03.000Z
|
from gmocoin.common.const import ConstMeta
class TestConst(metaclass=ConstMeta):
API_CALL_INTERVAL = 0.5
ORDER_PRICE = 2000000
ORDER_LOSSCUT_PRICE = 1500000
| 21.375
| 42
| 0.77193
| 125
| 0.730994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a56450c0dab785583a1aaf64015a7d73ea36fb2a
| 2,443
|
py
|
Python
|
pydis_site/apps/api/migrations/0044_migrate_nominations_from_infraction_to_nomination_model.py
|
Numerlor/site
|
e4cec0aeb2a791e622be8edd94fb4e82d150deab
|
[
"MIT"
] | 700
|
2018-11-17T15:56:51.000Z
|
2022-03-30T22:53:17.000Z
|
pydis_site/apps/api/migrations/0044_migrate_nominations_from_infraction_to_nomination_model.py
|
Numerlor/site
|
e4cec0aeb2a791e622be8edd94fb4e82d150deab
|
[
"MIT"
] | 542
|
2018-11-17T13:39:42.000Z
|
2022-03-31T11:24:00.000Z
|
pydis_site/apps/api/migrations/0044_migrate_nominations_from_infraction_to_nomination_model.py
|
Numerlor/site
|
e4cec0aeb2a791e622be8edd94fb4e82d150deab
|
[
"MIT"
] | 178
|
2018-11-21T09:06:56.000Z
|
2022-03-31T07:43:28.000Z
|
# Generated by Django 2.2.5 on 2019-09-30 12:15
import logging
from django.db import migrations
from django.db.models import Q
log = logging.getLogger('nomination_migration')
def migrate_nominations_to_new_model(apps, schema_editor):
"""
    Migrates nominations from the infraction model to the nomination model.
This migration works by replaying the nomination history in chronological order, adding and
ending nominations as we've recorded them.
"""
Infraction = apps.get_model('api', 'Infraction')
Nomination = apps.get_model('api', 'Nomination')
all_nominations = (
Q(reason__startswith="Helper nomination:") | Q(reason__startswith="Unwatched (talent-pool):")
)
for infraction in Infraction.objects.filter(all_nominations).order_by('inserted_at'):
if infraction.reason.startswith("Helper nomination:"):
if Nomination.objects.filter(user=infraction.user, active=True).exists():
log.error(
f"User `{infraction.user.id}` already has an active nomination, aborting."
)
continue
nomination = Nomination(
user=infraction.user,
inserted_at=infraction.inserted_at,
reason=infraction.reason[19:], # Strip "Helper nomination: " prefix
actor=infraction.actor,
active=True,
)
nomination.save()
infraction.delete()
elif infraction.reason.startswith("Unwatched (talent-pool):"):
if not Nomination.objects.filter(user=infraction.user, active=True).exists():
log.error(
f"User `{infraction.user.id}` has no active nomination, can't end it!"
)
continue
nomination = Nomination.objects.get(user=infraction.user, active=True)
nomination.end_reason = infraction.reason[25:] # Strip "Unwatched (talent-pool):"
nomination.ended_at = infraction.inserted_at
nomination.active = False
nomination.save()
infraction.delete()
else:
log.error(f"I don't understand this infraction: {infraction}")
class Migration(migrations.Migration):
dependencies = [
('api', '0043_infraction_hidden_warnings_to_notes'),
]
operations = [
migrations.RunPython(migrate_nominations_to_new_model),
]
| 37.584615
| 101
| 0.632419
| 217
| 0.088825
| 0
| 0
| 0
| 0
| 0
| 0
| 753
| 0.308228
|
a565f1cec83237287d55c5339e0a84e9756b4648
| 276
|
py
|
Python
|
python_Ashwin-A-K/day_22.py
|
01coders/50-Days-Of-Code
|
98928cf0e186ee295bc90a4da0aa9554e2918659
|
[
"MIT"
] | null | null | null |
python_Ashwin-A-K/day_22.py
|
01coders/50-Days-Of-Code
|
98928cf0e186ee295bc90a4da0aa9554e2918659
|
[
"MIT"
] | null | null | null |
python_Ashwin-A-K/day_22.py
|
01coders/50-Days-Of-Code
|
98928cf0e186ee295bc90a4da0aa9554e2918659
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import datetime
x = datetime.datetime.now()
print(x) # current date and time
print(x.year) # current year
print(x.strftime("%A")) # current day
y = datetime.datetime(2020, 5, 17) # set date
print(y)
print(y.strftime("%B")) # Month name, full version
| 21.230769
| 51
| 0.681159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.402174
|
a567756da5a12285509d78272354ba05926525ee
| 587
|
py
|
Python
|
DesignPatterns/Node.py
|
QuantumFractal/Python-Scripts
|
9959af9fe835abd550365e98e4fc63e6b8357d1f
|
[
"MIT"
] | 2
|
2015-01-30T04:51:27.000Z
|
2015-12-31T08:47:22.000Z
|
DesignPatterns/Node.py
|
QuantumFractal/Python-Scripts
|
9959af9fe835abd550365e98e4fc63e6b8357d1f
|
[
"MIT"
] | null | null | null |
DesignPatterns/Node.py
|
QuantumFractal/Python-Scripts
|
9959af9fe835abd550365e98e4fc63e6b8357d1f
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, parent):
if parent is None:
self.isRoot = True
else:
self.isRoot = False
self.parent = parent
self.parent.add_child(self)
self.children = []
def __str__(self):
return 'Node'
def add_child(self, child):
self.children.append(child)
def print_tree_below(self, tabs = 0):
string = ''
print self
for child in self.children:
if child.children == []:
print tabs*'\t'+ str(child)
else:
child.print_tree_below(tabs+1)
n1 = Node(None)
n2 = Node(n1)
n3 = Node(n1)
n4 = Node(n2)
n5 = Node(n2)
n1.print_tree_below()
| 16.771429
| 38
| 0.654174
| 489
| 0.833049
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.020443
|
a5690fcf0124a35633cc811501e412b22c1aa270
| 60
|
py
|
Python
|
tests/__init__.py
|
ziliac/pyrmx
|
54f1c79f85f2142b8fb755f815642c4701e5a57b
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
ziliac/pyrmx
|
54f1c79f85f2142b8fb755f815642c4701e5a57b
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
ziliac/pyrmx
|
54f1c79f85f2142b8fb755f815642c4701e5a57b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Unit test package for pyrmx."""
| 15
| 34
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 0.95
|
a569dd73bf4c737b5da9b60bab3083b5192099d3
| 5,151
|
py
|
Python
|
weakest_link/game.py
|
jmattfong/weakest-link
|
c4dba2b51a7271b83d3cc14b1329836805019671
|
[
"Apache-2.0"
] | null | null | null |
weakest_link/game.py
|
jmattfong/weakest-link
|
c4dba2b51a7271b83d3cc14b1329836805019671
|
[
"Apache-2.0"
] | null | null | null |
weakest_link/game.py
|
jmattfong/weakest-link
|
c4dba2b51a7271b83d3cc14b1329836805019671
|
[
"Apache-2.0"
] | null | null | null |
from weakest_link.util import wait_for_choice, green, red, dollars, get_random_mean_word, starts_with_vowel, format_time
class WeakestLinkGame :
def __init__(self, players, rounds, final_round) :
self.players = players
self.rounds = rounds
self.final_round = final_round
self.total_bank = 0
self.maximum_bank = 0
self.current_round = 0
# For the API
def get_current_round(self) :
return self.rounds[self.current_round] if self.current_round < len(self.rounds) else self.final_round
def get_current_round_name(self) :
return self.get_current_round().get_name()
def get_players(self) :
return self.players
def get_current_bank(self, color=True) :
if self.current_round >= len(self.rounds) :
return 0
return dollars(self.get_current_round().round_bank, color=color)
def get_total_bank(self, color=True) :
return dollars(self.total_bank, color=False)
def get_bank_links(self) :
if self.current_round >= len(self.rounds) :
return []
return [dollars(link, color=False) for link in self.get_current_round().bank_links]
def get_current_link(self) :
if self.current_round >= len(self.rounds) :
return 0
return self.get_current_round().current_link
def get_current_player_num(self) :
if self.current_round >= len(self.rounds) :
return 0
return self.get_current_round().get_current_player_num()
def get_time_remaining(self) :
if self.current_round >= len(self.rounds) :
return 0
time = self.get_current_round().seconds_remaining
time = time if time > 0 else 0
return format_time(time)
# For the CLI
def run(self) :
first_player = self.players[0]
for i in range(len(self.rounds)) :
self.current_round = i
if len(self.players) == 2 :
print("Not running all rounds since we don't have enough players")
print()
break
if i != 0 :
print('As the strongest link last round,', green(first_player), 'will go first')
print()
round = self.rounds[i]
self.try_to_start_round(i+1, round, first_player)
first_player = self.handle_finished_round_results(round)
if self.current_round < 2 :
print('Not voting off weakest link since we are on round', self.current_round+1)
weakest_link = None
elif self.current_round == 2 :
print(red('Time to vote off multiple players!'))
weakest_link = self.vote_for_weakest_link()
weakest_link = self.vote_for_weakest_link()
weakest_link = self.vote_for_weakest_link()
else :
weakest_link = self.vote_for_weakest_link()
if first_player == weakest_link :
first_player = round.get_strongest_link(first_player)
self.current_round = len(self.rounds)
while len(self.players) > 2 :
weakest_link = self.vote_for_weakest_link()
if first_player == weakest_link :
first_player = round.get_strongest_link(first_player)
first_player = wait_for_choice('As the strongest link last round, ' + green(first_player) + ' chooses who will go first in the ' +\
red('final round') + '. Choices: ' + ", ".join(self.players) + ' > ', self.players)
self.try_to_start_round('Final', self.final_round, first_player)
print(green(str(self.final_round.winner) + ' is the winner! They win ' + dollars(self.total_bank)))
print()
print("Game over, goodnight!")
# Helpers
def try_to_start_round(self, round_num, round, first_player) :
wait_for_choice("Enter 'S' to start round " + str(round_num) + " > ", 'S')
print('Starting round', round_num)
print()
round.start_round(self.players, first_player)
print('Finished round', round_num)
print()
def handle_finished_round_results(self, round) :
# TODO determine next first player and total bank
self.total_bank += round.round_bank
self.maximum_bank += round.bank_links[-1]
strongest_link = round.get_strongest_link()
print('That round the team banked', dollars(round.round_bank))
adjective = get_random_mean_word()
print('Out of a possible', dollars(self.maximum_bank), "the team banked", 'an' if starts_with_vowel(adjective) else 'a', adjective, dollars(self.total_bank))
print('Statistically, the', green('strongest link'), 'was', green(strongest_link))
print('Statistically, the', red('weakest link'), 'was', red(round.get_weakest_link()))
print()
return strongest_link
def vote_for_weakest_link(self) :
weakest_link = wait_for_choice("Who is the weakest link? Choices: " + ', '.join(self.players) + " > ", self.players)
self.players.remove(weakest_link)
return weakest_link
| 40.559055
| 165
| 0.628422
| 5,028
| 0.976121
| 0
| 0
| 0
| 0
| 0
| 0
| 707
| 0.137255
|
a56b8b89c70b03cbae514c630dd4557886c37a12
| 1,338
|
py
|
Python
|
infiltrate/models/card/expedition.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | 4
|
2019-04-08T09:30:10.000Z
|
2020-09-15T19:25:30.000Z
|
infiltrate/models/card/expedition.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | 19
|
2019-04-09T19:02:14.000Z
|
2020-12-25T05:22:45.000Z
|
infiltrate/models/card/expedition.py
|
Qazzquimby/eternalCardEvaluator
|
ef8640ed819a89e5198f8aedf0861a29c57c5720
|
[
"MIT"
] | null | null | null |
import typing as t
import infiltrate.browsers as browsers
import infiltrate.eternal_warcy_cards_browser as ew_cards
import infiltrate.models.card as card_mod
from infiltrate import db
def update_is_in_expedition():
"""Sets the is_in_expedition column of the cards table
to match Eternal Warcry readings."""
card_mod.Card.query.update({"is_in_expedition": False})
expedition_card_ids = _get_expedition_card_ids()
for card_id in expedition_card_ids:
card_mod.Card.query.filter(
card_mod.Card.set_num == card_id.set_num,
card_mod.Card.card_num == card_id.card_num,
).update({"is_in_expedition": True})
db.session.commit()
def _get_expedition_card_ids() -> t.List[card_mod.CardId]:
expedition_id = _get_expedition_id()
root_url = ew_cards.get_ew_cards_root_url(expedition_id=expedition_id)
return ew_cards.get_card_ids_in_search(root_url)
def _get_expedition_id():
card_url = "https://eternalwarcry.com/cards"
most_recent_expedition_selector = "#Expedition > option:nth-child(2)"
element = browsers.get_first_element_from_url_and_selector(
url=card_url, selector=most_recent_expedition_selector
)
expedition_id = element.attrs["value"]
return expedition_id
if __name__ == "__main__":
result = _get_expedition_card_ids()
| 32.634146
| 74
| 0.750374
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 216
| 0.161435
|
a56e7c7d3eb512b85fa07082bf02be47726e19fd
| 8,373
|
py
|
Python
|
attribution/authorship_pipeline/classifiers/BaseClassifier.py
|
yangzhou6666/authorship-detection
|
f28701dea256da70eb8ba216c2572e1975c99b54
|
[
"MIT"
] | 14
|
2020-10-26T06:05:55.000Z
|
2022-03-08T08:32:17.000Z
|
attribution/authorship_pipeline/classifiers/BaseClassifier.py
|
yangzhou6666/authorship-detection
|
f28701dea256da70eb8ba216c2572e1975c99b54
|
[
"MIT"
] | 10
|
2020-02-29T16:55:20.000Z
|
2021-11-06T10:40:32.000Z
|
attribution/authorship_pipeline/classifiers/BaseClassifier.py
|
yangzhou6666/authorship-detection
|
f28701dea256da70eb8ba216c2572e1975c99b54
|
[
"MIT"
] | 4
|
2021-07-28T12:27:46.000Z
|
2021-10-04T18:12:33.000Z
|
from collections import namedtuple
from math import ceil
from typing import Tuple, Dict, Union, List, Counter
import numpy as np
import pandas as pd
from classifiers.config import Config
from data_loading.PathMinerDataset import PathMinerDataset
from data_loading.PathMinerLoader import PathMinerLoader
from data_loading.PathMinerSnapshotLoader import PathMinerSnapshotLoader
from preprocessing.context_split import PickType, ContextSplit
from util import ProcessedFolder, ProcessedSnapshotFolder
ClassificationResult = namedtuple(
'ClassificationResult',
('accuracy', 'macro_precision', 'macro_recall', 'fold_ind')
)
def compute_classification_result(
true_labels: List, predicted_labels: List, fold_ind: Union[int, Tuple[int, int]]
) -> ClassificationResult:
"""
Compute metric values (accuracy, precision, recall), given the predictions.
:param true_labels: true authors
:param predicted_labels: model's predictions
:param fold_ind: index that is used to refer to the fold in cross-validation
:return: an instance of ClassificationResult that contains the computed metric values
"""
true_labels = np.array(true_labels, dtype=np.int)
predicted_labels = np.array(predicted_labels, dtype=np.int)
labels, counts = np.unique(true_labels, return_counts=True)
tp, fp, tn, fn = 0, 0, 0, 0
precisions = []
recalls = []
# print('===========')
# for true_label, predicted_label in zip(true_labels, predicted_labels):
# if true_label != predicted_label:
# print(f'true: {true_label} predicted: {predicted_label}')
# print('===========')
for label, count in zip(labels, counts):
true_positive = np.sum(np.logical_and(true_labels == label, predicted_labels == label))
false_positive = np.sum(np.logical_and(true_labels != label, predicted_labels == label))
true_negative = np.sum(np.logical_and(true_labels != label, predicted_labels != label))
false_negative = np.sum(np.logical_and(true_labels == label, predicted_labels != label))
tp += true_positive
fp += false_positive
tn += true_negative
fn += false_negative
precisions.append(tp / (tp + fp) if (tp + fp > 0) else 0.)
recalls.append(tp / (tp + fn))
return ClassificationResult(
accuracy=np.mean(true_labels == predicted_labels),
macro_precision=np.mean(precisions),
macro_recall=np.mean(recalls),
fold_ind=fold_ind
)
class BaseClassifier:
"""
Base class for all classifiers that handles correct setup of data loading, data splitting, and cross-validation.
"""
def __init__(self, config: Config, project_folder: Union[ProcessedFolder, ProcessedSnapshotFolder],
change_entities: pd.Series, change_to_time_bucket: Dict, min_max_count: Tuple[int, int],
author_occurrences: Counter, context_splits: List[ContextSplit]):
self.config = config
self.__fix_random()
if config.mode() == "snapshot":
self._loader = PathMinerSnapshotLoader(project_folder)
else:
self._loader = PathMinerLoader(
project_folder, change_entities, change_to_time_bucket, min_max_count, author_occurrences,
context_splits
)
self.__indices_per_class, self._n_classes = self.__split_into_classes()
self.update_chosen_classes()
self.models = {}
def __fix_random(self):
np.random.seed(self.config.seed())
self.__seed = self.config.seed()
def __split_into_classes(self) -> Tuple[np.ndarray, int]:
"""
Computes indices that belong to each class (author).
"""
print("Splitting into classes")
index = self._loader.labels()
n_classes = self._loader.n_classes()
indices_per_class = [[] for _ in range(n_classes)]
for i, ind in enumerate(index):
indices_per_class[ind].append(i)
indices_per_class = np.array([np.array(inds, dtype=np.int32) for inds in indices_per_class])
# for k in range(n_classes):
# np.random.shuffle(indices_per_class[k])
return indices_per_class, n_classes
def update_chosen_classes(self):
"""
For evaluation on the data from a subset of authors, this method re-samples the picked authors.
If all the authors should be used, it keeps selecting the complete set of authors.
"""
chosen_classes = np.random.choice(self._n_classes, self.config.n_classes(), replace=False) \
if self.config.n_classes() is not None \
else np.arange(self._n_classes)
self.__chosen_classes = chosen_classes
def _split_train_test(self, loader: PathMinerLoader, fold_ind: Union[int, Tuple[int, int]], pad: bool = False) \
-> Tuple[PathMinerDataset, PathMinerDataset]:
"""
Creates train and test datasets. The type of the experiment (regular, context, time) is controlled by the config
passed to the Classifier object at the initialization step. Fold index is used to tell which part of data to
use for testing (selected fold in cross-validation, test slice for 'time', or test subset of code snippets for
'context').
:param loader: data loader
:param fold_ind: part of data used for testing (number in case of cross-validation or 'context', two numbers for 'time')
:param pad: whether to pad data (used for preparing tensors for the Neural Network model)
:return: a tuple of training and testing datasets
"""
chosen_classes = self.__chosen_classes
if self.config.mode() == 'time':
train_fold, test_fold = fold_ind
train_indices = self._loader.time_buckets() == train_fold
test_indices = self._loader.time_buckets() == test_fold
elif self.config.mode() == 'context':
train_indices = self._loader.context_indices(fold_ind) == PickType.TRAIN
test_indices = self._loader.context_indices(fold_ind) == PickType.TEST
else:
test_size = self.config.test_size()
if isinstance(test_size, int):
start_ind = fold_ind * test_size
train_indices = np.concatenate([
np.concatenate((inds[:min(inds.size, start_ind)], inds[min(inds.size, start_ind + test_size):]))
for inds in self.__indices_per_class[chosen_classes]
])
test_indices = np.concatenate([
inds[min(inds.size, start_ind):min(inds.size, start_ind + test_size)]
for inds in self.__indices_per_class[chosen_classes]
])
else:
train_indices = np.concatenate([
np.concatenate((inds[:ceil(test_size * inds.size) * fold_ind],
inds[min(inds.size, ceil(test_size * inds.size) * (fold_ind + 1)):]))
for inds in self.__indices_per_class[chosen_classes]
])
test_indices = np.concatenate([
inds[
ceil(test_size * inds.size) * fold_ind:min(inds.size, ceil(test_size * inds.size) * (fold_ind + 1))]
for inds in self.__indices_per_class[chosen_classes]
])
train_indices = np.array(train_indices, dtype=np.int32)
test_indices = np.array(test_indices, dtype=np.int32)
return self._create_datasets(loader, train_indices, test_indices, pad)
def _create_datasets(self, loader, train_indices, test_indices, pad) -> Tuple[PathMinerDataset, PathMinerDataset]:
"""
:return: datasets for training and testing
"""
return PathMinerDataset.from_loader(loader, train_indices, pad), \
PathMinerDataset.from_loader(loader, test_indices, pad)
def cross_validation_folds(self) -> List[int]:
"""
:return: a list of fold indices depending on the test size passed in config
"""
test_size = self.config.test_size()
if isinstance(test_size, float):
return list(range(int(np.ceil(1. / test_size))))
else:
return list(range((int(np.ceil(max([inds.size for inds in self.__indices_per_class]) / test_size)))))
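    # Hedged sketch (not part of the original class): one way a caller might drive the
    # folds exposed above. `run_fold` is an assumed callable, passed in purely for
    # illustration, that maps a fold index to a ClassificationResult.
    def _average_accuracy_over_folds(self, run_fold) -> float:
        results = [run_fold(fold) for fold in self.cross_validation_folds()]
        return float(np.mean([result.accuracy for result in results]))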
| 47.845714
| 128
| 0.651618
| 5,877
| 0.701899
| 0
| 0
| 0
| 0
| 0
| 0
| 2,056
| 0.245551
|
a56ef28284a9ee515302682dd5904409e87c4d93
| 4,965
|
py
|
Python
|
irekua_rest_api/views/__init__.py
|
IslasGECI/irekua-rest-api
|
35cf5153ed7f54d12ebad2ac07d472585f04e3e7
|
[
"BSD-4-Clause"
] | null | null | null |
irekua_rest_api/views/__init__.py
|
IslasGECI/irekua-rest-api
|
35cf5153ed7f54d12ebad2ac07d472585f04e3e7
|
[
"BSD-4-Clause"
] | 11
|
2020-03-28T18:51:50.000Z
|
2022-01-13T01:47:40.000Z
|
irekua_rest_api/views/__init__.py
|
IslasGECI/irekua-rest-api
|
35cf5153ed7f54d12ebad2ac07d472585f04e3e7
|
[
"BSD-4-Clause"
] | 1
|
2021-05-06T19:38:14.000Z
|
2021-05-06T19:38:14.000Z
|
# pylint: disable=C0301
from .annotations.annotation_tools import AnnotationToolViewSet
from .annotations.annotation_votes import AnnotationVoteViewSet
from .annotations.annotations import AnnotationViewSet
from .data_collections.collection_devices import CollectionDeviceViewSet
from .data_collections.collection_sites import CollectionSiteViewSet
from .data_collections.collection_users import CollectionUserViewSet
from .data_collections.data_collections import CollectionViewSet
from .data_collections.metacollections import MetaCollectionViewSet
from .data_collections.administrators import CollectionAdministratorViewSet
from .devices.device_brands import DeviceBrandViewSet
from .devices.devices import DeviceViewSet
from .devices.physical_devices import PhysicalDeviceViewSet
from .items.items import ItemViewSet
from .licences import LicenceViewSet
from .object_types.annotation_types import AnnotationTypeViewSet
from .object_types.data_collections.collection_administrators import CollectionTypeAdministratorViewSet
from .object_types.data_collections.collection_annotation_types import CollectionTypeAnnotationTypeViewSet
from .object_types.data_collections.collection_licence_types import CollectionTypeLicenceTypeViewSet
from .object_types.data_collections.collection_sampling_event_types import CollectionTypeSamplingEventTypeViewSet
from .object_types.data_collections.collection_site_types import CollectionTypeSiteTypeViewSet
from .object_types.data_collections.collection_event_types import CollectionTypeEventTypeViewSet
from .object_types.data_collections.collection_types import CollectionTypeViewSet
from .object_types.data_collections.collection_item_types import CollectionTypeItemTypeViewSet
from .object_types.data_collections.collection_device_types import CollectionTypeDeviceTypeViewSet
from .object_types.data_collections.collection_roles import CollectionTypeRoleViewSet
from .object_types.device_types import DeviceTypeViewSet
from .object_types.entailment_types import EntailmentTypeViewSet
from .object_types.event_types import EventTypeViewSet
from .object_types.item_types import ItemTypeViewSet
from .object_types.mime_types import MimeTypeViewSet
from .object_types.licence_types import LicenceTypeViewSet
from .object_types.sampling_events.sampling_event_type_device_types import SamplingEventTypeDeviceTypeViewSet
from .object_types.sampling_events.sampling_event_type_site_types import SamplingEventTypeSiteTypeViewSet
from .object_types.sampling_events.sampling_event_types import SamplingEventTypeViewSet
from .object_types.site_types import SiteTypeViewSet
from .object_types.term_types import TermTypeViewSet
from .sampling_events.sampling_event_devices import SamplingEventDeviceViewSet
from .sampling_events.sampling_events import SamplingEventViewSet
from .items.secondary_items import SecondaryItemViewSet
from .sites import SiteViewSet
from .items.tags import TagViewSet
from .terms.entailments import EntailmentViewSet
from .terms.synonym_suggestions import SynonymSuggestionViewSet
from .terms.synonyms import SynonymViewSet
from .terms.term_suggestions import TermSuggestionViewSet
from .terms.terms import TermViewSet
from .users.institutions import InstitutionViewSet
from .users.roles import RoleViewSet
from .users.users import UserViewSet
from .models.model import ModelViewSet
from .models.model_version import ModelVersionViewSet
from .models.model_prediction import ModelPredictionViewSet
__all__ = [
'AnnotationToolViewSet',
'AnnotationTypeViewSet',
'AnnotationViewSet',
'AnnotationVoteViewSet',
'CollectionDeviceViewSet',
'CollectionSiteViewSet',
'CollectionTypeAdministratorViewSet',
'CollectionTypeAnnotationTypeViewSet',
'CollectionTypeLicenceTypeViewSet',
'CollectionTypeSamplingEventTypeViewSet',
'CollectionTypeItemTypeViewSet',
'CollectionTypeSiteTypeViewSet',
'CollectionTypeEventTypeViewSet',
'CollectionTypeViewSet',
'CollectionUserViewSet',
'CollectionViewSet',
'DeviceBrandViewSet',
'DeviceTypeViewSet',
'DeviceViewSet',
'EntailmentTypeViewSet',
'EntailmentViewSet',
'EventTypeViewSet',
'InstitutionViewSet',
'ItemTypeViewSet',
'ItemViewSet',
'LicenceTypeViewSet',
'LicenceViewSet',
'MetaCollectionViewSet',
'PhysicalDeviceViewSet',
'RoleViewSet',
'SamplingEventDeviceViewSet',
'SamplingEventTypeDeviceTypeViewSet',
'SamplingEventTypeSiteTypeViewSet',
'SamplingEventTypeViewSet',
'SamplingEventViewSet',
'SecondaryItemViewSet',
'SiteTypeViewSet',
'SiteViewSet',
'SynonymSuggestionViewSet',
'SynonymViewSet',
'TagViewSet',
'TermSuggestionViewSet',
'TermTypeViewSet',
'TermViewSet',
'UserViewSet',
'CollectionTypeDeviceTypeViewSet',
'CollectionTypeRoleViewSet',
'CollectionAdministratorViewSet',
'MimeTypeViewSet',
'ModelViewSet',
'ModelVersionViewSet',
'ModelPredictionViewSet'
]
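# Hedged sketch (not part of the original package): these viewsets are normally exposed
# through a DRF router; the prefixes and basenames below are illustrative assumptions,
# not the project's real routing. Kept commented out so importing the package stays
# side-effect free.
#
#     from rest_framework import routers
#     router = routers.DefaultRouter()
#     router.register(r'annotations', AnnotationViewSet, basename='annotation')
#     router.register(r'items', ItemViewSet, basename='item')
#     urlpatterns = router.urls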
| 45.136364
| 113
| 0.848338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,210
| 0.243706
|
a5722597309534c03d51a4f6182a1dcb0d277f2d
| 2,765
|
py
|
Python
|
acurite/AcuriteManager.py
|
jamespauly/udi-acurite-poly
|
8c4866c3b18cf1c27f37ead392e732aa49e1bc07
|
[
"MIT"
] | null | null | null |
acurite/AcuriteManager.py
|
jamespauly/udi-acurite-poly
|
8c4866c3b18cf1c27f37ead392e732aa49e1bc07
|
[
"MIT"
] | null | null | null |
acurite/AcuriteManager.py
|
jamespauly/udi-acurite-poly
|
8c4866c3b18cf1c27f37ead392e732aa49e1bc07
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone
import udi_interface
import requests
import json
from enums import BatteryLevel, DeviceStatus
from nodes import AcuriteDeviceNode
LOGGER = udi_interface.LOGGER
Custom = udi_interface.Custom
class AcuriteManager():
def __init__(self, user, password):
self.user = user
self.password = password
def login(self):
try:
loginHeaders = {'Content-Type': 'application/json'}
loginData = json.dumps(
{'email': self.user, 'password': self.password})
loginResp = requests.post('https://marapi.myacurite.com/users/login', data=loginData, headers=loginHeaders)
loginRespJO = loginResp.json()
statusCode = loginResp.status_code
if statusCode != 200:
return None, None
LOGGER.info('Login HTTP Status Code: {}'.format(str(statusCode)))
LOGGER.debug(json.dumps(loginRespJO))
accountId = loginRespJO['user']['account_users'][0]['account_id']
tokenId = loginRespJO['token_id']
except Exception as e:
LOGGER.error('Failed to Login to Acurite', e)
return None, None
return tokenId, accountId
def getHubDevices(self):
tokenId, accountId = self.login()
if tokenId is not None and accountId is not None:
try:
hubHeaders = {'Content-Type': 'application/json', 'X-ONE-VUE-TOKEN': tokenId}
hubResp = requests.get('https://marapi.myacurite.com/accounts/{}/dashboard/hubs'.format(str(accountId)),
headers=hubHeaders)
hubsRespJO = hubResp.json()
hubId = hubsRespJO['account_hubs'][0]['id']
deviceHeaders = {'Content-Type': 'application/json', 'X-ONE-VUE-TOKEN': tokenId}
deviceResp = requests.get(
'https://marapi.myacurite.com/accounts/{}/dashboard/hubs/{}'.format(str(accountId), str(hubId)),
headers=deviceHeaders)
deviceRespJO = deviceResp.json()
statusCode = deviceResp.status_code
if statusCode != 200:
return None
LOGGER.info('Got Acurite Devices')
return deviceRespJO
except Exception as e:
LOGGER.error('AcuriteManager - Error in update', e)
return None
else:
return None
def convert_timedelta_min(self, duration):
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
# seconds = (seconds % 60)
return (days * 24 * 60) + (hours * 60) + minutes
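# Hedged usage sketch (not part of the original module): the credentials are
# placeholders, and nothing is assumed about the returned payload beyond it being a
# dict (or None when login or the device request fails).
if __name__ == "__main__":
    manager = AcuriteManager('user@example.com', 'hunter2')
    devices = manager.getHubDevices()
    if devices is None:
        LOGGER.error('Could not fetch Acurite hub devices')
    else:
        LOGGER.info('Hub payload keys: {}'.format(list(devices.keys())))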
| 37.876712
| 120
| 0.58264
| 2,529
| 0.914647
| 0
| 0
| 0
| 0
| 0
| 0
| 504
| 0.182278
|
a5724709634797eaf22a9f27d89e8c87596f3423
| 115
|
py
|
Python
|
recvCases/conf.py
|
BattleJudge/recvCase
|
b7e55cd3c40603fe2c0086066421b269a0664f1e
|
[
"MIT"
] | null | null | null |
recvCases/conf.py
|
BattleJudge/recvCase
|
b7e55cd3c40603fe2c0086066421b269a0664f1e
|
[
"MIT"
] | null | null | null |
recvCases/conf.py
|
BattleJudge/recvCase
|
b7e55cd3c40603fe2c0086066421b269a0664f1e
|
[
"MIT"
] | null | null | null |
ErrorMsg = {'BadZipFile' : 'Uploaded zip file is bad',
'EmptyZipFile' : 'Uploaded zip file is empty',}
| 57.5
| 59
| 0.626087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.695652
|
a5728dfb70546b1b777313ae3ec58c3a19425e50
| 5,650
|
py
|
Python
|
DTL/db/models/graphicsmodels.py
|
rocktavious/DevToolsLib
|
117200c91a3361e04f7c8e07d2ed4999bbcfc469
|
[
"MIT"
] | 1
|
2015-03-23T18:52:12.000Z
|
2015-03-23T18:52:12.000Z
|
DTL/db/models/graphicsmodels.py
|
rocktavious/DevToolsLib
|
117200c91a3361e04f7c8e07d2ed4999bbcfc469
|
[
"MIT"
] | null | null | null |
DTL/db/models/graphicsmodels.py
|
rocktavious/DevToolsLib
|
117200c91a3361e04f7c8e07d2ed4999bbcfc469
|
[
"MIT"
] | 2
|
2017-05-21T12:50:41.000Z
|
2021-10-17T03:32:45.000Z
|
from DTL.qt import QtCore, QtGui
from DTL.qt.QtCore import Qt
#------------------------------------------------------------
#------------------------------------------------------------
class GraphicsItemModel(QtGui.QGraphicsItem):
unselected_color = QtGui.QColor(100,100,100)
selected_color = QtGui.QColor(100,250,100)
#------------------------------------------------------------
def __init__(self, index, **kwds):
super(GraphicsItemModel, self).__init__()
self.index = QtCore.QPersistentModelIndex(index)
self.rect = QtCore.QRectF(0,0,0,0)
self.shape = QtGui.QPainterPath()
self.brush = QtGui.QBrush()
self.pen = QtGui.QPen()
self.setFlags(self.ItemIsSelectable | self.ItemIsFocusable)
self.onInit(**kwds)
#------------------------------------------------------------
def onInit(self, **kwds):
pass
#------------------------------------------------------------
def getIndex(self, column=0):
if not self.index.isValid() :
raise Exception('Persistent Model Index is not Valid!')
return self.scene().model.index(self.index.row(), column, self.index.parent())
#------------------------------------------------------------
def boundingRect(self):
return self.rect
#------------------------------------------------------------
def shape(self):
return self.shape
#------------------------------------------------------------
def paint(self, painter, option, widget):
if self.isSelected() :
self.brush.setColor(self.selected_color)
else:
self.brush.setColor(self.unselected_color)
painter.setBrush(self.brush)
painter.setPen(self.pen)
painter.drawPath(self.shape)
#------------------------------------------------------------
def data(self, column, role=Qt.DisplayRole):
if self.scene() is None :
return QtCore.QVariant()
return self.scene().data(self.getIndex(column), role)
#------------------------------------------------------------
def setData(self, column, value, role=Qt.EditRole):
if self.scene() is None :
return False
self.scene().setData(self.getIndex(column), QtCore.QVariant(value), role)
#------------------------------------------------------------
#------------------------------------------------------------
class GraphicsSceneModel(QtGui.QGraphicsScene):
#------------------------------------------------------------
def __init__(self, parent=None):
super(GraphicsSceneModel, self).__init__(parent=parent)
self.model = None
#------------------------------------------------------------
def data(self, index, role):
return self.model.data(index, role)
#------------------------------------------------------------
def setData(self, index, value, role=Qt.EditRole):
return self.model.setData(index, value, role)
#------------------------------------------------------------
def setModel(self, model):
self.clear()
self.model = model
self.populateScene()
self.disableItems()
#------------------------------------------------------------
def setSelection(self, index):
item = self.modelIndexToSceneItem(index)
if item :
item.setSelected(True)
#------------------------------------------------------------
def disableItems(self):
for item in self.items():
item.setVisible(False)
item.setEnabled(False)
#------------------------------------------------------------
def modelIndexToSceneItem(self, index):
index = index
for item in self.items() :
if self.compareIndexes(index, item.getIndex()) :
return item
return None
#------------------------------------------------------------
def compareIndexes(self, index1, index2):
if not index1.isValid() or not index2.isValid() :
return False
if index1.row() != index2.row() :
return False
if index1.internalPointer() != index2.internalPointer() :
return False
return True
#------------------------------------------------------------
def insertNode(self, node=None, parent=QtCore.QModelIndex()):
if parent.isValid() :
childCount = parent.internalPointer().childCount()
else:
childCount = 0
self.model.insertRows(childCount, 1, parent, node)
new_index = self.model.index(childCount, 0, parent)
self.addIndex(new_index, False)
return new_index
#------------------------------------------------------------
def removeNode(self, row, parent=QtCore.QModelIndex()):
if parent.isValid() :
childCount = parent.internalPointer().childCount()
else:
childCount = 0
for i in range(childCount) :
child_index = self.model.index(i, 0, parent)
self.removeIndex(child_index)
self.model.removeRows(i, 1, parent)
#------------------------------------------------------------
def populateScene(self):
pass
#------------------------------------------------------------
def addIndex(self, index):
pass
#------------------------------------------------------------
def removeIndex(self, index):
pass
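# Hedged usage sketch (not part of the original module): wiring the scene model to an
# application-provided Qt item model. `SomeTreeModel` is a placeholder name for any
# QAbstractItemModel subclass; kept as a commented example because it needs a running
# Qt application.
#
#     app = QtGui.QApplication([])
#     scene = GraphicsSceneModel()
#     scene.setModel(SomeTreeModel())
#     view = QtGui.QGraphicsView(scene)
#     view.show()
#     app.exec_()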
| 37.417219
| 94
| 0.420354
| 5,319
| 0.941416
| 0
| 0
| 0
| 0
| 0
| 0
| 1,563
| 0.276637
|
a5734519608276ff9f8fee5a5bd77871ef93780f
| 4,461
|
py
|
Python
|
tests/test_renderers.py
|
adamchainz/classy-django-rest-framework
|
19f57d88d13f5ddd2ee33a3239c51e97829e5e6f
|
[
"MIT"
] | null | null | null |
tests/test_renderers.py
|
adamchainz/classy-django-rest-framework
|
19f57d88d13f5ddd2ee33a3239c51e97829e5e6f
|
[
"MIT"
] | null | null | null |
tests/test_renderers.py
|
adamchainz/classy-django-rest-framework
|
19f57d88d13f5ddd2ee33a3239c51e97829e5e6f
|
[
"MIT"
] | null | null | null |
import unittest
from mock import mock_open, patch
from rest_framework.generics import ListAPIView
from rest_framework_ccbv.renderers import (
BasePageRenderer, IndexPageRenderer, LandPageRenderer, ErrorPageRenderer,
SitemapRenderer, DetailPageRenderer,
)
from rest_framework_ccbv.config import VERSION
from rest_framework_ccbv.inspector import Attributes
KLASS_FILE_CONTENT = (
'{"2.2": {"rest_framework.generics": ["RetrieveDestroyAPIView", "ListAPIView"]},'
'"%s": {"rest_framework.generics": ["RetrieveDestroyAPIView", "ListAPIView"]}}' % VERSION
)
class TestBasePageRenderer(unittest.TestCase):
def setUp(self):
self.renderer = BasePageRenderer([ListAPIView])
self.renderer.template_name = 'base.html'
@patch('rest_framework_ccbv.renderers.BasePageRenderer.get_context', return_value={'foo': 'bar'})
@patch('rest_framework_ccbv.renderers.templateEnv.get_template')
@patch('rest_framework_ccbv.renderers.open', new_callable=mock_open)
def test_render(self, mock_open, get_template_mock, get_context_mock):
self.renderer.render('foo')
mock_open.assert_called_once_with('foo', 'w')
handle = mock_open()
handle.write.assert_called_once()
get_template_mock.assert_called_with('base.html')
get_template_mock.return_value.render.assert_called_with({'foo': 'bar'})
@patch('rest_framework_ccbv.renderers.templateEnv.get_template')
@patch('rest_framework_ccbv.renderers.open', mock_open())
def test_context(self, get_template_mock):
self.renderer.render('foo')
context = get_template_mock.return_value.render.call_args_list[0][0][0]
assert context['version_prefix'] == 'Django REST Framework'
assert context['version']
assert context['versions']
assert context['other_versions']
assert context['klasses'] == [ListAPIView]
class TestStaticPagesRenderered(unittest.TestCase):
def setUp(self):
self.rendererIndex = IndexPageRenderer([ListAPIView])
self.rendererLandPage = LandPageRenderer([ListAPIView])
self.rendererErrorPage = ErrorPageRenderer([ListAPIView])
@patch('rest_framework_ccbv.renderers.templateEnv.get_template')
@patch('rest_framework_ccbv.renderers.open', mock_open())
def test_template_name(self, get_template_mock):
self.rendererIndex.render('foo')
get_template_mock.assert_called_with('index.html')
self.rendererLandPage.render('foo')
get_template_mock.assert_called_with('home.html')
self.rendererErrorPage.render('foo')
get_template_mock.assert_called_with('error.html')
class TestSitemapRenderer(unittest.TestCase):
def setUp(self):
self.renderer = SitemapRenderer([ListAPIView])
@patch('rest_framework_ccbv.renderers.templateEnv.get_template')
@patch('rest_framework_ccbv.renderers.open', mock_open(read_data='{}'))
def test_context(self, get_template_mock):
self.renderer.render('foo')
context = get_template_mock.return_value.render.call_args_list[0][0][0]
assert context['latest_version']
assert context['base_url']
assert context['klasses'] == {}
class TestDetailPageRenderer(unittest.TestCase):
# @patch('rest_framework_ccbv.renderers.open', mock_open(read_data='{}'))
def setUp(self):
self.renderer = DetailPageRenderer(
[ListAPIView], ListAPIView.__name__, ListAPIView.__module__)
@patch('rest_framework_ccbv.renderers.templateEnv.get_template')
@patch('rest_framework_ccbv.renderers.open', mock_open(read_data=KLASS_FILE_CONTENT))
@patch('rest_framework_ccbv.inspector.open', mock_open(read_data=KLASS_FILE_CONTENT))
def test_context(self, get_template_mock):
self.renderer.render('foo')
context = get_template_mock.return_value.render.call_args_list[0][0][0]
assert context['other_versions'] == ['2.2']
assert context['name'] == ListAPIView.__name__
assert isinstance(context['ancestors'], (list, tuple))
assert isinstance(context['direct_ancestors'], (list, tuple))
assert isinstance(context['attributes'], Attributes)
assert isinstance(context['methods'], Attributes)
assert context['this_klass'] == ListAPIView
assert isinstance(context['children'], list)
assert context['this_module'] == ListAPIView.__module__
assert isinstance(context['unavailable_methods'], set)
| 44.61
| 101
| 0.726743
| 3,886
| 0.871105
| 0
| 0
| 3,034
| 0.680117
| 0
| 0
| 1,164
| 0.260928
|
a57349956429b4d3071a79222d869b969895aec7
| 1,320
|
py
|
Python
|
emailpal/tests/test_views.py
|
18F/django-email-pal
|
7471342741d814d19713d4353a3f566e490177a4
|
[
"CC0-1.0"
] | 5
|
2017-05-25T00:51:55.000Z
|
2020-06-13T16:37:42.000Z
|
emailpal/tests/test_views.py
|
18F/django-email-pal
|
7471342741d814d19713d4353a3f566e490177a4
|
[
"CC0-1.0"
] | 30
|
2017-05-25T00:41:45.000Z
|
2017-09-15T23:27:45.000Z
|
emailpal/tests/test_views.py
|
18F/django-email-pal
|
7471342741d814d19713d4353a3f566e490177a4
|
[
"CC0-1.0"
] | 2
|
2017-05-25T17:30:30.000Z
|
2021-02-14T11:32:33.000Z
|
import pytest
from django.conf.urls import include, url
from django.test import Client, override_settings
from .util import all_template_engines
from .test_sendable_email import MY_SENDABLE_EMAIL
urlpatterns = [
url(r'^examples/', include('emailpal.urls')),
]
@pytest.fixture
def client():
with override_settings(SENDABLE_EMAILS=[MY_SENDABLE_EMAIL],
ROOT_URLCONF=__name__):
yield Client()
@pytest.mark.parametrize('template_engine', all_template_engines())
def test_index_works(client, template_engine):
with template_engine.enable():
response = client.get('/examples/')
assert response.status_code == 200
assert 'MySendableEmail' in response.content.decode('utf-8')
def test_invalid_example_raises_404(client):
response = client.get('/examples/blarg.html')
assert response.status_code == 404
def test_valid_html_example_works(client):
response = client.get('/examples/{}.html'.format(MY_SENDABLE_EMAIL))
assert response.status_code == 200
assert 'I am HTML' in response.content.decode('utf-8')
def test_valid_plaintext_example_works(client):
response = client.get('/examples/{}.txt'.format(MY_SENDABLE_EMAIL))
assert response.status_code == 200
assert 'I am plaintext' in response.content.decode('utf-8')
| 31.428571
| 72
| 0.731818
| 0
| 0
| 151
| 0.114394
| 472
| 0.357576
| 0
| 0
| 181
| 0.137121
|
a5738d0cf40642a74790a9e4436c0e8da30c46ce
| 259
|
py
|
Python
|
db_tools/data/user_data.py
|
thebesteric/bright
|
4cd8173e7e53115395fcf25bd4db72990fdb6b3f
|
[
"MIT"
] | null | null | null |
db_tools/data/user_data.py
|
thebesteric/bright
|
4cd8173e7e53115395fcf25bd4db72990fdb6b3f
|
[
"MIT"
] | null | null | null |
db_tools/data/user_data.py
|
thebesteric/bright
|
4cd8173e7e53115395fcf25bd4db72990fdb6b3f
|
[
"MIT"
] | null | null | null |
from common.utils import crypt_utils
from bright import settings
row_data = [
{
'username': 'admin',
'password': crypt_utils.md5('admin', settings.APP_SALT),
'cellphone': '13966660426',
'email': 'admin@wesoft.com'
}
]
| 21.583333
| 64
| 0.6139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.320463
|
a57546dcf10db7ae680036449e4ff2d0de0b36d3
| 2,328
|
py
|
Python
|
employee-management/app.py
|
desitomato/flask-docker
|
03dadddfbda478180554f3364e91af41b72dce87
|
[
"MIT"
] | null | null | null |
employee-management/app.py
|
desitomato/flask-docker
|
03dadddfbda478180554f3364e91af41b72dce87
|
[
"MIT"
] | null | null | null |
employee-management/app.py
|
desitomato/flask-docker
|
03dadddfbda478180554f3364e91af41b72dce87
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask, request, jsonify
from flask_restful import Api
from resources.company import Company, Companylist
from resources.employee import Employee, EmployeeList
from db import db
from resources.user import UserRegister, UserLogin, UserLogout
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'prateek'
api = Api(app)
@app.before_first_request
def create_tables():
db.create_all()
api.add_resource(Company, '/company/<string:name>')
api.add_resource(Companylist, '/company')
api.add_resource(Employee, '/employee/<string:name>')
api.add_resource(EmployeeList, '/employee')
api.add_resource(UserRegister, '/register')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogout, '/logout/<string:username>')
if __name__ == '__main__':
db.init_app(app)
app.run(port=5000, debug=True)
# APIs without flask_restful
""" companies = [{
'name': 'samsung',
'employees': [{
'name':'prateek',
'salary':10000
}]
}]
@app.route('/company', methods=['POST'])
def create_company():
request_data = request.get_json()
new_company = {'name': request_data['name'],
'employees': []
}
companies.append(new_company)
return jsonify(new_company), 201
@app.route('/company/<string:name>')
def get_company(name):
for company in companies:
if company['name'] == name:
return jsonify(company), 200
@app.route('/company')
def get_company_list():
return jsonify(companies), 200
@app.route('/company/<string:name>/employee', methods=['POST'])
def create_employee_in_company(name):
request_data = request.get_json()
print(request_data)
for company in companies:
if company['name'] == name:
new_employee = {
'name' : request_data['name'],
'salary': request_data['salary']
}
company['employees'].append(new_employee)
return jsonify(new_employee), 201
@app.route('/company/<string:name>/employee')
def get_employee_in_company(name):
for company in companies:
if company['name'] == name:
return jsonify(company['employees']), 200 """
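# Hedged usage sketch (not part of the original module): exercising the resources
# registered above with the `requests` library while the app runs locally on port 5000.
# The JSON payload fields are assumptions; the real schemas live in the resource classes.
#
#     import requests
#     requests.post('http://localhost:5000/register',
#                   json={'username': 'prateek', 'password': 'secret'})
#     print(requests.get('http://localhost:5000/company').json())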
| 25.582418
| 91
| 0.668814
| 0
| 0
| 0
| 0
| 66
| 0.028351
| 0
| 0
| 1,573
| 0.675687
|
a57794912fc7fe375ace5545bb257b022affaf27
| 1,381
|
py
|
Python
|
workon/contrib/security/models.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/contrib/security/models.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/contrib/security/models.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
from django.db import models
from django.conf import settings
class DisallowedHost(models.Model):
created_at = models.DateTimeField("Créé le", auto_now_add=True)
updated_at = models.DateTimeField("Modifié le", auto_now=True, db_index=True)
http_host = models.CharField(u"HTTP_HOST", max_length=254, null=True, blank=True)
remote_addr = models.CharField(u"REMOTE_ADDR", max_length=254, null=True, blank=True)
http_x_forwarded_for = models.CharField(u"HTTP_X_FORWARDED_FOR", max_length=254, null=True, blank=True)
request_uri = models.CharField(u"REQUEST_URI", max_length=254, null=True, blank=True)
request_method = models.CharField(u"REQUEST_METHOD", max_length=254, null=True, blank=True)
query_string = models.CharField(u"QUERY_STRING", max_length=254, null=True, blank=True)
path_info = models.CharField(u"PATH_INFO", max_length=254, null=True, blank=True)
http_user_agent = models.CharField(u"HTTP_USER_AGENT", max_length=254, null=True, blank=True)
html_report = models.TextField(u"HTML report", null=True, blank=True)
def __unicode__(self):
return "%s througt %s" % ( self.http_host, self.http_x_forwarded_for )
class Meta:
db_table = "workon_security_disallowedhost"
verbose_name = 'Intrusion'
verbose_name_plural = 'Intrusions'
ordering = ['-created_at']
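# Hedged usage sketch (not part of the original module): recording a blocked request,
# typically from middleware that catches Django's DisallowedHost errors. The field
# values below are illustrative placeholders only.
#
#     DisallowedHost.objects.create(
#         http_host='evil.example.com',
#         remote_addr='203.0.113.7',
#         request_uri='/admin/login/',
#         request_method='GET',
#     )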
| 44.548387
| 107
| 0.735699
| 1,302
| 0.940751
| 0
| 0
| 0
| 0
| 0
| 0
| 263
| 0.190029
|
a57859ecd89b9b31c6238458c1c3953448a728df
| 1,234
|
py
|
Python
|
leetcode/31.py
|
sputnikW/algorithm
|
2c9412d7fc4fdb7f71c31ee3310833014272f0c9
|
[
"MIT"
] | null | null | null |
leetcode/31.py
|
sputnikW/algorithm
|
2c9412d7fc4fdb7f71c31ee3310833014272f0c9
|
[
"MIT"
] | null | null | null |
leetcode/31.py
|
sputnikW/algorithm
|
2c9412d7fc4fdb7f71c31ee3310833014272f0c9
|
[
"MIT"
] | null | null | null |
from typing import List


class Solution:
def nextPermutation(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
lenNums = len(nums)
if lenNums == 1:
return
maxFromTail = -1
for i in range(lenNums - 2, -1, -1):
maxFromTail = max(nums[i + 1], maxFromTail)
if nums[i] < maxFromTail:
                # find the closest larger number near the end of the array, scanning from right to left
indexOfminDelta = -1
for j in range(lenNums - 1, i, -1):
if nums[j] - nums[i] > 0:
indexOfminDelta = j
break
# swap curr number with the closest number
temp = nums[indexOfminDelta]
nums[indexOfminDelta] = nums[i]
nums[i] = temp
                # reverse the right part into ascending order in-place
k, l = i + 1, lenNums - 1
while k < l:
temp = nums[k]
nums[k] = nums[l]
nums[l] = temp
k += 1
l -= 1
return
nums.reverse()
return
"""
T=O(N)
"""
| 30.097561
| 80
| 0.423015
| 1,218
| 0.987034
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.189627
|
a57be98b089324586aa986fd832a393072298d21
| 2,412
|
py
|
Python
|
leads/migrations/0009_alter_bankstatement_bank_statement_and_more.py
|
sumeet2605/CRM
|
1c9a740ef052d0e51b2689dd3e1666ff4673db98
|
[
"MIT"
] | null | null | null |
leads/migrations/0009_alter_bankstatement_bank_statement_and_more.py
|
sumeet2605/CRM
|
1c9a740ef052d0e51b2689dd3e1666ff4673db98
|
[
"MIT"
] | null | null | null |
leads/migrations/0009_alter_bankstatement_bank_statement_and_more.py
|
sumeet2605/CRM
|
1c9a740ef052d0e51b2689dd3e1666ff4673db98
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0 on 2022-02-06 09:46
from django.db import migrations, models
import leads.models
import rizcrm.storage_backends
class Migration(migrations.Migration):
dependencies = [
('leads', '0008_user_profile_picture'),
]
operations = [
migrations.AlterField(
model_name='bankstatement',
name='bank_statement',
field=models.FileField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_kyc),
),
migrations.AlterField(
model_name='c2cdocument',
name='card_copy',
field=models.ImageField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_kyc),
),
migrations.AlterField(
model_name='c2cdocument',
name='card_statement',
field=models.FileField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_kyc),
),
migrations.AlterField(
model_name='document',
name='company_id',
field=models.ImageField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_documents),
),
migrations.AlterField(
model_name='document',
name='pan_card',
field=models.FileField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_documents),
),
migrations.AlterField(
model_name='document',
name='photo',
field=models.ImageField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_documents),
),
migrations.AlterField(
model_name='salaryslip',
name='salary_slip',
field=models.FileField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to=leads.models.handle_upload_kyc),
),
migrations.AlterField(
model_name='user',
name='profile_picture',
field=models.ImageField(blank=True, null=True, storage=rizcrm.storage_backends.PublicMediaStorage(), upload_to='profile_pictures/'),
),
]
| 43.071429
| 161
| 0.665837
| 2,270
| 0.941128
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.119818
|
a57e42b92567d730da83f49a7ddb9cffb40477e6
| 28,338
|
py
|
Python
|
ipm.py
|
AVilezhaninov/STM32_IAR_ProjectManager
|
906c34c70715d5ceec4937fb8d9705318017b3e9
|
[
"MIT"
] | null | null | null |
ipm.py
|
AVilezhaninov/STM32_IAR_ProjectManager
|
906c34c70715d5ceec4937fb8d9705318017b3e9
|
[
"MIT"
] | 4
|
2017-03-10T13:06:46.000Z
|
2017-03-10T13:24:00.000Z
|
ipm.py
|
AVilezhaninov/STM32_IAR_ProjectManager
|
906c34c70715d5ceec4937fb8d9705318017b3e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# MIT License
# Copyright (c) 2017 Aleksey Vilezhaninov
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import sys
import shutil
from lxml import etree
# ------------------------------------------------------------------------------
# Help messages ----------------------------------------------------------------
# ------------------------------------------------------------------------------
MAIN_HELP_MESSAGE = '''
IPM - IAR Embedded Workbench project manager for STM32F series MCUs (Cortex-M0, M3, M4, M7).
Program capabilities:
  - create new project with standard ST CMSIS files;
- add folder struct to existing project;
- clean EWARM workspace folder;
- rename existing workspace and project;
usage: ipm <command> <args> [-h | --help]
commands:
create Create new project
add_folder Copy folder to project and add folder to project file
clean Clean workspace folder
rename_workspace Rename workspace
rename_project Rename project
rename Rename both workspace and project
For details use: ipm <command> -h
IPM v0.1 Copyright (c) 2017 Aleksey Vilezhaninov a.vilezhaninov@gmail.com
'''
CREATE_HELP_MESSAGE = '''
Create new IAR EWARM project with specified name and device.
usage: ipm create <name> <device> [-h | --help]
parameters:
-n, --name <name> New project name
-d, --device <device> New project device
Device must be specified as in "CMSIS/Device/ST/STM32Fxxx/Include/stm32fxxx.h".
For usage - place the IPM executable file, the IPM "template" folder and the
standard ST CMSIS folder in the same directory and run the program.
'''
ADD_FOLDER_HELP_MESSAGE = '''
Copy folder to project source directory and add folder to project file.
usage: ipm add_folder <project_path> <folder_path> [ignore] [-h | --help]
parameters:
-p, --project_path <path> Project path
-f, --folder_path <path> Folder path
  -i, --ignore <ignore>       Ignore file extensions
For usage - just specify the project path, the path of the folder to add and the
ignore extensions divided with the "/" char (for example "-i c/h/cpp/icf/").
'''
CLEAN_HELP_MESSAGE = '''
Clean workspace folder - delete all files and folders except *.eww and *.ewp.
usage: ipm clean <workspace_path> [-h | --help]
parameters:
-w, --workspace_path <path> Workspace path
For usage - just specify workspace path.
'''
RENAME_WORKSPACE_HELP_MESSAGE = '''
Rename workspace with specified name.
usage: ipm rename_workspace <workspace_path> <name> [-h | --help]
parameters:
-w, --workspace_path <path> Workspace path
-n, --name <name> New workspace name
For usage - just specify workspace path and new workspace name.
'''
RENAME_PROJECT_HELP_MESSAGE = '''
Rename project with specified name.
usage: ipm rename_project <project_path> <workspace_path> <name> [-h | --help]
parameters:
-p, --project_path <path> Project path
-w, --workspace_path <path> Workspace path
-n, --name <name> New project name
For usage - just specify the project path, the path of the workspace containing
this project and the new project name.
'''
RENAME_HELP_MESSAGE = '''
Rename both workspace and project with specified name.
usage: ipm rename <project_path> <workspace_path> <name> [-h | --help]
parameters:
-p, --project_path <path> Project path
-w, --workspace_path <path> Workspace path
-n, --name <name> New project name
For usage - just specify the project path, the path of the workspace containing
this project and the new project name.
'''
# ------------------------------------------------------------------------------
# Argparser configuration
# ------------------------------------------------------------------------------
def CreateArgParser():
# Parser config ------------------------------------------------------------
parser = argparse.ArgumentParser(add_help = False)
parser.add_argument("-h", "--help", action = "store_const", const = True)
subparsers = parser.add_subparsers(dest = "command")
# Create command -----------------------------------------------------------
create_parser = subparsers.add_parser("create", add_help = False)
create_parser.add_argument("-n", "--name", help = "New project name")
create_parser.add_argument("-d", "--device", help = "New project device")
create_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Add folder command -------------------------------------------------------
add_folder_parser = subparsers.add_parser("add_folder", add_help = False)
add_folder_parser.add_argument("-p", "--project_path",
help = "Project path")
add_folder_parser.add_argument("-f", "--folder_path",
help = "Folder path")
add_folder_parser.add_argument("-i", "--ignore",
help = "Ignore extentions")
add_folder_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Clean command ------------------------------------------------------------
clean_parser = subparsers.add_parser("clean", add_help = False)
clean_parser.add_argument("-w", "--workspace_path", help = "Workspace path")
clean_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Rename workspace command -------------------------------------------------
rename_workspace_parser = subparsers.add_parser("rename_workspace",
add_help = False)
rename_workspace_parser.add_argument("-w", "--workspace_path",
help = "Workspace path")
rename_workspace_parser.add_argument("-n", "--name",
help = "New workspace name")
rename_workspace_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Rename project command ---------------------------------------------------
rename_project_parser = subparsers.add_parser("rename_project",
add_help = False)
rename_project_parser.add_argument("-p", "--project_path",
help = "Project path")
rename_project_parser.add_argument("-w", "--workspace_path",
help = "Workspace path")
rename_project_parser.add_argument("-n", "--name",
help = "New project name")
rename_project_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
# Rename command -----------------------------------------------------------
rename_parser = subparsers.add_parser("rename", add_help = False)
rename_parser.add_argument("-p", "--project_path",
help = "Project path")
rename_parser.add_argument("-w", "--workspace_path",
help = "Workspace path")
rename_parser.add_argument("-n", "--name",
help = "New project and workspace name")
rename_parser.add_argument("-h", "--help", help = "Help",
action = "store_const", const = True)
return parser
# ------------------------------------------------------------------------------
# Create new IAR EWARM project with specified name and device
# ------------------------------------------------------------------------------
def Create(project_name, project_device):
if not os.path.exists(project_name):
if project_device.lower()[0:6] == "stm32f":
# Copy source files and folders
CopyEWARMFiles(project_name)
CopyCMSISFiles(project_name, project_device)
ChangeProjectFile(project_name, project_device)
# Create user folders
MakeDir(project_name + "/source/user/inc")
MakeDir(project_name + "/source/user/src")
# Copy main.c to project source folder
shutil.copy2("./template/template_main.c",
project_name + "/source")
text_to_replace = '#include "stm32f4xx.h"'
replace_text = '#include "stm32f' + project_device[6] + 'xx.h"'
ReplaceTextInFile(project_name + "/source/template_main.c",
text_to_replace, replace_text)
# Rename template_main.c
rename_path = project_name + "/source"
try:
os.rename(rename_path + "/template_main.c",
rename_path + "/main.c")
except OSError:
Exit("Can not rename \"" + rename_path +
"/template_main.c\" file")
else:
Exit("Undefined device")
else:
Exit("\"" + project_name + "\" folder already exists")
# Copy and rename EWARM workspace and project template files
def CopyEWARMFiles(project_name):
if os.path.exists("template"):
# Create EWARM folder
MakeDir(project_name + "/EWARM")
# Copy template files
src = "template/template.eww"
dst = project_name + "/EWARM"
CopyFile(src, dst)
src = "template/template.ewp"
dst = project_name + "/EWARM"
CopyFile(src, dst)
# Rename template files in EWARM folder
project_file = project_name + "/EWARM/template.ewp"
workspace_file = project_name + "/EWARM/template.eww"
RenameProject(project_file, workspace_file, project_name)
RenameWorkspace(workspace_file, project_name)
else:
Exit("Can not find \"template\" folder")
# Copy CMSIS files in project CMSIS folder
def CopyCMSISFiles(project_name, project_device):
device = project_device.lower()
device_family = project_device[0:7].upper() + "xx"
if os.path.exists("CMSIS"):
# Copy ./CMSIS/Include folder with all files
src = "CMSIS/Include"
dst = project_name + "/source/CMSIS/Include"
CopyTree(src, dst)
# Copy CMSIS"s files and create folders
directory = project_name + "/source/CMSIS/Lib/ARM"
MakeDir(directory)
directory = project_name + "/source/CMSIS/Device/ST/"
directory += device_family + "/Include"
MakeDir(directory)
directory = project_name + "/source/CMSIS/Device/ST/"
directory += device_family + "/Source/iar/linker"
MakeDir(directory)
src = "CMSIS/Device/ST/" + device_family
src += "/Include/" + device_family.lower() + ".h"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Include"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Include/" + device + ".h"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Include"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Include/system_" + device_family.lower() + ".h"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Include"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Source/Templates/" + "system_" + device_family.lower() + ".c"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Source"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Source/Templates/iar/" + "startup_" + device + ".s"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Source/iar"
CopyFile(src, dst)
src = "CMSIS/Device/ST/" + device_family
src += "/Source/Templates/iar/linker/" + device + "_flash.icf"
dst = project_name + "/source/CMSIS/Device/ST/"
dst += device_family + "/Source/iar/linker"
CopyFile(src, dst)
else:
Exit("Can not find \"CMSIS\" folder")
# Change template lines in project file
def ChangeProjectFile(project_name, device):
device = device.lower()
device_family = device[0:7].upper() + "xx"
# Define project file path
project_file = project_name + "/EWARM/" + project_name + ".ewp"
# Define path to CMSIS device family folder
CMSIS_ST_template_path = "$PROJ_DIR$\..\source\CMSIS\Device\ST\STM32F4xx"
CMSIS_ST_path = "$PROJ_DIR$\..\source\CMSIS\Device\ST\\" + device_family
    # Replace device definition
text_to_replace = "STM32F407xx"
replace_text = device.upper()[0:9] + device.lower()[9:]
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Replace CMSIS include path
text_to_replace = CMSIS_ST_template_path + "\Include"
replace_text = CMSIS_ST_path + "\Include"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Replace linker path
text_to_replace = CMSIS_ST_template_path
text_to_replace += "\Source\iar\linker\stm32f407xx_flash.icf"
replace_text = CMSIS_ST_path
replace_text += "\Source\iar\linker\\" + device + "_flash.icf"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
    # Replace folder and file paths
text_to_replace = "<name>STM32F4xx</name>"
replace_text = "<name>" + device_family + "</name>"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Include\stm32f407xx.h"
replace_text = CMSIS_ST_path + "\Include\\" + device + ".h"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Include\stm32f4xx.h"
replace_text = CMSIS_ST_path + "\Include\\" + device_family.lower() + ".h"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Include\system_stm32f4xx.h"
replace_text = CMSIS_ST_path + "\Include\system_"
replace_text += device_family.lower() + ".h"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path
text_to_replace += "\Source\iar\linker\stm32f412rx_flash.icf"
replace_text = CMSIS_ST_path +"\Source\iar\linker\\" + device + "_flash.icf"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path
text_to_replace += "\Source\iar\startup_stm32f407xx.s"
replace_text = CMSIS_ST_path + "\Source\iar\startup_" + device + ".s"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
text_to_replace = CMSIS_ST_template_path + "\Source\system_stm32f4xx.c"
replace_text = CMSIS_ST_path + "\Source\system_" + device_family + ".c"
ReplaceTextInFile(project_file, text_to_replace, replace_text)
# Define device core file
device_f_series = device[6]
if device_f_series == "0":
device_core = "core_cm0.h"
elif device_f_series == "1" or device_f_series == "2":
device_core = "core_cm3.h"
elif device_f_series == "3" or device_f_series == "4":
device_core = "core_cm4.h"
elif device_f_series == "7":
device_core = "core_cm7.h"
else:
Exit("Can not define device core")
text_to_replace = "$PROJ_DIR$\..\source\CMSIS\Include\core_cm4.h"
replace_text = "$PROJ_DIR$\..\source\CMSIS\Include\\" + device_core
ReplaceTextInFile(project_file, text_to_replace, replace_text)
    # Replace output .hex and .out file names
ReplaceTextInFile(project_file, "template.hex", project_name + ".hex")
ReplaceTextInFile(project_file, "tempalte.out", project_name + ".out")
# ------------------------------------------------------------------------------
# Copy folder to project source directory. Add folder in project file
# ------------------------------------------------------------------------------
def AddFolder(project_path, folder_path, ignore_list):
if os.path.isfile(project_path):
if project_path.endswith(".ewp"):
if os.path.exists(folder_path):
# Copy folder to project
folder_path = DecoratePath(folder_path)
src = folder_path
dst = "/".join(project_path.split("/")[0:-2])
dst += "/source/" + src.split("/")[-1]
if os.path.exists(dst):
Exit("Folder \"" + dst + "\" exists")
CopyTree(src, dst)
# Add folder struct in project file
tree = etree.parse(project_path)
root = tree.getroot()
start_path_pos = len(folder_path.split("/")) - 1
elements = ParseFolder(folder_path, etree.Element("project"),
ignore_list, start_path_pos, True)
for node in elements:
text_node = etree.tostring(node, pretty_print = True)
root.append(etree.XML(text_node))
xml_file = open(project_path, "wb")
xml_file.write(etree.tostring(root, pretty_print = True,
encoding = "iso-8859-1", xml_declaration = True))
xml_file.close()
else:
Exit("Can not find \"" + folder_path + "\" folder")
else:
Exit("\"" + project_path + "\" is not *.ewp file")
else:
Exit("Can not find: \"" + project_path + "\" file")
# Parse folder and add subfolders and files in XML tree
def ParseFolder(folder_path, parent_node, ignore_list,
start_path_pos, first_entry):
if first_entry:
append_node = AppendNode("group", parent_node,
folder_path.split("/")[-1])
else:
append_node = parent_node
for item in os.listdir(folder_path):
item_path = folder_path + "/" + item
if os.path.isfile(item_path):
path = "$PROJ_DIR$/../source/"
path += "/".join(folder_path.split("/")[start_path_pos:])
path += "/" + item
if ignore_list != None:
if not any(item.endswith(x) for x in ignore_list.split("/")):
AppendNode("file", append_node, path)
else:
AppendNode("file", append_node, path)
else:
sub_node = AppendNode("group", append_node, item)
ParseFolder(item_path, sub_node, ignore_list, start_path_pos, False)
return parent_node
# Append node in XML tree
def AppendNode(node_tag, parent_node, node_name):
tag = etree.Element(node_tag)
parent_node.append(tag)
tag_name = etree.Element("name")
tag_name.text = node_name
tag.append(tag_name)
return tag
# ------------------------------------------------------------------------------
# Clean workspace folder - delete all files and folders except *.eww and *.ewp
# ------------------------------------------------------------------------------
def Clean(workspace_path):
if os.path.isfile(workspace_path):
if workspace_path.endswith(".eww"):
workspace_folder = workspace_path.split("/")[0:-1]
workspace_folder = "/".join(workspace_folder)
for item in os.listdir(workspace_folder):
item_path = workspace_folder + "/" + item
if os.path.isfile(item_path):
if not item.endswith(".eww") and not item.endswith(".ewp"):
try:
os.remove(item_path)
except OSError:
Exit("Can not delete \"" + item_path + "\" file")
else:
try:
shutil.rmtree(item_path, True)
except IOError:
Exit("Can not delete \"" + item_path + "\" folder")
else:
Exit("\"" + workspace_path + "\" is not *.eww file")
else:
Exit("Can not find: \"" + workspace_path + "\" file")
# ------------------------------------------------------------------------------
# Rename workspace with specified name
# ------------------------------------------------------------------------------
def RenameWorkspace(workspace_path, new_workspace_name):
if os.path.isfile(workspace_path):
if workspace_path.endswith(".eww"):
rename_path = workspace_path.split("/")
rename_path[-1] = new_workspace_name + ".eww"
rename_path = "/".join(rename_path)
try:
os.rename(workspace_path, rename_path)
except OSError:
Exit("Can not rename \"" + workspace_path + "\" file")
else:
Exit("\"" + workspace_path + "\" is not *.eww file")
else:
Exit("Can not find: \"" + workspace_path + "\" file")
# ------------------------------------------------------------------------------
# Rename project with specified name
# ------------------------------------------------------------------------------
def RenameProject(project_path, workspace_path, new_project_name):
if os.path.isfile(project_path):
if os.path.isfile(workspace_path):
if project_path.endswith(".ewp"):
if workspace_path.endswith(".eww"):
rename_path = project_path.split("/")
old_project_name = rename_path[-1]
rename_path[-1] = new_project_name + ".ewp"
rename_path = "/".join(rename_path)
try:
os.rename(project_path, rename_path)
except OSError:
Exit("Can non rename \"" + project_path + "\" file")
text_to_replace = "$WS_DIR$\\" + old_project_name
replace_text = "$WS_DIR$\\" + new_project_name + ".ewp"
ReplaceTextInFile(workspace_path, text_to_replace,
replace_text)
else:
Exit("\"" + workspace_path + "\" is not *.eww file")
else:
Exit("\"" + project_path + "\" is not *.ewp file")
else:
Exit("Can not find: \"" + workspace_path + "\" file")
else:
Exit("Can not find: \"" + project_path + "\" file")
# ------------------------------------------------------------------------------
# Common functions
# ------------------------------------------------------------------------------
# Replace text in file
def ReplaceTextInFile(file_name, text_to_replace, replace_text):
if os.path.exists(file_name):
try:
file = open(file_name, "r")
text = file.read()
file.close()
file = open(file_name, "w")
file.write(text.replace(text_to_replace, replace_text))
file.close()
except IOError:
Exit("Can not handle \"" + file_name + "\" file")
else:
Exit("Can not find \"" + file_name + "\" file")
# Copy folder tree
def CopyTree(src, dst, symlinks = False, ignore = None):
if not os.path.exists(dst):
MakeDir(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
try:
shutil.copytree(s, d, symlinks, ignore)
except IOError:
Exit("Can not copy \"" + s + "\" folder")
else:
CopyFile(s, d)
# Make directory
def MakeDir(directory):
try:
os.makedirs(directory)
except OSError:
Exit("Can not create \"" + directory + "\" folder")
# Copy file
def CopyFile(src, dst):
try:
shutil.copy2(src, dst)
except IOError:
Exit("Can not copy \"" + src + "\"")
# Decorate path to next template "folder/subfolder/file.xxx"
def DecoratePath(path):
if path.endswith("/"):
path = "/".join(path.split("/")[0:-1])
if path.startswith("./"):
path = "/".join(path.split("/")[1:])
return path
# Print message and exit
def Exit(exit_message):
print(exit_message)
exit(1)
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
arg_parser = CreateArgParser()
arg_parser_namespace = arg_parser.parse_args()
# Create command
if arg_parser_namespace.command == "create":
if (arg_parser_namespace.help == True or
arg_parser_namespace.name == None or
arg_parser_namespace.device == None):
Exit(CREATE_HELP_MESSAGE)
else:
Create(arg_parser_namespace.name, arg_parser_namespace.device)
# Add folder command
elif arg_parser_namespace.command == "add_folder":
if (arg_parser_namespace.help == True or
arg_parser_namespace.project_path == None or
arg_parser_namespace.folder_path == None):
Exit(ADD_FOLDER_HELP_MESSAGE)
else:
AddFolder(arg_parser_namespace.project_path,
arg_parser_namespace.folder_path,
arg_parser_namespace.ignore)
# Clean command
elif arg_parser_namespace.command == "clean":
if (arg_parser_namespace.help == True or
arg_parser_namespace.workspace_path == None):
Exit(CLEAN_HELP_MESSAGE)
else:
Clean(arg_parser_namespace.workspace_path)
# Rename workspace command
elif arg_parser_namespace.command == "rename_workspace":
if (arg_parser_namespace.help == True or
arg_parser_namespace.workspace_path == None or
arg_parser_namespace.name == None):
Exit(RENAME_WORKSPACE_HELP_MESSAGE)
else:
RenameWorkspace(arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
# Rename project command
elif arg_parser_namespace.command == "rename_project":
if (arg_parser_namespace.help == True or
arg_parser_namespace.project_path == None or
arg_parser_namespace.workspace_path == None or
arg_parser_namespace.name == None):
Exit(RENAME_PROJECT_HELP_MESSAGE)
else:
RenameProject(arg_parser_namespace.project_path,
arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
# Rename command
elif arg_parser_namespace.command == "rename":
if (arg_parser_namespace.help == True or
arg_parser_namespace.project_path == None or
arg_parser_namespace.workspace_path == None or
arg_parser_namespace.name == None):
Exit(RENAME_HELP_MESSAGE)
else:
RenameProject(arg_parser_namespace.project_path,
arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
RenameWorkspace(arg_parser_namespace.workspace_path,
arg_parser_namespace.name)
# Undefined command
else:
Exit(MAIN_HELP_MESSAGE)
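# Hedged usage sketch (not part of the original script); invocations inferred from the
# argparse configuration above, with project name and paths as placeholders:
#
#     python ipm.py create -n BlinkyF4 -d stm32f407xx
#     python ipm.py add_folder -p BlinkyF4/EWARM/BlinkyF4.ewp -f ./drivers -i icf/s
#     python ipm.py clean -w BlinkyF4/EWARM/BlinkyF4.eww
#     python ipm.py rename -p BlinkyF4/EWARM/BlinkyF4.ewp -w BlinkyF4/EWARM/BlinkyF4.eww -n Blinky407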
| 38.979367
| 80
| 0.571071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11,080
| 0.390994
|