| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
my-rl/qlearning_agent.py
|
siowyisheng/rl-learning
| 0
|
12780451
|
<gh_stars>0
# my implementation of a q-learning agent
import random
import numpy as np
class QLearningAgent:
"""An agent which uses Q learning to optimize actions in an environment."""
def __init__(self, alpha, gamma):
self._Q = {}
self.alpha = alpha
self.gamma = gamma
def decide(self, state, action_space, epsilon):
if np.random.random() < epsilon:
return random.choice(action_space)
else:
return _best_action(self._Q, state, action_space)
# example usage (outside the class): action = agent.decide(state, action_space, epsilon)
def learn(self, state, action, next_state, reward, action_space):
alpha = self.alpha
old_value = self._Q.get((state, action), 0)
next_best_action = _best_action(self._Q, next_state, action_space)
next_value = self._Q.get((next_state, next_best_action), 0)
discounted_return = reward + self.gamma * next_value
self._Q[state, action] = (1 - alpha) * old_value + (
alpha * discounted_return) # yapf: ignore
def _best_action(Q, state, action_space):
values = np.array([Q.get((state, a), 0) for a in action_space])
return action_space[np.argmax(values)]
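# A minimal usage sketch (hypothetical: assumes an `env` with reset()/step() and a
# discrete `action_space` list, neither of which is part of this file):
# agent = QLearningAgent(alpha=0.1, gamma=0.99)
# state = env.reset()
# for _ in range(1000):
#     action = agent.decide(state, action_space, epsilon=0.1)
#     next_state, reward, done = env.step(action)
#     agent.learn(state, action, next_state, reward, action_space)
#     state = env.reset() if done else next_state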
| 3.25
| 3
|
scorebot/triggers/team_triggers.py
|
cloudlinux/scorebot
| 3
|
12780452
|
from scorebot.db import db_api, db_utils
from scorebot.triggers.triggers_base import TeamTrigger, GroupOfTeamTriggers
team_triggers = {
db_api.PatchsetProposed.type: GroupOfTeamTriggers(
score_db_api=db_api.PatchsetProposed,
triggers=[
# week
TeamTrigger(
name="team_patchset_proposed_for_week1",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=50,
msg="{owner} making good progress this week, posted "
"their 50th patch-set",
),
TeamTrigger(
name="team_patchset_proposed_for_week2",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=100,
msg="{owner} is really active this week, they just posted "
"their 100th patch-set!",
),
# month
TeamTrigger(
name="team_patchset_proposed_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=200,
msg="{owner} made 200 patchsets this month! Cheers!",
),
TeamTrigger(
name="team_patchset_proposed_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=300,
msg="{owner} even reached 300 patchsets this month!",
),
]
),
db_api.PatchsetMerged.type: GroupOfTeamTriggers(
score_db_api=db_api.PatchsetMerged,
triggers=[
# today
TeamTrigger(
name="team_patchset_merged_for_today1",
get_date_scope=db_utils.DateScope.today,
trigger_point=5,
msg="{owner} just merged their 5th review today!"
),
# week
TeamTrigger(
name="team_patchset_merged_for_week1",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=10,
msg="{owner} is doing great! 10 reviews landed this week!"
),
# month
TeamTrigger(
name="team_patchset_merged_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=30,
msg="{owner} rocks! 30 reviews merged this month already!"
),
TeamTrigger(
name="team_patchset_merged_for_month2",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=50,
msg="{owner} keeps putting +2 randomly: 50 reviews merged "
"this month. And looks like the're not gonna stop!"
)
]
),
db_api.PatchsetReviewed.type: GroupOfTeamTriggers(
score_db_api=db_api.PatchsetReviewed,
triggers=[
# week
TeamTrigger(
name="team_patchset_reviewed_for_week1",
get_date_scope=db_utils.DateScope.this_week,
trigger_point=50,
msg="{owner} is really keeps an eye on each other, made 50 "
"reviews this week already!"
),
# month
TeamTrigger(
name="team_patchset_reviewed_for_month1",
get_date_scope=db_utils.DateScope.this_month,
trigger_point=100,
msg="{owner} has great review activity this month, reached "
"100 reviews just now!"
)
]
)
}
| 2.078125
| 2
|
scripts/conv_exp_hospital_section.py
|
wecacuee/modern-occupancy-grid
| 21
|
12780453
|
import convergence_experiment
def executables():
return convergence_experiment.executables("Data/hospital_section_player/",
2000)
| 1.296875
| 1
|
PLM/configs/profileConfigs.py
|
vtta2008/pipelineTool
| 7
|
12780454
|
# -*- coding: utf-8 -*-
"""
Script Name:
Author: <NAME>/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
VFX_PROJ_PROFILER = dict(
APPS = ["maya", "zbrush", "mari", "nuke", "photoshop", "houdini", "after effects"],
MASTER = ["assets", "sequences", "deliverables", "docs", "editorial", "sound", "rcs", "RnD"],
TASKS = ["art", "plt_model", "rigging", "surfacing"],
SEQTASKS = ["anim", "comp", "fx", "layout", "lighting"],
ASSETS = {"heroObj": ["washer", "dryer"],
"main character"
"environment": [],
"props": []},
STEPS = ["publish", "review", "work"],
MODELING = ["scenes", "fromZ", "toZ", "objImport", "objExport", "movie"],
RIGGING = ["scenes", "reference"],
SURFACING = ["scenes", "sourceimages", "images", "movie"],
LAYOUT = ["scenes", "sourceimages", "images", "movie", "alembic"],
LIGHTING = ["scenes", "sourceimages", "images", "cache", "reference"],
FX = ["scenes", "sourceimages", "images", "cache", "reference", "alembic"],
ANIM = ["scenes", "sourceimages", "images", "movie", "alembic"],
)
# -------------------------------------------------------------------------------------------------------------
# Created by <NAME> on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
| 1.429688
| 1
|
sail_on_client/harness/__init__.py
|
darpa-sail-on/sail-on-client
| 1
|
12780455
|
<gh_stars>1-10
"""Sail On client harness package."""
| 0.898438
| 1
|
learning-phases/src/neuralnetwork/network.py
|
vonhachtaugust/learning-phases
| 0
|
12780456
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from keras.models import Sequential, Model
from keras.models import model_from_yaml
import keras.backend as K
from keras.callbacks import Callback
from ..utility.utils import path
def custom_uniform(shape, range=(-1, 1), name=None):
"""
Example of custom function for keras.
"""
min_, max_ = range
return K.variable(
np.random.uniform(low=min_, high=max_, size=shape), name=name)
# Example usage:
# net.add(Dense(10, input_dim=5, init=lambda shape,
# name: custom_uniform(shape, (-10, 5), name)))
class TestCallback(Callback):
"""
Example callback class for keras.
"""
def __init__(self, generator):
self.data_generator = generator
def on_epoch_end(self, epoch, logs={}):
x, y = next(self.data_generator)
loss, acc = self.model.evaluate(x, y, verbose=0)
print('\nTesting loss: {}, acc: {}\n'.format(loss, acc))
class Network(object):
"""
Base class for the various neural networks.
"""
def __init__(self):
self.metrics = ()
self.model = Sequential()
def first_layer_output(self, x):
weights = self.get_layer_weights(1)
W = weights[0]
b = weights[1]
return np.dot(x, W) + b
def predict_on_batch(self, x):
return self.model.predict_on_batch(x)
def get_weights(self, layer=None):
if layer is None:
return self.model.get_weights()
return self.model.layers[layer].get_weights()
def weight_shapes(self):
return self.get_weights()[0].shape, self.get_weights()[1].shape
def set_layer_weights(self, layer, weights):
self.model.layers[layer].set_weights(
[weights, self.get_weights(layer)[1]])
def set_layer_bias(self, layer, bias):
self.model.layers[layer].set_weights(
[self.get_weights(layer)[0], bias])
def set_layer_parameters(self, layer, weights, bias):
self.model.layers[layer].set_weights([weights, bias])
def get_layer_weights(self, layer):
return self.model.get_layer(index=layer).get_weights()
def train_once(self, data, batch_size):
self.model.fit(data[0], data[1], epochs=1, batch_size=batch_size)
def train_on_generator(self, training_set_generator, batches_per_epoch,
epochs, verbose):
h = self.model.fit_generator(
training_set_generator, batches_per_epoch, epochs, verbose=verbose)
loss = h.history['loss'][epochs - 1]
acc = h.history['categorical_accuracy'][epochs - 1]
self.metrics = '{0:.3g}'.format(loss), '{0:.3g}'.format(acc)
def save(self, relative_path, filename=None):
if filename is None:
filename = 'model'
absolute_path = ''.join([path(), relative_path, filename])
network_out = ''.join([absolute_path, '.yaml'])
weight_out = ''.join([absolute_path, '.h5'])
model_yaml = self.model.to_yaml()
with open(network_out, 'w') as yaml_file:
yaml_file.write(model_yaml)
self.model.save_weights(weight_out)
def load(self, relative_path, filename):
absolute_path = ''.join([path(), relative_path, filename])
network = ''.join([absolute_path, '.yaml'])
weights = ''.join([absolute_path, '.h5'])
with open(network, 'r') as yaml_file:
loaded_model_yaml = yaml_file.read()
self.model = model_from_yaml(loaded_model_yaml)
self.model.load_weights(weights)
| 3.09375
| 3
|
pyabsa/network/lcf_pooler.py
|
yangheng95/LCF-ABSA
| 31
|
12780457
|
<filename>pyabsa/network/lcf_pooler.py
# -*- coding: utf-8 -*-
# file: lcf_pooler.py
# time: 2021/6/29
# author: yangheng <<EMAIL>>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import numpy
import torch
import torch.nn as nn
class LCF_Pooler(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states, lcf_vec):
device = hidden_states.device
lcf_vec = lcf_vec.detach().cpu().numpy()
pooled_output = numpy.zeros((hidden_states.shape[0], hidden_states.shape[2]), dtype=numpy.float32)
hidden_states = hidden_states.detach().cpu().numpy()
for i, vec in enumerate(lcf_vec):
lcf_ids = [j for j in range(len(vec)) if sum(vec[j] - 1.) == 0]
pooled_output[i] = hidden_states[i][lcf_ids[len(lcf_ids) // 2]]
pooled_output = torch.Tensor(pooled_output).to(device)
pooled_output = self.dense(pooled_output)
pooled_output = self.activation(pooled_output)
return pooled_output
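# A minimal usage sketch (shapes and config are illustrative assumptions, not from the file):
# from types import SimpleNamespace
# config = SimpleNamespace(hidden_size=768)
# pooler = LCF_Pooler(config)
# hidden = torch.randn(2, 80, 768)
# lcf = torch.zeros(2, 80, 768); lcf[:, 40, :] = 1.0   # mark position 40 as the local context
# pooled = pooler(hidden, lcf)   # -> shape (2, 768)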
| 2.4375
| 2
|
tests/letter_tests2.py
|
subramani95/open-tamil
| 0
|
12780458
|
# -*- coding: utf-8 -*-
# (C) 2017 <NAME>
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
from opentamiltests import *
import tamil.utf8 as utf8
from tamil.tscii import TSCII
import codecs
if PYTHON3:
class long(int):
pass
class Letters(unittest.TestCase):
def test_uyir_mei_split(self):
ak = utf8.splitMeiUyir(u"ஃ")
self.assertEqual(ak,u"ஃ")
il = utf8.splitMeiUyir(u"ல்")
self.assertEqual(il,u"ல்")
il,ee = utf8.splitMeiUyir(u"லி")
self.assertEqual((il,ee),(u"ல்",u"இ"))
def test_classifier(self):
expected = []
expected.extend(['english']*3)
expected.extend(['digit']*4)
expected.extend(['kuril','nedil','uyirmei','vallinam','uyirmei'])
data = list(map(utf8.classify_letter,utf8.get_letters(u"abc1230அஆரெட்டை")))
self.assertEqual(data,expected)
def demo(self):
for l in utf8.get_letters_iterable(u"இதுதாண்டாபோலிசு"):
print("%s - %s"%(l,utf8.classify_letter(l)))
def test_classified_except(self):
with self.assertRaises(ValueError) as ve:
utf8.classify_letter(u'.')
if __name__ == '__main__':
unittest.main()
| 2.859375
| 3
|
examples/EnumEditor_demo.py
|
rwl/traitsbackendpyjamas
| 1
|
12780459
|
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
"""
Implementation of an EnumEditor demo for Traits UI
This demo shows each of the four styles of the EnumEditor
Fixme: This only shows the capabilities of the old-style EnumEditor
"""
# Imports:
from enthought.traits.api \
import HasTraits, Enum
from enthought.traits.ui.api \
import Item, Group, View
# Define the demo class:
class EnumEditorDemo ( HasTraits ):
""" Defines the main EnumEditor demo class. """
# Define an Enum trait to view:
name_list = Enum( 'A-495', 'A-498', 'R-1226', 'TS-17', 'TS-18' )
# Items are used to define the display, one Item per editor style:
enum_group = Group(
Item( 'name_list', style = 'simple', label = 'Simple' ),
Item( '_' ),
Item( 'name_list', style = 'custom', label = 'Custom' ),
Item( '_' ),
Item( 'name_list', style = 'text', label = 'Text' ),
Item( '_' ),
Item( 'name_list', style = 'readonly', label = 'ReadOnly' )
)
# Demo view:
view = View(
enum_group,
title = 'EnumEditor',
buttons = ['OK'],
resizable = True
)
# Create the demo:
demo = EnumEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == '__main__':
demo.configure_traits()
| 2.046875
| 2
|
solutions/solution970.py
|
Satily/leetcode_python_solution
| 3
|
12780460
|
class Solution:
def powerfulIntegers(self, x, y, bound):
"""
:type x: int
:type y: int
:type bound: int
:rtype: List[int]
"""
def powers(n, bound):
if n == 1:
return [1]
power_list = []
s = 1
while s <= bound:
power_list.append(s)
s *= n
return power_list
result = set()
x_powers = powers(x, bound)
y_powers = powers(y, bound)
for x_power in x_powers:
for y_power in y_powers:
if x_power + y_power <= bound:
result.add(x_power + y_power)
return list(result)
if __name__ == "__main__":
print(Solution().powerfulIntegers(2, 3, 10))
print(Solution().powerfulIntegers(3, 5, 15))
print(Solution().powerfulIntegers(1, 2, 100))
| 3.765625
| 4
|
ci.py
|
mhalano/mad_scientists_lab
| 0
|
12780461
|
<gh_stars>0
#! /usr/bin/env python3
import os
import sys
import yaml
import git
import subprocess
tempdir = "/tmp"
targetdir = "ci"
config_file="pipeline.yml"
pipeline_name=sys.argv[1]
repo_address=sys.argv[2]
#def check_arguments():
#Check that both command-line arguments exist and are valid
#error out if something goes wrong.
#def clone_repo(address, directory):
try:
git.Git(tempdir).clone(repo_address, targetdir)
print("Repositorio clonado")
except:
pass
os.chdir(tempdir + "/" + targetdir)
retval = os.getcwd()
print ("Current working directory %s" % retval)
#error if the file does not exist
#def load_config(filename):
file = open(config_file, "r")
data = yaml.safe_load(file)  # the variable needs to be module-level, otherwise it only works inside the method.
# error if the step does not exist among the tasks
#def do_step(step_name):
# else:
# print("Task unknown")
#change to the sources directory
#chdir
#def run_pipeline(pipeline_name):
for pipeline in data['pipelines']:
# print("pipeline = " + str(pipeline))
if pipeline['name'] == pipeline_name:
for step in pipeline['tasks']:
# print("pipeline_tasks = " + str(pipeline['tasks']))
# print("step = " + str(step))
# print("data_tasks = " + str(data['tasks']))
if step in data['tasks']:
# print(step)
print(data['tasks'][step])
# needs to be a list of arguments
# call((data['tasks'][step]))
subprocess.run(data['tasks'][step], shell=True)
# os.system(data['tasks'][step])
#def main():
# try:
#Check_arguments
# print("Antes de clonar")
# print(repo_address + temp_dir)
# clone_repo(repo_address, temp_dir)
# print("depois de clonar")
# print("Antes de carregar o yaml")
# data = load_config(temp_dir + "/" + target_dir + "/" + config_file)
# print("depois de carregar o yaml")
# print(data)
# print("antes de rodar a pipeline")
# run_pipeline(pipeline_name)
# print("depois de rodar a pipeline")
# except:
# print("erro")
#main()
| 2.734375
| 3
|
cypherpunkpay/web/views_admin/admin_charge_views.py
|
RandyMcMillan/CypherpunkPay
| 44
|
12780462
|
<reponame>RandyMcMillan/CypherpunkPay<gh_stars>10-100
from pyramid.view import (view_config)
from cypherpunkpay import App
from cypherpunkpay.usecases.report_charges_uc import ReportChargesUC
from cypherpunkpay.web.views_admin.admin_base_view import AdminBaseView
class AdminChargeViews(AdminBaseView):
@view_config(route_name='get_admin_charges', permission='admin', renderer='web/html/admin/charges.jinja2')
def get_admin_charges(self):
charges = App().db().get_recently_created_charges()
cr_7d, cr_all_time = ReportChargesUC(self.db()).exec()
return {
'title': 'Admin Charges',
'charges': charges,
'cr': cr_all_time
}
| 1.96875
| 2
|
src/sweetrpg_COMPONENT_web/application/auth/__init__.py
|
sweetrpg/web-template
| 0
|
12780463
|
# -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
"""
"""
from flask_oauthlib.provider import OAuth2Provider
# from authlib.integrations.flask_client import OAuth
from flask import current_app
import logging
from sweetrpg_COMPONENT_web.application import constants
import os
| 1.351563
| 1
|
main.py
|
GryPr/Vireo
| 0
|
12780464
|
<filename>main.py
from utilities import startup
def main() -> None:
startup.run_sanity_checks()
bot = startup.create_bot()
startup.load_extensions(bot, 'cogs')
startup.run_bot(bot)
if __name__ == "__main__":
main()
| 1.625
| 2
|
dictionaries.py
|
tiagopariz/FaztCursoPythonParaPrincipiantes
| 0
|
12780465
|
<filename>dictionaries.py
# list
cart = [
["book1", 3, 4.99],
["book2", 3, 6.99],
["book3", 3, 15.99]
]
# Dictionary
product = {
"name": "Book1",
"quantity": 3,
"price": 3
}
print(type(product)) # <class 'dict'>
person = {
"first_name": "ryan",
"last_name": "ray"
}
print(type(person)) # <class 'dict'>
print(dir(person)) # help
print(person.keys())
print(person.items())
person.clear()
print(person)
del person;
# print(person)
products = [
{"name": "Book", "price": 10.99},
{"name": "Laptop", "price": 100.99}
]
print(products)
| 3.453125
| 3
|
src/regression/linear_regression.py
|
eric-ycw/solomon
| 0
|
12780466
|
import numpy as np
import sys
sys.path.insert(0, '..')
from src.utils import *
class LinearRegression:
def __init__(self):
self.params = None
def train(self, X, y, iterations=5000, learning_rate=0.01, display=False):
'''
Input parameters:
X: (mxn) array where m is the number of training examples and n is the number of features
y: (mx1) array with target values
'''
# We initialize parameters as a (1xn) array of zeros
self.params = np.zeros((X.shape[1], 1))
loss_hist = np.zeros((1,0))
for i in range(iterations):
y_hat = X.dot(self.params)
loss = MeanSquaredError.loss(y, y_hat)
loss_hist = np.append(loss_hist, loss)
self.params = BatchGradientDescent.optimize(
X, y, y_hat, self.params, learning_rate, MeanSquaredError)
if display:
show_progress(i, iterations, loss)
if display:
print('\n')
return loss_hist, loss
def predict(self, X, y):
y_hat = X.dot(self.params)
loss = MeanSquaredError.loss(y, y_hat)
return y_hat, loss
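# A minimal usage sketch (assumes MeanSquaredError and BatchGradientDescent are
# provided by src.utils as imported above; the synthetic data is illustrative):
# X = np.hstack([np.ones((100, 1)), np.random.rand(100, 1)])   # bias column + one feature
# y = 1.0 + 3.0 * X[:, [1]] + 0.1 * np.random.randn(100, 1)
# model = LinearRegression()
# loss_hist, final_loss = model.train(X, y, iterations=1000, learning_rate=0.1)
# y_hat, test_loss = model.predict(X, y)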
| 3.46875
| 3
|
Python/exo v2/exo 3 v2.py
|
Cantin-L/Professionelle
| 1
|
12780467
|
<filename>Python/exo v2/exo 3 v2.py
'''
Author: <NAME>.
Website: https://itliocorp.fr
Version: 0.2
License: MIT 3.0
Topic:
Concepts: Printing a variable, Importing a value, A little bit of math, Comments
Functions to use:
input(...)
print(...)
'''
| 1.523438
| 2
|
CDKProjectVer2/AppStacks/ApplicationLoadBalancer/stacks/alb_conf.py
|
binhlh23/cdkbynooddev
| 0
|
12780468
|
<filename>CDKProjectVer2/AppStacks/ApplicationLoadBalancer/stacks/alb_conf.py
STACK = 'ALBHDBank'
AZ_1 = 'ap-southeast-1a'
AZ_2 = 'ap-southeast-1b'
AZ_3 = 'ap-southeast-1c'
VPC_ID = 'vpc-0d5fa16a59668d675'
ALBSUBNET = 'subnet-0a436875d4d79e4f5'
| 1.03125
| 1
|
native/all_load_tracking/plotter.py
|
EfficientAI/efficient_cv
| 0
|
12780469
|
<reponame>EfficientAI/efficient_cv
from matplotlib import pyplot as plt
def main():
timestamps = []
avgs = []
exps = []
loads = []
with open('output.txt', 'rb') as f:
lines = f.readlines()
for line in lines:
timestamp, avg, exp, load = line.split()
timestamps.append(int(timestamp))
avgs.append(float(avg))
exps.append(float(exp))
loads.append(float(load))
plt.plot(timestamps, loads)
plt.plot(timestamps, avgs)
plt.plot(timestamps, exps)
plt.show()
if __name__ == "__main__":
main()
| 2.5
| 2
|
scripts/2014/campdataparser.py
|
iBurnApp/iBurn-Data
| 7
|
12780470
|
'''
Converts Official Camp data provided in XML by Burning Man Org
to JSON of the PlayaEvents API format.
Input:
Burning Man Org Camp Placement XML
PlayaEvents Camp without location JSON
e.g: http://playaevents.burningman.com/api/0.2/2014/camp/
Output:
iBurn Camp Json (./camps.json)
'''
import xml.etree.ElementTree as ET
import re
import json
import pdb
# Should we write results to json?
DRY_RUN = False
''' Circular Streets '''
# Valid streets to appear in a playa address
# of form <Street> & <Time>
circ_streets = [
'Esplanade',
'Antioch',
'Basra',
'Cinnamon',
'Darjeeling',
'Ephesus',
'Frankincense',
'Gold',
'Haifa',
'Isfahan',
'Jade',
'Kandahar',
'Lapis',
'Rod\'s Road',
'Airport Rd',
'Portal',
'Public Plaza',
'Inner Circle', # Convert to Center Camp Plaza
'Plaza' # Convert to Center Camp Plaza
]
circ_street_duplicate_fixer = {
'Plaza' : 'Center Camp Plaza',
'Inner Circle' : 'Center Camp Plaza'
}
''' Playa time regex '''
time_re = re.compile('([1][0-2]|[1-9])[:/][0-5][0-9]')
def _clean_string(string):
if string:
string = re.sub(r'^[\n\t\s]+', '', string)
string = re.sub(r'[\n\t\s]+$', '', string)
string = string.replace(" (dot) ", ".")
string = string.replace(" (at) ", "@")
string = re.sub(r"[\n\t\s]+\s+[\n\t\s]+", "\n\n", string)
if string.find("by ") == 0:
string = string[4:]
string = string.split(", ")
return string
def cache_camp_loc(name, location, cache):
'''
Possible input:
name :
'Some camp name'
location:
'5:45 & Jade'
'Isfahan & 9:45'
'Rod's Road @ 1:00'
'Located within Black Rock Power CO-OP'
cache (output):
'Some camp name' : Isfahan & 9:45
'''
if location:
if 'located within' in location.lower():
# Skip, not cacheable
#print 'skipping ' + location
return
street = ''
time = time_re.search(location)
if time:
time = time.group(0)
for c_street in circ_streets:
if c_street in location:
street = c_street
break
else:
print 'unknown loc ' + location
return
#print street + ' & ' + time + ' (' + location + ')'
cache[name.strip().lower()] = street + ' & ' + time
tree = ET.parse("2014PlacementPublicDataFinal.xml")
print 'Checking ' + str(len(list(tree.getroot()))) + " items"
# List of all camps for json export
camp_list = []
# Map of stripped, lowercase camp name to playaAddress
camp_to_loc = {}
# List of stripped, lowercase camp names to help merge
# Burning Man Org and PlayaEvents data
camp_name_list = []
# First Pass: Create a map of camp name -> location to correct
# Camps with location of form: "Located within <CAMP Name>"
# Also populate camp_name_list
for element in list(tree.getroot().findall('{http://www.filemaker.com/fmpdsoresult}ROW')):
if element.find('{http://www.filemaker.com/fmpdsoresult}T_Name').text and element.find('{http://www.filemaker.com/fmpdsoresult}T_Loc_forExport').text:
camp_loc = element.find('{http://www.filemaker.com/fmpdsoresult}T_Loc_forExport').text
camp_loc = _clean_string(camp_loc).strip()
camp_name = element.find('{http://www.filemaker.com/fmpdsoresult}T_Name').text
camp_name_list.append(camp_name.strip().lower())
if 'located within' in camp_loc.lower():
# We're only caching camps with playa addresses
#print 'skipping ' + camp_loc
continue
camp_name = _clean_string(camp_name)
cache_camp_loc(camp_name, camp_loc, camp_to_loc)
#print json.dumps(camp_to_loc, sort_keys=True, indent=4, separators=(',', ': '))
if not DRY_RUN:
f = open('camp_locations.json', 'w')
f.write(json.dumps(camp_to_loc, sort_keys=True, indent=4, separators=(',', ': ')))
located_camps = 0
mystery_camps = 0
# Second Pass: Now build the final json representation
for element in list(tree.getroot().findall('{http://www.filemaker.com/fmpdsoresult}ROW')):
camp = {}
if element.find('{http://www.filemaker.com/fmpdsoresult}T_Name').text:
camp['name'] = element.find('{http://www.filemaker.com/fmpdsoresult}T_Name').text
if element.find('{http://www.filemaker.com/fmpdsoresult}T_PublicEmail').text:
camp['contact_email'] = element.find('{http://www.filemaker.com/fmpdsoresult}T_PublicEmail').text
if element.find('{http://www.filemaker.com/fmpdsoresult}T_Loc_forExport').text:
raw_location = element.find('{http://www.filemaker.com/fmpdsoresult}T_Loc_forExport').text
clean_name = _clean_string(camp['name']).strip()
if clean_name.lower() in camp_to_loc:
# Camp name found in location cache
camp['location'] = camp_to_loc[clean_name.lower()]
located_camps+=1
elif 'Located within' in raw_location:
# Camp location references another camp
clean_host_name = raw_location.split('Located within ')[1].strip()
#pdb.set_trace()
if clean_host_name.lower() in camp_to_loc:
camp['location'] = camp_to_loc[clean_host_name.lower()]
located_camps+=1
else:
print 'cant find ' + clean_host_name
if 'location' not in camp:
mystery_camps+=1
# If we couldn't lookup location, use XML
camp['location'] = raw_location
if element.find('{http://www.filemaker.com/fmpdsoresult}T_WWW').text:
camp['description'] = element.find('{http://www.filemaker.com/fmpdsoresult}T_WWW').text
if element.find('{http://www.filemaker.com/fmpdsoresult}T_Public_URL').text:
camp['url'] = element.find('{http://www.filemaker.com/fmpdsoresult}T_Public_URL').text
if element.find('{http://www.filemaker.com/fmpdsoresult}T_Year').text:
camp['year'] = {
'year' : element.find('{http://www.filemaker.com/fmpdsoresult}T_Year').text,
'id' : 10}
hometown = element.find('{http://www.filemaker.com/fmpdsoresult}T_hometown').text
homestate = element.find('{http://www.filemaker.com/fmpdsoresult}T_public_state').text
if hometown and homestate:
camp['hometown'] = hometown.strip() + ', ' + homestate.strip()
elif hometown:
camp['hometown'] = hometown.strip()
# Sanitize all values
try:
for key, value in camp.iteritems():
camp[key] = _clean_string(value).strip()
except TypeError:
# Value is not a string. Ignore sanitization error
pass
camp_list.append(camp)
#print json.dumps(camp, sort_keys=True, indent=4, separators=(',', ': '))
# Third pass -- Add any Camps from the PlayaEvents API that weren't
# included in the Burning Man Org XML
json_camps_file = open('playaevents_camps_8_11_14.json')
camps = json.load(json_camps_file)
print 'Camps in PlayaEvents API not in BMOrg data:'
for camp in camps:
if camp['name'].strip().lower() not in camp_name_list:
# Remove null keys from dict
camp = {k: v for k, v in camp.items() if v}
camp_list.append(camp)
print 'done!'
if not DRY_RUN:
f = open('camps.json', 'w')
f.write(json.dumps(camp_list, sort_keys=True, indent=4, separators=(',', ': ')))
print str(located_camps) + ' / ' + str(mystery_camps) + ' located / mystery camps'
'''
XML element tags:
{http://www.filemaker.com/fmpdsoresult}T_Year
{http://www.filemaker.com/fmpdsoresult}T_UNID_Project
{http://www.filemaker.com/fmpdsoresult}T_Name
{http://www.filemaker.com/fmpdsoresult}T_WWW
{http://www.filemaker.com/fmpdsoresult}T_hometown
{http://www.filemaker.com/fmpdsoresult}T_public_state
{http://www.filemaker.com/fmpdsoresult}T_public_country
{http://www.filemaker.com/fmpdsoresult}T_Public_URL
{http://www.filemaker.com/fmpdsoresult}T_PublicEmail
{http://www.filemaker.com/fmpdsoresult}T_Loc_forExport
'''
| 3.015625
| 3
|
notochord/test/__main__.py
|
jroose/notochord
| 0
|
12780471
|
<reponame>jroose/notochord
import unittest
from . import *
def main():
unittest.main(exit=False)
if __name__ == "__main__":
main()
| 1.21875
| 1
|
tests/sources/tracking/rasterisation.py
|
bcbnz/matplotlib-pgfutils
| 1
|
12780472
|
<gh_stars>1-10
from pgfutils import save, setup_figure
setup_figure(width=1, height=1)
from matplotlib import pyplot as plt
import numpy as np
d = np.random.randn(128, 128)
plt.imshow(d)
save()
| 2
| 2
|
drawingclasses/luaplot/luabar_n_line.py
|
Daguhh/ConkyLuaMakerGUIv2
| 19
|
12780473
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Class that implement draw of rectangles (for bar and line)
It use pygame.draw
methods :
- draw
- update
- resize
"""
import pygame
from .math_tools import PositionMathTool as pmt
from .math_tools import PositionValueObject as pval
class BarNLine:
def __init__(self):
self.input_remaning = 2
def draw(self, positions):
self.dct["from"] = pval(positions[0])
self.dct["to"] = pval(positions[1])
f = pval(positions[0])
t = pval(positions[1])
p = pval((0, 0))
of = pval((1, 1)) * self.dct[self.thickness_name]
f = f + p
t = t + p
p = pmt.min(f, t)
f = f - p + of
t = t - p + of
p = p - of
self._pos = p
self.dct["from"] = f
self.dct["to"] = t
def update(self):
h = self.dct[self.thickness_name] / 2
f = pval(self.dct["from"])
t = pval(self.dct["to"])
p = self._pos
of = pval((1, 1)) * self.dct[self.thickness_name] * 1
f = f + p
t = t + p
g = self.grid_step
f = pmt.discretize(f, g)
t = pmt.discretize(t, g)
p = pmt.min(f, t)
f = f - p + of
t = t - p + of
self._pos = p - of
self.dct["from"] = f
self.dct["to"] = t
dif = t - f
norm = dif.norm()
if norm != 0:
ratio = h / norm
else:
ratio = 1
w_p = ratio * dif ^ (0, 0, 1)
w_m = ratio * dif ^ (0, 0, -1)
c0 = f + w_m
c1 = f + w_p
c2 = t + w_p
c3 = t + w_m
corners = [c0, c1, c2, c3]
poly = [c.get() for c in corners]
rect = pmt.max(corners).get()
self.surface = pygame.Surface(rect, pygame.SRCALPHA)
self.surface.fill(pygame.Color("#77777720"))
self.shape = pygame.draw.polygon(self.surface, self.dct[self.color_name], poly)
self.mask = pygame.mask.from_surface(self.surface)
def resize(self, new_mouse_pos):
if self.dct["from"] != new_mouse_pos - self._pos:
self.dct["to"] = new_mouse_pos - self._pos
| 3.546875
| 4
|
bandits/__init__.py
|
XiaoMutt/ucbc
| 0
|
12780474
|
from .basis import Bandit
from .bernoulli import BernoulliBandit
from .normal import NormalBandit
from .bimodal import BimodalBandit
from .uniform import UniformBandit
| 1.148438
| 1
|
django_cradmin/renderable.py
|
appressoas/django_cradmin
| 11
|
12780475
|
import warnings
from django.template.loader import render_to_string
from django_cradmin import crsettings
def join_css_classes_list(css_classes_list):
"""
Join the provided list of css classes into a string.
"""
return ' '.join(css_classes_list)
class AbstractRenderable(object):
"""
An abstract class that implements an interface for
rendering something.
Everything is just helpers for the :meth:`.render` method,
which renders a template with an object of this class as
input.
"""
#: The default value for :meth:`.get_template_names`.
template_name = None
def get_template_names(self):
"""
Get the template name(s) for :meth:`.render`.
Defaults to :obj:`~.AbstractRenderable.template_name`.
Raises:
NotImplementedError: If :obj:`~.AbstractRenderable.template_name` is
not set.
"""
if self.template_name:
return self.template_name
else:
raise NotImplementedError('You must set template_name or override '
'get_template_names().')
def get_context_data(self, request=None):
"""
Get context data for :meth:`.render`.
Defaults to::
{
'me': self
}
"""
return {
'me': self
}
def render(self, request=None, extra_context_data=None):
"""
Render :obj:`.get_template_names` with
the context returned by :meth:`.get_context_data`.
Parameters:
request (HttpRequest): If this is provided, we forward it to
:meth:`.get_context_data`, and to ``render_to_string()``
(which is used to render the template).
"""
context_data = {}
if extra_context_data:
context_data.update(extra_context_data)
context_data.update(self.get_context_data(request=request))
return render_to_string(
template_name=self.get_template_names(),
context=context_data,
request=request)
class AbstractRenderableWithCss(AbstractRenderable):
"""
Extends :class:`.AbstractRenderable` with a unified
API for setting CSS classes.
"""
def get_base_css_classes_list(self):
return []
def get_extra_css_classes_list(self):
return []
def get_css_classes_list(self):
"""
Override this to define css classes for the component.
Must return a list of css classes.
See :meth:`.get_css_classes_string`.
"""
css_classes_list = []
# if hasattr(self, 'get_base_css_classes_list'):
# warnings.warn("AbstractRenderableWithCss.get_base_css_classes_list() is deprectated "
# "- override get_css_classes_list() instead.",
# DeprecationWarning)
# css_classes_list.extend(self.get_base_css_classes_list())
# if hasattr(self, 'get_extra_css_classes_list'):
# warnings.warn("AbstractRenderableWithCss.get_extra_css_classes_list() is deprectated "
# "- override get_css_classes_list() instead.",
# DeprecationWarning)
# css_classes_list.extend(self.get_extra_css_classes_list())
css_classes_list.extend(self.get_base_css_classes_list())
css_classes_list.extend(self.get_extra_css_classes_list())
return css_classes_list
def get_test_css_class_suffixes_list(self):
"""
List of css class suffixes to include when running automatic tests.
These suffixes are filtered through the
:func:`~django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class`
template tag.
"""
return []
@property
def css_classes(self):
"""
Get css classes.
Joins :meth:`.get_css_classes_list` into a string.
You should not override this, override :meth:`.get_css_classes_list` instead.
"""
from django_cradmin.templatetags import cradmin_tags # Avoid circular import
css_classes = list(self.get_css_classes_list())
if crsettings.get_setting('DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES', False):
for css_class_suffix in self.get_test_css_class_suffixes_list():
css_classes.append(cradmin_tags.cradmin_test_css_class(css_class_suffix))
return join_css_classes_list(css_classes)
def get_css_classes_string(self):
warnings.warn("AbstractRenderableWithCss.get_css_classes_string() is deprectated "
"- use the AbstractRenderableWithCss.css_classes property instead.",
DeprecationWarning)
return self.css_classes
class AbstractBemRenderable(AbstractRenderable):
"""
Base class for renderables that uses BEM (http://getbem.com/)
for their CSS class naming.
This is an alternative to :class:`.AbstractRenderableWithCss`
that makes it much more natural to work with BEM.
"""
def __init__(self, bem_block=None, bem_element=None, bem_variant_list=None,
extra_css_classes_list=None):
"""
Args:
bem_block (str): Get the BEM block. Can not be supplied if ``bem_element`` is supplied.
bem_element (str): Get the BEM element. Can not be supplied if ``bem_block`` is supplied.
bem_variant_list (list): Get a list of BEM variants for the block/element.
You do not include the block/element, just the part after ``--``.
extra_css_classes_list (list): List of extra css classes.
"""
if bem_block and bem_element:
raise ValueError('Can not specify both bem_block and bem_element arguments.')
if bem_element and '__' not in bem_element:
raise ValueError('bem_element must contain __')
self._bem_block = bem_block
self._bem_element = bem_element
self._bem_variant_list = bem_variant_list or []
self._extra_css_classes_list = extra_css_classes_list or []
def get_test_css_class_suffixes_list(self):
"""
List of css class suffixes to include when running automatic tests.
These suffixes are filtered through the
:func:`~django_cradmin.templatetags.cradmin_tags.cradmin_test_css_class`
template tag.
"""
return []
@property
def bem_block_or_element(self):
"""
Returns :meth:`.get_bem_block` falling back to :meth:`.get_bem_element`.
"""
return self.get_bem_block() or self.get_bem_element()
def get_bem_block(self):
"""
Get the bem block string.
"""
return self._bem_block
def get_bem_element(self):
"""
Get the bem element string.
"""
return self._bem_element
def get_bem_variant_list(self):
"""
Get a list of BEM variants.
You do not include the block/element, just the part after ``--``.
"""
return self._bem_variant_list
def get_extra_css_classes_list(self):
"""
Get a list of extra css classes.
"""
return self._extra_css_classes_list
@property
def css_classes(self):
"""
Get css classes as a string.
You should not override this, override :meth:`.get_bem_block` / :meth:`.get_bem_element`
and :meth:`.get_bem_variant_list` instead.
"""
from django_cradmin.templatetags import cradmin_tags # Avoid circular import
css_classes = []
if self.bem_block_or_element:
css_classes = [self.bem_block_or_element]
css_classes.extend(['{}--{}'.format(self.bem_block_or_element, variant)
for variant in self.get_bem_variant_list()])
if crsettings.get_setting('DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES', False):
for css_class_suffix in self.get_test_css_class_suffixes_list():
css_classes.append(cradmin_tags.cradmin_test_css_class(css_class_suffix))
css_classes.extend(self.get_extra_css_classes_list())
return join_css_classes_list(css_classes)
| 2.484375
| 2
|
doc/urls.py
|
sfreedgood/redline_django_with_NLP
| 0
|
12780476
|
<reponame>sfreedgood/redline_django_with_NLP
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name = 'index'),
path('upload', views.upload_doc, name='upload'),
path('nlptest', views.nlptest, name='nlptest')
]
| 1.640625
| 2
|
tests/unit/configuration/test_lint.py
|
dafrenchyman/kontrolilo
| 5
|
12780477
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from pathlib import Path
from tempfile import TemporaryDirectory, NamedTemporaryFile
from unittest.mock import Mock
from kontrolilo.configuration import Configuration, ConfigurationInclude
from kontrolilo.configuration.configuration import CONFIG_FILE_NAME
from kontrolilo.configuration.lint import ConfigurationFileChecker
class TestConfigurationFileChecker:
def setup(self):
self.checker = ConfigurationFileChecker()
self.directory = TemporaryDirectory()
self.cache_file = NamedTemporaryFile()
def test_run_returns_zero_on_valid_configuration(self):
base_configuration = Configuration(
allowed_licenses=['MIT', 'Apache 2.0'],
includes=[
ConfigurationInclude(url='https://examle.com/test.yaml'),
ConfigurationInclude(url='https://examle.com/test2.yaml'),
],
cache_name=self.cache_file.name
)
base_configuration.save_to_directory(self.directory.name)
args = Mock()
args.filenames = [Path(self.directory.name, CONFIG_FILE_NAME).absolute()]
assert self.checker.run(args) == 0
assert Configuration.load_from_directory(self.directory.name) == Configuration(
allowed_licenses=['Apache 2.0', 'MIT'],
includes=[
ConfigurationInclude(url='https://examle.com/test.yaml'),
ConfigurationInclude(url='https://examle.com/test2.yaml'),
],
cache_name=self.cache_file.name
)
def test_run_returns_non_zero_on_invalid_configuration(self):
path = Path(self.directory.name, CONFIG_FILE_NAME)
with open(path.absolute(), 'w') as file:
file.write('---')
args = Mock()
args.filenames = [path.absolute()]
assert self.checker.run(args) != 0
| 2.359375
| 2
|
quote.py
|
babu1998/Widhya-auth
| 0
|
12780478
|
<gh_stars>0
import os
import random
import time
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
def recommend_font_size(text):
size = 45
l = len(text)
resize_heuristic = 0.9
resize_actual = 0.985
while l > 1:
l = l * resize_heuristic
size = size * resize_actual
return int(size)
def select_background_image():
prefix = "input/"
options = os.listdir(prefix)
return prefix + random.choice(options)
def select_font():
prefix = "fonts/"
options = os.listdir(prefix)
return prefix + random.choice(options)
def wrap_text(text, w=30):
new_text = ""
new_sentence = ""
for word in text.split(" "):
delim = " " if new_sentence != "" else ""
new_sentence = new_sentence + delim + word
if len(new_sentence) > w:
new_text += "\n" + new_sentence
new_sentence = ""
new_text += "\n" + new_sentence
return new_text
def write_image(text, output_filename, background_img):
# setup
text = wrap_text(text)
img = Image.new("RGBA", (IMAGE_WIDTH, IMAGE_HEIGHT), (255, 255, 255))
# background
back = Image.open(background_img, 'r')
img_w, img_h = back.size
bg_w, bg_h = img.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)  # paste() needs integer offsets
img.paste(back, offset)
# text
font = ImageFont.truetype(FONT, FONT_SIZE)
draw = ImageDraw.Draw(img)
img_w, img_h = img.size
x = img_w / 2
y = img_h / 2
textsize = draw.multiline_textsize(text, font=IF, spacing=SPACING)
text_w, text_h = textsize
x -= text_w / 2
y -= text_h / 2
draw.multiline_text(align="center", xy=(x, y), text=text, fill=COLOR, font=font, spacing=SPACING)
draw = ImageDraw.Draw(img)
# output
img.save(output_filename)
return output_filename
# text
text = "This is a test. This is a test. This is a test. This is a test. "
output_filename = "output/{}.png".format(int(time.time()))
# config
FONT = select_font()
FONT_SIZE = recommend_font_size(text)
print(FONT_SIZE)
IF = ImageFont.truetype(FONT, FONT_SIZE)
IMAGE_WIDTH = 600
IMAGE_HEIGHT = 350
COLOR = (255, 255, 255)
SPACING = 3
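# A minimal usage sketch (assumes the input/, fonts/ and output/ directories exist
# with at least one background image and one font file, as the module expects):
# write_image(text, output_filename, select_background_image())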
| 2.8125
| 3
|
tests/learning/test_semi_supervised_learning.py
|
dumpmemory/nlpatl
| 18
|
12780479
|
<gh_stars>10-100
from typing import List, Union
from datasets import load_dataset
import unittest
import numpy as np
from nlpatl.models.embeddings import Transformers
from nlpatl.models.classification import (
Classification,
SkLearnClassification,
XGBoostClassification,
)
from nlpatl.learning import SemiSupervisedLearning
from nlpatl.dataset import Dataset
class TestLearningSemiSupervised(unittest.TestCase):
@classmethod
def setUpClass(cls):
texts = load_dataset("ag_news")["train"]["text"]
labels = load_dataset("ag_news")["train"]["label"]
cls.train_texts = texts[0:5] + texts[200:205] + texts[1000:1005]
cls.train_labels = labels[0:5] + labels[200:205] + labels[1000:1005]
cls.test_texts = texts[0:10] + texts[200:210]
cls.test_labels = labels[0:10] + labels[200:210]
cls.transformers_embeddings_model = Transformers(
"bert-base-uncased", nn_fwk="pt", padding=True, batch_size=3
)
cls.sklearn_classification_model = SkLearnClassification(
"logistic_regression", model_config={"max_iter": 500}
)
cls.xgboost_classification_model = XGBoostClassification(
model_config={"use_label_encoder": False, "eval_metric": "logloss"}
)
def test_explore_by_sklearn(self):
learning = SemiSupervisedLearning(
sampling="most_confidence",
embeddings=self.transformers_embeddings_model,
classification=self.sklearn_classification_model,
)
learning.learn(self.train_texts, self.train_labels)
result = learning.explore(self.test_texts)
assert result, "No output"
assert result["inputs"], "Missed inputs attribute"
def test_explore_by_xgboost(self):
learning = SemiSupervisedLearning(
sampling="most_confidence",
embeddings=self.transformers_embeddings_model,
classification=self.xgboost_classification_model,
)
learning.learn(self.train_texts, self.train_labels)
result = learning.explore(self.test_texts)
assert result, "No output"
assert result["inputs"], "Missed inputs attribute"
def test_custom_classification_model(self):
class CustomClassification(Classification):
def __init__(self, model):
self.model = model
def train(
self,
x: np.array,
y: [np.array, List[str], List[int], List[List[str]], List[List[int]]],
):
"""
Do training here
e.g. self.model.train(x, y)
"""
...
def predict_proba(
self, x, predict_config: dict = {}
) -> Union[Dataset, object]:
"""
Do probability prediction here
e.g. preds = self.model.predict_prob(x, **predict_config)
"""
probs = np.random.rand(len(x), 3)
preds = np.argmax(probs, axis=1)
return Dataset(feautes=x, values=probs, groups=preds.tolist())
learning = SemiSupervisedLearning(
sampling="most_confidence",
embeddings=self.transformers_embeddings_model,
classification=CustomClassification(model=None),
multi_label=True,
)
learning.learn(self.train_texts, self.train_labels)
assert True, "Unable to apply custom classification model"
| 2.640625
| 3
|
reverse_a_string.py
|
yehnan/python_note
| 0
|
12780480
|
s = 'Hello Python'
# s.reverse()
print(s[::-1])
print(''.join(reversed(s)))
def reverse_i(s):
r = ''
for c in s:
r = c + r
return r
print(reverse_i(s))
def reverse_r(s):
if len(s) <= 1:
return s
else:
return reverse_r(s[1:]) + s[0]
print(reverse_r(s))
r = list(s)
r.reverse()
print(''.join(r))
def reverse_li(s):
r = []
for c in s:
r.insert(0, c)
return ''.join(r)
print(reverse_li(s))
print(''.join([s[i] for i in range(len(s)-1, -1, -1)]))
print(''.join(s[i] for i in range(len(s)-1, -1, -1)))
def reverse_g(s):
def sub(s):
for i in range(len(s)-1, -1, -1):
yield s[i]
return ''.join(sub(s))
print(reverse_g(s))
from collections import deque
r = deque(s)
r.reverse()
print(''.join(r))
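# One more common variant, added here for illustration, using functools.reduce:
from functools import reduce
print(reduce(lambda acc, c: c + acc, s, ''))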
| 3.84375
| 4
|
piptui/custom/customForm.py
|
MrNaif2018/PipTUI
| 0
|
12780481
|
from npyscreen import FormBaseNew
import curses
class FormBaseNewHinted(FormBaseNew):
def display_menu_advert_at(self):
return self.lines - 1, 1
def draw_form(self):
super(FormBaseNewHinted, self).draw_form()
menu_advert = '^A: Install\t\t^R: Uninstall\t\t^U: Update'
if isinstance(menu_advert, bytes):
menu_advert = menu_advert.decode('utf-8', 'replace')
y, x = self.display_menu_advert_at()
self.add_line(y, x,
menu_advert,
self.make_attributes_list(menu_advert, curses.A_NORMAL),
self.columns - x - 1
)
| 2.53125
| 3
|
utils/tool.py
|
Gagarinwjj/Coeus
| 139
|
12780482
|
<filename>utils/tool.py
# coding: utf-8
__author__ = 'deff'
import re
class Tools:
@staticmethod
def xml_assent(word):
# escape '&' first so the later entity substitutions are not double-escaped
symbolc = re.compile('&')
word = symbolc.sub('&amp;', word)
symbola = re.compile('>')
word = symbola.sub('&gt;', word)
symbolb = re.compile('<')
word = symbolb.sub('&lt;', word)
symbold = re.compile('\'')
word = symbold.sub('&apos;', word)
symbole = re.compile('\"')
word = symbole.sub('&quot;', word)
return word
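# A minimal usage sketch of the escaping above (illustrative input):
# Tools.xml_assent('a < b & "c"')   # -> 'a &lt; b &amp; &quot;c&quot;'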
| 2.59375
| 3
|
a.sikorska/PD3/Alicja_Sikorska_pd3.py
|
alsikorska/python_wprowadzenie_warsztaty_2021
| 0
|
12780483
|
# Task 1
def Ciag_fib(n):
a = 1
b = 1
print(a)
print(b)
for i in range(1, n-1):
a, b = b, a + b
print(b)
return
Ciag_fib(50)
# Task 3
def Unique_elements(*lista):
lista_unique=[]
for n in range(0, len(lista)) :
if lista[n] not in lista_unique :
lista_unique.append(lista[n])
print(lista_unique)
return
Unique_elements(1,2,3,3,3,3,4,5)
| 3.5
| 4
|
mediawiki_auth/middleware.py
|
damoti/django-mediawiki-auth
| 1
|
12780484
|
<gh_stars>1-10
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import SimpleLazyObject
from mediawiki_auth import mediawiki
def get_user(request):
if not hasattr(request, '_cached_user'):
request._cached_user = mediawiki.get_or_create_django_user(request)
return request._cached_user
class AuthenticationMiddleware(MiddlewareMixin):
def process_request(self, request):
assert hasattr(request, 'session'), (
"The Django authentication middleware requires session middleware "
"to be installed. Edit your MIDDLEWARE%s setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
) % ("_CLASSES" if settings.MIDDLEWARE is None else "")
request.user = SimpleLazyObject(lambda: get_user(request))
| 2.21875
| 2
|
tests/GitLab/test_gitlab_hoster.py
|
GitMateIO/IGitt
| 6
|
12780485
|
import os
from IGitt.GitLab import GitLabOAuthToken
from IGitt.GitLab.GitLab import GitLab
from IGitt.GitLab.GitLabComment import GitLabComment
from IGitt.GitLab.GitLabCommit import GitLabCommit
from IGitt.GitLab.GitLabIssue import GitLabIssue
from IGitt.GitLab.GitLabMergeRequest import GitLabMergeRequest
from IGitt.Interfaces import AccessLevel
from IGitt.Interfaces.Actions import IssueActions, MergeRequestActions, \
PipelineActions
from tests import IGittTestCase
class GitLabHosterTest(IGittTestCase):
def setUp(self):
self.gl = GitLab(GitLabOAuthToken(os.environ.get('GITLAB_TEST_TOKEN', '')))
def test_repo_permissions_inheritance(self):
repos = [
{
'namespace':{'id': 1, 'parent_id': None},
'permissions': {'group_access': {'access_level': 40},
'project_access': None}
},
{
'namespace': {'id': 2, 'parent_id': 1},
'permissions': {'group_access': None, 'project_access': None}
},
{
'namespace': {'id': 3, 'parent_id': 2},
'permissions': {'group_access': None, 'project_access': None}
},
{
'namespace': {'id': 4, 'parent_id': None},
'permissions': {'group_access': None,
'project_access': {'access_level': 40}}
}
]
self.assertEqual(set(map(lambda x: x['namespace']['id'],
GitLab._get_repos_with_permissions(
repos, AccessLevel.ADMIN))),
{1, 2, 3, 4})
def test_master_repositories(self):
self.assertEqual(sorted(map(lambda x: x.full_name, self.gl.master_repositories)),
['gitmate-test-user/test'])
def test_owned_repositories(self):
self.assertEqual(sorted(map(lambda x: x.full_name, self.gl.owned_repositories)),
['gitmate-test-user/test'])
def test_write_repositories(self):
self.assertEqual(sorted(map(lambda x: x.full_name, self.gl.write_repositories)),
['gitmate-test-user/test'])
def test_get_repo(self):
self.assertEqual(self.gl.get_repo('gitmate-test-user/test').full_name,
'gitmate-test-user/test')
class GitLabWebhookTest(IGittTestCase):
def setUp(self):
self.gl = GitLab(GitLabOAuthToken(
os.environ.get('GITLAB_TEST_TOKEN', '')))
self.repo_name = 'test/test'
self.default_data = {
'project': {
'path_with_namespace': self.repo_name,
},
'object_attributes': {
'id': 12,
'iid': 23,
'action': 'open',
'noteable_type': 'Issue',
'target': {
'path_with_namespace': 'gitmate-test-user/test'
}
},
'commit': {
'id': 'bcb<PASSWORD>',
},
'merge_request': {
'iid': 123,
},
'issue': {
'iid': 123,
'action': 'open',
},
'repository': {
'git_ssh_url': '<EMAIL>:gitmate-test-user/test.git'
}
}
def test_unknown_event(self):
with self.assertRaises(NotImplementedError):
list(self.gl.handle_webhook('unknown_event', self.default_data))
def test_issue_hook(self):
for event, obj in self.gl.handle_webhook('Issue Hook',
self.default_data):
self.assertEqual(event, IssueActions.OPENED)
self.assertIsInstance(obj[0], GitLabIssue)
def test_pr_hook(self):
for event, obj in self.gl.handle_webhook('Merge Request Hook',
self.default_data):
self.assertEqual(event, MergeRequestActions.OPENED)
self.assertIsInstance(obj[0], GitLabMergeRequest)
def test_pr_synchronized(self):
data = self.default_data
data['object_attributes']['oldrev'] = 'deadbeef'
for event, obj in self.gl.handle_webhook('Merge Request Hook',
self.default_data):
self.assertEqual(event, MergeRequestActions.SYNCHRONIZED)
self.assertIsInstance(obj[0], GitLabMergeRequest)
def test_issue_comment(self):
for event, obj in self.gl.handle_webhook('Note Hook',
self.default_data):
self.assertEqual(event, IssueActions.COMMENTED)
self.assertIsInstance(obj[0], GitLabIssue)
self.assertIsInstance(obj[1], GitLabComment)
def test_unsupported_comment(self):
data = self.default_data
data['object_attributes']['noteable_type'] = 'Snippet'
with self.assertRaises(NotImplementedError):
list(self.gl.handle_webhook('Note Hook', data))
def test_pr_comment(self):
data = self.default_data
del data['project']
data['object_attributes']['noteable_type'] = 'MergeRequest'
for event, obj in self.gl.handle_webhook('Note Hook', data):
self.assertEqual(event, MergeRequestActions.COMMENTED)
self.assertIsInstance(obj[0], GitLabMergeRequest)
self.assertIsInstance(obj[1], GitLabComment)
def test_status(self):
del self.default_data['project']
del self.default_data['object_attributes']
for event, obj in self.gl.handle_webhook('Pipeline Hook',
self.default_data):
self.assertEqual(event, PipelineActions.UPDATED)
self.assertIsInstance(obj[0], GitLabCommit)
def test_issue_label(self):
obj_attrs = self.default_data['object_attributes']
obj_attrs.update({'action': 'update'})
self.default_data.update({
'object_attributes': obj_attrs,
'changes': {
'labels': {
'previous': [{'title': 'old'}, {'title': 'old2'}],
'current': [{'title': 'new'}],
},
},
})
unlabeled_labels = set()
labeled_labels = set()
for event, obj in self.gl.handle_webhook('Issue Hook',
self.default_data):
self.assertIsInstance(obj[0], GitLabIssue)
if event == IssueActions.LABELED:
labeled_labels.add(obj[1])
elif event == IssueActions.UNLABELED:
unlabeled_labels.add(obj[1])
self.assertEqual(unlabeled_labels, {'old', 'old2'})
self.assertEqual(labeled_labels, {'new'})
def test_merge_request_label(self):
obj_attrs = self.default_data['object_attributes']
obj_attrs.update({'action': 'update'})
self.default_data.update({
'object_attributes': obj_attrs,
'changes': {
'labels': {
'previous': [{'title': 'old'}, {'title': 'old2'}],
'current': [{'title': 'new'}],
},
},
})
unlabeled_labels = set()
labeled_labels = set()
for event, obj in self.gl.handle_webhook('Merge Request Hook',
self.default_data):
self.assertIsInstance(obj[0], GitLabMergeRequest)
if event == MergeRequestActions.LABELED:
labeled_labels.add(obj[1])
elif event == MergeRequestActions.UNLABELED:
unlabeled_labels.add(obj[1])
self.assertEqual(unlabeled_labels, {'old', 'old2'})
self.assertEqual(labeled_labels, {'new'})
| 1.929688
| 2
|
pyexlatex/graphics/tikz/library.py
|
whoopnip/py-ex-latex
| 4
|
12780486
|
<reponame>whoopnip/py-ex-latex
from pyexlatex.models.item import SimpleItem
class TikZLibrary(SimpleItem):
name = 'usetikzlibrary'
def __init__(self, contents):
super().__init__(self.name, contents)
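# A minimal usage sketch (the library name is an illustrative assumption); as a
# SimpleItem this should render roughly as \usetikzlibrary{arrows}:
# lib = TikZLibrary('arrows')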
| 1.84375
| 2
|
_unittests/ut_pycode/test_references.py
|
Pandinosaurus/pyquickhelper
| 18
|
12780487
|
"""
@brief test tree node (time=5s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import add_missing_development_version
class TestReferences(unittest.TestCase):
def test_references(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
paths = add_missing_development_version("pyquickhelper", __file__)
assert len(paths) <= 1 # no added paths if no need to add a path
if __name__ == "__main__":
unittest.main()
| 2.421875
| 2
|
develocorder/logger.py
|
wahtak/develocorder
| 10
|
12780488
|
from collections import deque
import numpy as np
class Logger:
"""Print recorded values."""
def __init__(self, name):
"""
:param name str: identifier for printed value
"""
self.name = name
def __call__(self, value):
print("{}: {}".format(self.name, value))
class WindowFilterLogger:
"""Filter and print recorded values."""
def __init__(self, name, filter_size):
"""
:param name str: identifier for printed value
:param filter_size: number of historic samples which are averaged.
No output until filter_size number of values have been recorded.
"""
self.name = name
self.values = deque(maxlen=filter_size)
def __call__(self, value):
self.values.append(value)
if len(self.values) == self.values.maxlen:
print("{}: {}".format(self.name, np.mean(self.values)))
| 3.140625
| 3
|
third_party/paprika.py
|
constructorfleet/rasa-actions-eddie
| 0
|
12780489
|
# import logging
# import sys
# import traceback
# from random import randint
# from typing import Dict, Text, Any, List
#
# from pyprika import Pyprika
# from rasa_sdk import Tracker
# from rasa_sdk.events import SlotSet
# from rasa_sdk.executor import CollectingDispatcher
#
# from . import (
# RECIPE_CATEGORIES_SLOT,
# RECIPE_DURATION_SLOT,
# RECIPE_NAME_SLOT,
# RECIPE_NAME_LIKE_SLOT,
# RECIPE_DIRECTIONS_SLOT,
# RECIPE_INGREDIENTS_SLOT
# )
#
# _LOGGER = logging.getLogger(__name__)
#
#
# def get_a_recipe(client: Pyprika,
# dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# _LOGGER.warning("Getting slots")
# categories = next(tracker.get_latest_entity_values(RECIPE_CATEGORIES_SLOT), None)
# duration = next(tracker.get_latest_entity_values(RECIPE_DURATION_SLOT), None)
# name = next(tracker.get_latest_entity_values(RECIPE_NAME_LIKE_SLOT), None)
#
# try:
# _LOGGER.warning("Get recipes")
# recipes = client.get_recipes(
# categories=categories,
# duration=duration,
# name_like=name
# )
#
# index = randint(0, len(recipes) - 1)
# recipe = recipes[index] if recipes and len(recipes) > 0 else None
# if not recipe:
# dispatcher.utter_message(template="utter_recipe_not_found")
# else:
# dispatcher.utter_message(template="utter_recipe_name", recipe=recipe.name)
# return [
# SlotSet(RECIPE_NAME_SLOT, recipe.name),
# SlotSet(RECIPE_INGREDIENTS_SLOT, recipe.ingredients),
# SlotSet(RECIPE_DIRECTIONS_SLOT, recipe.directions)
# ]
# except Exception as err:
# _LOGGER.warning(str(err))
# _LOGGER.exception(err)
# dispatcher.utter_message(template="utter_recipe_failed")
#
# return []
| 1.960938
| 2
|
alloy_related/alloyToRailML/parserAlloy/parser.py
|
pedrordgs/RailML-Utilities
| 21
|
12780490
|
<gh_stars>10-100
import xml.etree.ElementTree as ET
from parserAlloy.netElement import NetElement
from parserAlloy.netRelation import NetRelation
from parserAlloy.network import Network
from parserAlloy.level import Level
from parserAlloy.railway import Railway
def strToPos(pos):
if pos == 'Zero':
return 0
else:
return 1
def parseNetElement(rail, nelems, rels, ecu):
for elem in nelems:
nelem = elem.get('label').split('$')[1]
ident = 'ne_' + nelem
if rels != None:
assoc_rels = rels.findall(f'.//atom[@label="NetElement${nelem}"]/..')
else:
assoc_rels = []
r = []
for rel in assoc_rels:
netr = 'nr_' + rel[1].get('label').split('$')[1]
r.append(netr)
if ecu != None:
assoc_ecu = ecu.findall(f'.//atom[@label="NetElement${nelem}"][1]/..')
else:
assoc_ecu = []
ecol = []
for e in assoc_ecu:
el = 'ne_' + e[1].get('label').split('$')[1]
ecol.append(el)
rail.addNetElement(NetElement(ident, r, ecol))
def parseNetRelation(rail, nrels, navs, possA, possB, elemsA, elemsB):
for rel in nrels:
nrel = rel.get('label').split('$')[1]
ident = 'nr_' + nrel
nav = navs.find(f'.//atom[@label="NetRelation${nrel}"]/..')[1].get('label').split('$')[0]
posA = strToPos(possA.find(f'.//atom[@label="NetRelation${nrel}"]/..')[1].get('label').split('$')[0])
posB = strToPos(possB.find(f'.//atom[@label="NetRelation${nrel}"]/..')[1].get('label').split('$')[0])
elemA = 'ne_' + elemsA.find(f'.//atom[@label="NetRelation${nrel}"]/..')[1].get('label').split('$')[1]
elemB = 'ne_' + elemsB.find(f'.//atom[@label="NetRelation${nrel}"]/..')[1].get('label').split('$')[1]
rail.netRelations.append(NetRelation(ident, nav, posA, posB, elemA, elemB))
def parseNetworks(rail, nets, lvls, desc_lvl, net_res):
for n in nets:
net = n.get('label').split('$')[1]
ident = 'net_' + net
levels = lvls.findall(f'.//atom[@label="Network${net}"]/..')
        network_levels = []  # new name so the 'lvls' field element is not clobbered on later iterations
for l in levels:
lvl = l[1].get('label')
lvl_id = 'lvl_' + lvl.split('$')[1]
desc = desc_lvl.find(f'.//atom[@label="{lvl}"]/..')[1].get('label').split('$')[0]
netr_node = net_res.findall(f'.//atom[@label="{lvl}"]/..')
net_resources = []
for netr in netr_node:
s = netr[1].get('label').split('$')
if s[0] == 'NetElement':
net_resources.append('ne_' + s[1])
else:
net_resources.append('nr_' + s[1])
            network_levels.append(Level(lvl_id, desc, net_resources))
        rail.networks.append(Network(ident, network_levels))
def parseAlloyXML(filename):
rail = Railway()
tree = ET.parse(filename)
instance = tree.getroot()[0]
nelems = tree.find(f'.//sig[@label="this/NetElement"]')
rels = tree.find(f'.//field[@label="relation"]')
ecu = tree.find(f'.//field[@label="elementCollectionUnordered"]')
parseNetElement(rail, nelems, rels, ecu)
nrels = tree.find(f'.//sig[@label="this/NetRelation"]')
navs = tree.find(f'.//field[@label="navigability"]')
possA = tree.find(f'.//field[@label="positionOnA"]')
possB = tree.find(f'.//field[@label="positionOnB"]')
elemsA = tree.find(f'.//field[@label="elementA"]')
elemsB = tree.find(f'.//field[@label="elementB"]')
parseNetRelation(rail, nrels, navs, possA, possB, elemsA, elemsB)
nets = tree.find(f'.//sig[@label="this/Network"]')
lvls = tree.find(f'.//field[@label="level"]')
desc_lvl = tree.find(f'.//field[@label="descriptionLevel"]')
net_res = tree.find(f'.//field[@label="networkResource"]')
parseNetworks(rail, nets, lvls, desc_lvl, net_res)
return rail
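# A minimal usage sketch ('instance.xml' is a hypothetical Alloy instance export
# containing the sig/field labels queried above):
if __name__ == '__main__':
    railway = parseAlloyXML('instance.xml')
    print(len(railway.netRelations), 'relations,', len(railway.networks), 'networks')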
| 2.46875
| 2
|
shim/helper/settings.py
|
jonathan-innis/azure-extension-foundation
| 0
|
12780491
|
from ctypes import *
from helper.types import GoString
import json
class Settings:
def __init__(self, lib):
self.lib = lib
self.lib.GetSettings.argtypes = []
self.lib.GetSettings.restype = c_char_p
def get_settings(self):
ret = self.lib.GetSettings()
return json.loads(ret)
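# A minimal usage sketch (the shared-library name is hypothetical; the Go-based
# extension shim is expected to expose a GetSettings symbol):
#
#   lib = cdll.LoadLibrary('./extension-shim.so')
#   settings = Settings(lib).get_settings()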
| 2.375
| 2
|
app/management/commands/crawl.py
|
erikreed/cpotrace
| 0
|
12780492
|
<reponame>erikreed/cpotrace<filename>app/management/commands/crawl.py<gh_stars>0
import time
from django.core.management.base import BaseCommand
from app.models import Car
from app.crawler import COUNTRY_CODES, TeslaCrawler, CrawlerException, TeslaSlackClient
class Command(BaseCommand):
help = 'Crawls and updates the car database'
def add_arguments(self, parser):
parser.add_argument('--country-codes', help='Comma delimited list of country codes. Defaults to all.')
parser.add_argument('--slack-webhook', help='Slack webhook URL.')
def handle(self, *args, **options):
slack_client = TeslaSlackClient(webhook=options['slack_webhook']) if options['slack_webhook'] else None
for country_code in options['country_codes'].split(',') if options['country_codes'] else COUNTRY_CODES:
crawler = TeslaCrawler(slack_client=slack_client, country_code=country_code)
for status in ('used', 'new'):
for model in ('MODEL_S', 'MODEL_X'):
url = crawler.make_url(metro_id=None, status=status, model=model)
print('Fetching', status, model, 'in', country_code)
try:
cars = crawler.check_url(url)
crawler.update_database(cars)
except CrawlerException as e:
print('Error crawling:', e, url)
print('Sleeping 1 second [%d cars in database]' % Car.objects.count())
time.sleep(1)
print('Done!')
print('Saw %d P85s w/ autopilot' % Car.objects.filter(badge__in=['P85', 'P85+'], is_autopilot=True).count())
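# Invocation sketch (argument values are illustrative):
#   python manage.py crawl --country-codes=US,CA --slack-webhook=https://hooks.slack.com/services/XXX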
| 2.171875
| 2
|
load_vctk.py
|
buzem/inzpeech
| 2
|
12780493
|
<filename>load_vctk.py
#!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
from sklearn.model_selection import train_test_split
def get_person_label(pname):
return int(pname.replace('p', ''))
def get_samples_from_person(person_path, sample_count, mics):
"""
Return path of audio samples selected from a person folder.
Params:
person_path: Path for the person
sample_count: Number of samples to select
        mics: Mic number of the selected audio samples. Can be one of [1], [2], [1, 2]. If both mics are included,
              the function may return the same utterance recorded by both mics.
Returns:
audio_paths: Relative path of the audio samples
"""
audio_files = os.listdir(person_path)
mic_string = ['mic'+ str(n) for n in mics ]
audio_files = [af for af in audio_files if af.split('.')[0].split('_')[-1] in mic_string]
sample_count = min(len(audio_files), sample_count)
audio_paths = [os.path.join(person_path, af) for af in audio_files]
return np.random.choice(audio_paths, sample_count, replace=False).tolist()
def get_model_data(data_main_dir, sample_per_person, split=[0.1, 0.1], shuffle=True, mics=[1,2], include_person=None):
"""
Return audio file paths and corresponding labels.
Params:
data_main_dir: Parent directory for the dataset
sample_per_person: Number of samples to select
split: Ratios for the test and validation sets. Default values are 0.1 for test and 0.1 for validation.
        shuffle: Whether to shuffle the paths and labels before returning them. If this is False, consecutive audio files
                 will be obtained from the same person.
        mics: Mic number of the selected audio samples. Can be one of [1], [2], [1, 2]. If both mics are included,
              the function may return the same utterance recorded by both mics.
        include_person: Persons to include in the data. Default is None. When None, audio is taken from all persons.
Returns:
audio paths and labels for each subset. Audio paths and labels are given as a single list for each subset
"""
all_audio_paths = []
labels = []
person_names = [pname for pname in os.listdir(data_main_dir) if 'p' in pname]
if include_person:
person_names = [pname for pname in person_names if pname in include_person]
person_paths = [os.path.join(data_main_dir, p) for p in person_names]
for i, ppath in enumerate(person_paths):
audio_paths = get_samples_from_person(ppath, sample_per_person, mics)
labels = labels + len(audio_paths) * [i]
all_audio_paths = all_audio_paths + audio_paths
if shuffle:
idx = np.arange(len(labels))
np.random.shuffle(idx)
labels = np.array(labels)[idx].tolist()
all_audio_paths = np.array(all_audio_paths)[idx].tolist()
tr_val_audio, test_audio, tr_val_labels, te_labels = train_test_split(all_audio_paths, labels, test_size=split[0], random_state=42)
tr_audio, val_audio, tr_labels, val_labels = train_test_split(tr_val_audio, tr_val_labels, test_size=split[1], random_state=42)
return [tr_audio, tr_labels], [val_audio, val_labels], [test_audio, te_labels]
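# A minimal usage sketch (the dataset path and sample count are hypothetical; person
# folders are expected to be named like 'p225' under data_main_dir):
#   train, val, test = get_model_data('VCTK-Corpus/wav48', sample_per_person=20, mics=[1])
#   print(len(train[0]), 'train,', len(val[0]), 'val,', len(test[0]), 'test samples')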
def get_model_data_for_batch(data_main_dir, sample_per_person, person_count_per_batch , shuffle=True, mics=[1,2]):
"""
Return audio file paths and corresponding labels for a batch.
Params:
data_main_dir: Parent directory for the dataset
sample_per_person: Number of samples to select
        person_count_per_batch: Number of persons to be added for each batch. Note that the batch size will be equal to
                            sample_per_person * person_count_per_batch
        shuffle: Whether to shuffle the paths and labels before returning them. If this is False, consecutive audio files
                 will be obtained from the same person.
        mics: Mic number of the selected audio samples. Can be one of [1], [2], [1, 2]. If both mics are included,
              the function may return the same utterance recorded by both mics.
    Returns:
        audio paths and corresponding labels for the sampled batch
"""
all_audio_paths = []
labels = []
person_names = [pname for pname in os.listdir(data_main_dir) if 'p' in pname]
person_paths = [os.path.join(data_main_dir, p) for p in person_names]
person_labels = [get_person_label(pname) for pname in person_names]
# Sample persons
idx = np.arange(len(person_paths))
selected_idx = np.random.choice(idx, person_count_per_batch, replace=False)
# Select person names, paths and corresponding labels
person_names = np.array(person_names)[selected_idx].tolist()
person_paths = np.array(person_paths)[selected_idx].tolist()
for i, ppath in enumerate(person_paths):
audio_paths = get_samples_from_person(ppath, sample_per_person, mics)
labels = labels + len(audio_paths) * [i]
all_audio_paths = all_audio_paths + audio_paths
if shuffle:
idx = np.arange(len(labels))
np.random.shuffle(idx)
labels = np.array(labels)[idx].tolist()
all_audio_paths = np.array(all_audio_paths)[idx].tolist()
return all_audio_paths, labels
| 2.890625
| 3
|
SampleCode/5_DivideAndConquer.py
|
k3a-uw/tcss503
| 0
|
12780494
|
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import random
def karatsuba(x, y):
""" Recursive implementation of Karatsuba's Fast Mulciplication Algoritihm
:param x: The first integer
:param y: The second integer
:return: The product of x * y
"""
if x < 10 or y < 10:
return x*y
m = max(len(str(x)), len(str(y))) // 2
x_high = x // 10**m
x_low = x % 10**m
y_high = y // 10**m
y_low = y % 10**m
z0 = karatsuba(x_low, y_low)
z1 = karatsuba(x_low + x_high, y_low + y_high)
z2 = karatsuba(x_high, y_high)
return z2 * 10 ** (2 * m) + (z1 - z2 - z0) * 10 ** m + z0
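# Quick sanity check of the recursion (operands chosen arbitrarily):
#   karatsuba(1234, 5678) == 1234 * 5678 == 7006652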
def karat_compare(max_size, tests):
samples = []
test_sizes = np.linspace(1,max_size, tests).astype(int)
standard_results = []
karatsuba_results = []
for test_size in test_sizes:
x_str = ''
y_str = ''
for x in range(test_size):
x_str += str(random.randint(0,9))
y_str += str(random.randint(0,9))
samples.append((int(x_str), int(y_str)))
print(f"Samples Generated: {len(samples)}, with max size: {max_size}")
for sample, test_size in zip(samples, test_sizes):
print(f"Attempting numbers of 10^{test_size}")
x = sample[0]
y = sample[1]
t_start = time.perf_counter()
r = x * y
standard_results.append(time.perf_counter() - t_start)
t_start = time.perf_counter()
r = karatsuba(x, y)
karatsuba_results.append(time.perf_counter() - t_start)
    plt.plot(test_sizes, standard_results, label="python native")
    plt.plot(test_sizes, karatsuba_results, label="karatsuba")
plt.xlabel("10^x")
plt.ylabel("Seconds")
plt.legend()
plt.show()
def naive_matrix_multiplication_lists(a, b):
"""
Uses nested loops to calculate AB
:param a: An MxN matrix of numbers.
:param b: An NxP matrix of numbers.
:return: An MxP matrix of numbers which is the product: AB.
"""
M = len(a)
N = len(a[0])
if len(b) != N:
raise ValueError("The Matrices Provide are not the proper shape.")
P = len(b[0])
c = [[0 for i in range(P)] for j in range(M)]
for i in range(0,M):
for j in range(0,P):
for k in range(0,N):
c[i][j] += a[i][k] * b[k][j]
return c
def naive_matrix_multiplication_np(a,b):
M, N = a.shape
n, P = b.shape
if N != n:
raise ValueError("The Matrices Provide are not the proper shape.")
c = np.zeros((M,P))
for i in range(0,M):
for j in range(0,P):
for k in range(0,N):
c[i][j] += a[i][k] * b[k][j]
return c
if __name__ == "__main__":
a = [[1, 2, 5],
[3, 4, 6]]
b = [[5, 6],
[7, 8],
[1, 1]]
c = naive_matrix_multiplication_lists(a, b)
print("List Results:\n", c)
A = np.array(a)
B = np.array(b)
C = naive_matrix_multiplication_np(A, B)
print("NP Array Results:\n", C)
expected_results = np.array([[24, 27], [49, 56]])
print("Expected Results:\n", expected_results)
| 3.609375
| 4
|
src/utils.py
|
p0werHu/articulated-objects-motion-prediction
| 14
|
12780495
|
<filename>src/utils.py
# implemented by JunfengHu
# create time: 7/20/2019
import sys
import time
import numpy as np
import copy
import torch
import os
def create_directory(config):
"""
    create checkpoint and output directory paths
modified from https://github.com/BII-wushuang/Lie-Group-Motion-Prediction
:param config:
:return:
"""
folder_dir = config.dataset + '/' + config.datatype + '_' + config.loss + 'loss_' + config.model
if config.model == 'HMR':
folder_dir += '_RecurrentSteps=' + str(config.encoder_recurrent_steps) + '_' + 'ContextWindow=' + str(
config.context_window) + '_' + 'hiddenSize=' + str(config.hidden_size)
if config.model == 'ST_HRN':
folder_dir += '_RecurrentSteps=' + str(config.encoder_recurrent_steps) + '_hiddenSize=' + str(config.hidden_size) \
+ '_decoder_name=' + str(config.decoder)
folder_dir += '/' + config.filename + '/'
folder_dir += 'inputWindow=' + str(config.input_window_size) + '_outputWindow=' + str(
config.output_window_size) + '/'
checkpoint_dir = './checkpoint/' + folder_dir
output_dir = './output/' + folder_dir
return [checkpoint_dir, output_dir]
def get_file_list(file_path):
dir_list = os.listdir(file_path)
if not dir_list:
return
else:
dir_list = sorted(dir_list, key=lambda x: os.path.getmtime(os.path.join(file_path, x)))
# print(dir_list)
return dir_list
def expmap2rotmat(A):
theta = np.linalg.norm(A)
if theta == 0:
R = np.identity(3)
else:
A = A / theta
cross_matrix = np.array([[0, -A[2], A[1]], [A[2], 0, -A[0]], [-A[1], A[0], 0]])
R = np.identity(3) + np.sin(theta) * cross_matrix + (1 - np.cos(theta)) * np.matmul(cross_matrix, cross_matrix)
return R
def rotmat2euler(R):
if R[0, 2] == 1 or R[0, 2] == -1:
E3 = 0
dlta = np.arctan2(R[0, 1], R[0, 2])
if R[0, 2] == -1:
E2 = np.pi/2
E1 = E3 + dlta
else:
E2 = -np.pi/2
E1 = -E3 + dlta
else:
E2 = -np.arcsin(R[0, 2])
E1 = np.arctan2(R[1, 2]/np.cos(E2), R[2, 2]/np.cos(E2))
E3 = np.arctan2(R[0, 1]/np.cos(E2), R[0, 0]/np.cos(E2))
eul = np.array([E1, E2, E3])
return eul
def mean_euler_error(config, action, y_predict, y_test):
# Convert from exponential map to Euler angles
n_batch = y_predict.shape[0]
nframes = y_predict.shape[1]
mean_errors = np.zeros([n_batch, nframes])
for i in range(n_batch):
for j in range(nframes):
if config.dataset == 'Human':
pred = unNormalizeData(y_predict[i], config.data_mean, config.data_std, config.dim_to_ignore)
gt = unNormalizeData(y_test[i], config.data_mean, config.data_std, config.dim_to_ignore)
else:
pred = copy.deepcopy(y_predict[i])
gt = copy.deepcopy(y_test[i])
for k in np.arange(3, pred.shape[1]-2, 3):
pred[j, k:k + 3] = rotmat2euler(expmap2rotmat(pred[j, k:k + 3]))
gt[j, k:k + 3] = rotmat2euler(expmap2rotmat(gt[j, k:k + 3]))
pred[:, 0:6] = 0
gt[:, 0:6] = 0
idx_to_use = np.where(np.std(gt, 0) > 1e-4)[0]
euc_error = np.power(gt[:, idx_to_use] - pred[:, idx_to_use], 2)
euc_error = np.sum(euc_error, 1)
euc_error = np.sqrt(euc_error)
mean_errors[i, :] = euc_error
mme = np.mean(mean_errors, 0)
print("\n" + action)
toprint_idx = np.array([1, 3, 7, 9, 13, 15, 17, 24])
idx = np.where(toprint_idx < len(mme))[0]
toprint_list = ["& {:.3f} ".format(mme[toprint_idx[i]]) for i in idx]
print("".join(toprint_list))
mme_mean = np.mean(mme[toprint_idx[idx]])
return mme, mme_mean
def forward_kinematics(data, config, bone_):
# this step is for forward
bone = np.zeros([bone_.shape[0]+1, bone_.shape[1]])
bone[1:, :] = bone_
nframes = data.shape[0]
data = data.reshape([nframes, -1, 3])
njoints = data.shape[1] + 1
lie_params = np.zeros([nframes, njoints, 6])
for i in range(njoints - 1):
lie_params[:, i, 0:3] = data[:, i, :]
lie_params[:, :, 3:6] = bone
lie_params[:, 0, 3:6] = np.zeros([3])
joint_xyz_f = np.zeros([nframes, njoints, 3])
for i in range(nframes):
joint_xyz_f[i, :, :] = computelie(np.squeeze(lie_params[i, :, :]))
return joint_xyz_f
def computelie(lie_params):
njoints = np.shape(lie_params)[0]
A = np.zeros((njoints, 4, 4))
for j in range(njoints):
if j == 0:
A[j, :, :] = lietomatrix(lie_params[j, 0: 3].T, lie_params[j, 3:6].T)
else:
A[j, :, :] = np.matmul(np.squeeze(A[j - 1, :, :]),
lietomatrix(lie_params[j, 0:3].T, lie_params[j, 3:6].T))
joint_xyz = np.zeros((njoints, 3))
for j in range(njoints):
coor = np.array([0, 0, 0, 1]).reshape((4, 1))
xyz = np.matmul(np.squeeze(A[j, :, :]), coor)
joint_xyz[j, :] = xyz[0:3, 0]
return joint_xyz
def lietomatrix(angle, trans):
R = expmap2rotmat(angle)
T = trans
SEmatrix = np.concatenate((np.concatenate((R, T.reshape(3, 1)), axis=1), np.array([[0, 0, 0, 1]])))
return SEmatrix
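# Sanity sketch: a zero rotation vector yields a pure translation, e.g.
#   lietomatrix(np.zeros(3), np.array([1., 2., 3.]))[:3, 3] -> array([1., 2., 3.])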
def fk(data, config, bone):
if config.dataset == 'Human':
xyz = []
for frame in range(config.output_window_size):
xyz_new = forward_kinematics_h36m(data[frame])
xyz.append(xyz_new)
xyz = np.array(xyz)
else:
xyz = forward_kinematics(data, config, bone)
return xyz
def forward_kinematics_h36m(angles):
"""
Modified from forward_kinematics_h36m
"""
parent = np.array([0, 1, 2, 3, 4, 5, 1, 7, 8, 9, 10, 1, 12, 13, 14, 15, 13,
17, 18, 19, 20, 21, 20, 23, 13, 25, 26, 27, 28, 29, 28, 31]) - 1
offset = np.array([[0., 0., 0.],
[-132.95, 0., 0.],
[0., -442.89, 0.],
[0., -454.21, 0.],
[0., 0., 162.77],
[0., 0., 75.],
[132.95, 0., 0.],
[0., -442.89, 0.],
[0., -454.21, 0.],
[0., 0., 162.77],
[0., 0., 75.],
[0., 0., 0.],
[0., 233.38, 0.],
[0., 257.08, 0.],
[0., 121.13, 0.],
[0., 115., 0.],
[0., 257.08, 0.],
[0., 151.03, 0.],
[0., 278.88, 0.],
[0., 251.73, 0.],
[0., 0., 0.],
[0., 0., 100.],
[0., 137.5, 0.],
[0., 0., 0.],
[0., 257.08, 0.],
[0., 151.03, 0.],
[0., 278.88, 0.],
[0., 251.73, 0.],
[0., 0., 0.],
[0., 0., 100.],
[0., 137.5, 0.],
[0., 0., 0.]])
expmapInd = np.split(np.arange(4, 100) - 1, 32)
# Structure that indicates parents for each joint
njoints = 32
xyzStruct = [dict() for x in range(njoints)]
for i in np.arange(njoints):
thisRotation = expmap2rotmat(angles[expmapInd[i]])
if parent[i] == -1: # Root node
xyzStruct[i]['rotation'] = thisRotation
# xyzStruct[i]['rotation'] = np.eye(3)
xyzStruct[i]['xyz'] = np.reshape(offset[i, :], (1, 3))
else:
xyzStruct[i]['xyz'] = (offset[i, :]).dot(xyzStruct[parent[i]]['rotation']) + xyzStruct[parent[i]]['xyz']
xyzStruct[i]['rotation'] = thisRotation.dot(xyzStruct[parent[i]]['rotation'])
xyz = [xyzStruct[i]['xyz'] for i in range(njoints)]
xyz = np.array(xyz).squeeze()
xyz = xyz[:, [0, 2, 1]]
return xyz
def prepare_loss(data, length, dim_to_ignore):
"""
recover ignore dimension in data to calculate lie loss
:param data: prediction data
:param length: length of one single human pose. 99 for h3.6m dataset
:param dim_to_ignore: get from function normalization_stats
:return: recovered data
"""
origData = torch.zeros([data.shape[0], data.shape[1], length], device=data.device)
dimensions_to_use = []
for i in range(length):
if i in dim_to_ignore:
continue
dimensions_to_use.append(i)
origData[:, :,dimensions_to_use] = data
return origData[:, :, 3:]
def normalize_data(data, data_mean, data_std, dim_to_use):
"""
Copied and modified from https://github.com/una-dinosauria/human-motion-prediction
"""
data_out = []
for idx in range(len(data)):
data_out.append(np.divide((data[idx] - data_mean), data_std))
data_out[-1] = data_out[-1][:, dim_to_use]
return data_out
def normalize_data_dir(data, data_mean, data_std, dim_to_use):
"""
Copied from https://github.com/una-dinosauria/human-motion-prediction
"""
data_out = {}
for key in data.keys():
data_out[key] = np.divide((data[key] - data_mean), data_std)
data_out[key] = data_out[key][:, dim_to_use]
return data_out
def normalization_stats(completeData):
"""
Copied from https://github.com/una-dinosauria/human-motion-prediction
"""
data_mean = np.mean(completeData, axis=0)
data_std = np.std(completeData, axis=0)
dimensions_to_ignore = []
dimensions_to_use = []
dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))
dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))
data_std[dimensions_to_ignore] = 1.0
return [data_mean, data_std, dimensions_to_ignore, dimensions_to_use]
def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore):
"""
Copied from https://github.com/una-dinosauria/human-motion-prediction
"""
T = normalizedData.shape[0]
D = data_mean.shape[0]
origData = np.zeros((T, D), dtype=np.float32)
dimensions_to_use = []
for i in range(D):
if i in dimensions_to_ignore:
continue
dimensions_to_use.append(i)
dimensions_to_use = np.array(dimensions_to_use)
origData[:, dimensions_to_use] = normalizedData
stdMat = data_std.reshape((1, D))
stdMat = np.repeat(stdMat, T, axis=0)
meanMat = data_mean.reshape((1, D))
meanMat = np.repeat(meanMat, T, axis=0)
origData = np.multiply(origData, stdMat) + meanMat
return origData
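# Composition sketch (shapes are illustrative): for a (frames x D) pose array `poses`,
#   mean, std, ignore, use = normalization_stats(poses)
#   normed = np.divide(poses - mean, std)[:, use]           # what normalize_data does per clip
#   recovered = unNormalizeData(normed, mean, std, ignore)  # back to (frames x D)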
def revert_coordinate_space(channels, R0, T0):
"""
Copied from https://github.com/una-dinosauria/human-motion-prediction
"""
n, d = channels.shape
channels_rec = copy.copy(channels)
R_prev = R0
T_prev = T0
rootRotInd = np.arange(3, 6)
# Loop through the passed posses
for ii in range(n):
R_diff = expmap2rotmat(channels[ii, rootRotInd])
R = R_diff.dot(R_prev)
channels_rec[ii, rootRotInd] = np.reshape(rotmat2expmap(R), 3)
T = T_prev + ((R_prev.T).dot(np.reshape(channels[ii, :3], [3, 1]))).reshape(-1)
channels_rec[ii, :3] = T
T_prev = T
R_prev = R
return channels_rec
def rotmat2expmap(R):
theta = np.arccos((np.trace(R) - 1) / 2.0)
if theta < 1e-6:
A = np.zeros((3, 1))
else:
A = theta / (2 * np.sin(theta)) * np.array([[R[2, 1] - R[1, 2]], [R[0, 2] - R[2, 0]], [R[1, 0] - R[0, 1]]])
return A
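# Round-trip sketch (the axis-angle vector below is an arbitrary example):
#   A = np.array([0.1, -0.2, 0.3])
#   np.allclose(rotmat2expmap(expmap2rotmat(A)).ravel(), A)   # -> True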
class Progbar(object):
"""Progbar class copied from https://github.com/fchollet/keras/
Displays a progress bar.
Small edit : added strict arg to update
# Arguments
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=[], exact=[], strict=[]):
"""
Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact: List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far),
current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for k, v in exact:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1]
for k, v in strict:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = v
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current)/self.target
prog_width = int(self.width*prog)
if prog_width > 0:
bar += ('='*(prog_width-1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.'*(self.width-prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit*(self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if type(self.sum_values[k]) is list:
info += ' - %s: %.4f' % (k,
self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width-self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k,
self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + "\n")
def add(self, n, values=[]):
self.update(self.seen_so_far+n, values)
| 2.171875
| 2
|
qhard/__init__.py
|
qhard/qhard
| 0
|
12780496
|
<filename>qhard/__init__.py
# This file is part of QHard: quantum hardware modelling.
#
# Author: <NAME>, 2017 and later.
############################################################################
# Qubits and resonator.
from qhard.fluxonium import *
from qhard.transmon import *
from qhard.cavity import *
# Interacting systems.
from qhard.coupobj import *
# Time-domain simulations.
from qhard.pshapes import *
| 1.476563
| 1
|
Acquisition/ubiment_parameters.py
|
LCAV/Lauzhack2020
| 0
|
12780497
|
<gh_stars>0
# Socket port and baudrate are also used as systemID
BEACON_PORT = 7582
SENSORS_PORT = 7586
system_list = {
'Beacon':{
'id': BEACON_PORT,
'port': BEACON_PORT,
'tags': [444, 14954135790684542069]
},
'Sensors':{
'id': SENSORS_PORT,
'port': SENSORS_PORT,
'tags': [0]
},
}
data_fields = [
"timestamp", # time in milliseconds in epoch time
# the device we want to locate (previously named tag_id)
"device_id",
# we use the UDP port as system id (for now at least, but we must change it to locate several device at a time)
"system_id",
# anchor used to take the current measure (previously named locator_id)
"anchor_id",
# [theta_x, theta_y, theta_z] corresponds to the orientation of the device (radian)
"theta_x",
"theta_y",
"theta_z",
"txpower", # transmitting power of the anchor
"rssi", # signal strength received by the device
# [acc_x, acc_y, acc_z] corresponds to the local acceleration vector of the device
"acc_x",
"acc_y",
"acc_z",
"is_step_detected" # step detected by imu
]
# These are the index of the values when we write them
data_timestamp = data_fields.index("timestamp")
data_device_id = data_fields.index("device_id")
data_system_id = data_fields.index("system_id")
data_anchor_id = data_fields.index("anchor_id")
data_txpower = data_fields.index("txpower")
data_rssi = data_fields.index("rssi")
data_theta_x = data_fields.index("theta_x")
data_theta_y = data_fields.index("theta_y")
data_theta_z = data_fields.index("theta_z")
data_acc_x = data_fields.index("acc_x")
data_acc_y = data_fields.index("acc_y")
data_acc_z = data_fields.index("acc_z")
def data_dict2list(dic):
return [dic[f] if f in list(dic.keys()) else 'NaN' for f in data_fields]
def data_list2dict(array):
dic = {}
for k in range(len(array)):
val = array[k]
if val is not None and not val == 'NaN':
field = data_fields[k]
dic[field] = val
return dic
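# A minimal round-trip sketch (field values are hypothetical): converting a partial
# reading to the fixed-order list pads missing fields with 'NaN', and converting
# back drops them again:
#   row = data_dict2list({'timestamp': 1605000000000, 'device_id': 444, 'rssi': -67})
#   assert data_list2dict(row) == {'timestamp': 1605000000000, 'device_id': 444, 'rssi': -67}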
imu_raw_fields = [
"timestamp",
"device_id",
"system_id",
"accelerometer_x",
"accelerometer_y",
"accelerometer_z",
"magnetic_field_x",
"magnetic_field_y",
"magnetic_field_z",
"gravity_x",
"gravity_y",
"gravity_z",
"linear_acceleration_x",
"linear_acceleration_y",
"linear_acceleration_z",
"gyroscope_x",
"gyroscope_y",
"gyroscope_z",
"orientation_qx",
"orientation_qy",
"orientation_qz",
"orientation_qw",
"orientation_gravaccgyro_qx",
"orientation_gravaccgyro_qy",
"orientation_gravaccgyro_qz",
"orientation_gravaccgyro_qw",
"orientation_accgyro_qx",
"orientation_accgyro_qy",
"orientation_accgyro_qz",
"orientation_accgyro_qw",
"is_step_detected_sensor",
"is_step_detected"
]
def imu_raw_dict2list(dic):
return [dic[f] if f in list(dic.keys()) else 'NaN' for f in imu_raw_fields]
| 2.640625
| 3
|
operators/object_detector.py
|
JustusSchwan/MasterThesis
| 0
|
12780498
|
import dlib
import cv2
from os import path
from dataflow import ports
class ObjectDetectorOpencv:
def __init__(self, model):
self.detector = cv2.CascadeClassifier(model)
self.source_detection = ports.EventSource()
def detect_object(self, img):
detect = self.detector.detectMultiScale(img, 1.2, 7, 0, (50, 50))
if len(detect) > 0:
(x, y, w, h) = detect[0]
self.source_detection.fire(dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
class MultiObjectDetectorOpencv:
def __init__(self, models):
self.detectors = []
for model in models:
self.detectors.append(cv2.CascadeClassifier(model))
self.source_detection = ports.EventSource()
def detect_object(self, img):
for detector in self.detectors:
detect = detector.detectMultiScale(img, 1.1, 7, 0, (50, 50))
if len(detect) > 0:
(x, y, w, h) = detect[0]
self.source_detection.fire(dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
break
def FaceDetectorOpencv():
return ObjectDetectorOpencv(
path.join(path.dirname(path.abspath(__file__)), 'models/haarcascade_frontalface_alt2.xml'))
def HandDetectorOpencv():
return MultiObjectDetectorOpencv(
(
path.join(path.dirname(path.abspath(__file__)), 'models/palm.xml'),
path.join(path.dirname(path.abspath(__file__)), 'models/fist.xml'),
path.join(path.dirname(path.abspath(__file__)), 'models/closed_frontal_palm.xml'),
path.join(path.dirname(path.abspath(__file__)), 'models/aGest.xml')
))
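# A minimal usage sketch ('frame.png' is a hypothetical input image; detections are
# delivered through the source_detection event port rather than returned):
if __name__ == '__main__':
    detector = FaceDetectorOpencv()
    frame = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)
    if frame is not None:
        detector.detect_object(frame)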
| 2.4375
| 2
|
pymtl3/passes/tracing/test/VcdGenerationPass_test.py
|
mondO/pymtl3
| 0
|
12780499
|
#=========================================================================
# VcdGenerationPass_test.py
#=========================================================================
# Perform limited tests on the VCD generation pass. These tests are limited
# in the sense that they do not compare the entire output against some
# reference output, which is hard to obtain in the case of VCD generation.
# Our goal is to have some regression test cases that can hopefully inform
# us of any incompatible changes that lead to the failure of VCD generation
# during a major update of PyMTL.
#
# Author: <NAME>
# Date: Nov 1, 2019
from pymtl3.datatypes import *
from pymtl3.dsl import *
from pymtl3.passes import TracingConfigs
from pymtl3.passes.PassGroups import SimulationPass
def run_test( dut, tv, tv_in, tv_out ):
vcd_file_name = dut.__class__.__name__ + "_funky"
dut.config_tracing = TracingConfigs( tracing='vcd', vcd_file_name=vcd_file_name )
dut.elaborate()
dut.apply( SimulationPass() )
for v in tv:
tv_in( dut, v )
dut.tick()
tv_out( dut, v )
with open(vcd_file_name+".vcd") as fd:
file_str = ''.join( fd.readlines() )
all_signals = dut.get_input_value_ports() + \
dut.get_output_value_ports() + \
dut.get_wires()
for signal in all_signals:
assert signal._dsl.my_name in file_str
def test_vector_signals():
class A( Component ):
def construct( s ):
s.in0 = InPort( Bits32 )
s.in1 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def add_upblk():
s.out = s.in0 + s.in1
def tv_in( m, tv ):
m.in0 = tv[0]
m.in1 = tv[1]
def tv_out( m, tv ):
assert m.out == tv[2]
run_test( A(), [
# in0 in1 out
[ b32(0), b32(-1), b32(-1), ],
[ b32(1), b32(1), b32(2), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(-1), b32(0), b32(-1), ],
[ b32(42), b32(42), b32(84), ],
], tv_in, tv_out )
def test_bitstruct_signals():
bs = mk_bitstruct( "BitStructType", {
'foo' : Bits1,
'bar' : Bits32,
} )
class A2( Component ):
def construct( s ):
s.in0 = InPort( bs )
s.in1 = InPort( Bits32 )
s.out = OutPort( Bits32 )
@s.update
def add_upblk():
s.out = s.in0.bar + s.in1
def tv_in( m, tv ):
m.in0 = tv[0]
m.in1 = tv[1]
def tv_out( m, tv ):
assert m.out == tv[2]
run_test( A2(), [
# in0 in1 out
[ bs(b1(0), b32(0)), b32(-1), b32(-1), ],
[ bs(b1(0), b32(1)), b32(1), b32(2), ],
[ bs(b1(0), b32(-1)), b32(0), b32(-1), ],
[ bs(b1(0), b32(42)), b32(42), b32(84), ],
], tv_in, tv_out )
| 2.09375
| 2
|
src/settings/admin.py
|
oussamabouchikhi/Bigdeals
| 2
|
12780500
|
<reponame>oussamabouchikhi/Bigdeals<gh_stars>1-10
from django.contrib import admin
# Register your models here.
from .models import Brand, Variant
admin.site.register(Brand)
admin.site.register(Variant)
| 1.234375
| 1
|
ncitools/_vdimain.py
|
Kirill888/nci-tools
| 6
|
12780501
|
import click
import sys
from collections import namedtuple
from random import randint
Ctx = namedtuple('Ctx', ['ctl', 'ssh', 'ssh_cfg'])
@click.group()
@click.pass_context
@click.option('--host', default='vdi.nci.org.au', help='Customize vdi login node')
@click.option('--user', help='SSH user name, if not given will be read from ~/.ssh/config')
@click.option('--no-ask', is_flag=True, help='Do not ask for passwords')
def cli(ctx, host, user, no_ask):
""" Control and query info about VDI sessions
"""
from ._ssh import open_ssh
from .vdi import vdi_ctl
try:
ssh, ssh_cfg = open_ssh(host, user, no_ask=no_ask)
    except Exception:
click.echo('Failed to connect to "{}{}"'.format(user+'@' if user else '', host))
ctx.exit()
ctl = vdi_ctl(ssh)
ctx.obj = Ctx(ssh=ssh, ssh_cfg=ssh_cfg, ctl=ctl)
@cli.command('launch')
@click.pass_obj
@click.option('--force', is_flag=True, help='Launch new session even if one is already running')
def launch(ctx, force):
""" Launch session if not running
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) != 0 and not force:
click.echo('Job already running', err=True)
sys.exit(1)
job = ctl('launch', '--partition', 'main')
click.echo(job.get('id'))
return 0
@cli.command('terminate')
@click.pass_obj
def terminate(ctx):
""" Shutdown session (all sessions actually)
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
for job in jobs:
jobid = job['id']
click.echo('Terminating {}'.format(jobid))
ctl('terminate', '--jobid', jobid)
@cli.command('host')
@click.pass_obj
def hostname(ctx):
""" Print hostname for every active session
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
host = ctl('get-host', '--jobid', job['id']).get('host')
click.echo(host)
return 0
@cli.command('get-passwd')
@click.pass_obj
def get_passwd(ctx):
""" Print VNC password
"""
ctl = ctx.ctl
password = ctl('get-passwd').get('passwd')
if password is None:
click.echo('Failed to query VNC password', err=True)
sys.exit(1)
click.echo(password)
return 0
def collect_vnc_info(ctl, job_id, ssh_cfg):
from ._ssh import mk_ssh
from .vdi import vdi_ctl
cfg = dict(**ssh_cfg)
host = ctl('get-host', '--jobid', job_id).get('host')
passwd = ctl('get-passwd').get('passwd')
cfg['hostname'] = host
try:
client_ctl = vdi_ctl(mk_ssh(cfg))
    except Exception:
click.echo('Failed to connect to {}'.format(host), err=True)
sys.exit(2)
display = client_ctl('get-display-nbr').get('display')
if display is None:
click.echo('Failed to query display {}'.format(host), err=True)
sys.exit(3)
try:
display = int(display[1:]) # Parse `:2`
except ValueError:
click.echo('Failed to parse display number: "{}"'.format(display))
sys.exit(3)
return dict(host=host,
display=display,
port=display+5900,
passwd=passwd)
def get_vnc_tunnel_cmd(ctx, job_id, local_port):
v_map = {True: 'yes', False: 'no'}
opts = dict(
PasswordAuthentication=False,
ChallengeResponseAuthentication=False,
KbdInteractiveAuthentication=False,
PubkeyAuthentication=True,
StrictHostKeyChecking=True,
)
args = ['-T'] + ['-o{}={}'.format(k, v_map.get(v, v))
for k, v in opts.items()]
cmd = '/opt/vdi/bin/session-ctl --configver=20173552330 tunnel'.split(' ')
user = ctx.ssh_cfg.get('user')
if user is not None:
args.extend(['-l', user])
info = collect_vnc_info(ctx.ctl, job_id, ctx.ssh_cfg)
fwd_args = ['-L',
'{local_port}:127.0.0.1:{remote_port} {host}'.format(
local_port=local_port,
remote_port=info['port'],
host=info['host'])]
return ['ssh'] + args + fwd_args + cmd
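# For illustration, the assembled command looks roughly like (host, user and ports
# are hypothetical):
#   ssh -T -oPasswordAuthentication=no ... -l user -L 5901:127.0.0.1:5902 vdi-node-42 \
#       /opt/vdi/bin/session-ctl --configver=20173552330 tunnel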
@cli.command('display-nbr')
@click.option('--as-port', is_flag=True, help='Print it as a port number of the VNC server')
@click.pass_obj
def display_nbr(ctx, as_port=False):
""" Print display number for active session (s)
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
info = collect_vnc_info(ctl, job['id'], ctx.ssh_cfg)
if as_port:
click.echo('%d' % info['port'])
else:
click.echo(':%d' % info['display'])
@cli.command('vnc-tunnel-cmd')
@click.option('--local-port', type=int, default=0, help='Local port to use for ssh forwarding')
@click.pass_obj
def vnc_tunnel_cmd(ctx, local_port=0):
""" Print port forwarding command
"""
ctl = ctx.ctl
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
local_port = local_port or randint(10000, 65000)
for job in jobs:
cmd = get_vnc_tunnel_cmd(ctx, job['id'], local_port)
click.echo(' '.join(cmd))
@cli.command('nbconnect')
@click.option('--local-port', type=int, default=0, help='Local port to use for ssh forwarding')
@click.option('--runtime-dir', help='Jupyter runtime dir on a remote `jupyter --runtime-dir`')
@click.pass_obj
def nbconnect(ctx, local_port=0, runtime_dir=None):
""" Connect to notebook on VDI
"""
from ._ssh import mk_ssh
from .nbconnect import run_nb_tunnel
ctl = ctx.ctl
ssh_cfg = ctx.ssh_cfg
jobs = ctl('list-avail', '--partition', 'main', flatten=False)
if len(jobs) == 0:
click.echo('No jobs running', err=True)
sys.exit(1)
for job in jobs:
host = ctl('get-host', '--jobid', job['id']).get('host')
ssh_cfg['hostname'] = host
try:
ssh = mk_ssh(ssh_cfg)
        except Exception:
click.echo('Failed to connect to {}'.format(host))
sys.exit(2)
sys.exit(run_nb_tunnel(ssh, ssh_cfg, runtime_dir=runtime_dir, local_port=local_port))
def _cli():
cli(obj={})
if __name__ == '__main__':
_cli()
| 2.3125
| 2
|
authors/apps/articles/views.py
|
andela/ah-the-answer-backend
| 0
|
12780502
|
<filename>authors/apps/articles/views.py
from collections import OrderedDict
import os
from rest_framework.response import Response
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.exceptions import APIException
from rest_framework.pagination import LimitOffsetPagination
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
import cloudinary
from drf_yasg.utils import swagger_auto_schema
from .serializers import (ArticleSerializer, ArticleImageSerializer,
ReviewsSerializer, HighlightSerializer,
FavoriteSerializer, UserLikesArticleSerialzer)
from .models import (Article, ArticleImage, LikeArticles, FavoriteModel,
ReviewsModel, Highlight)
from .permissions import ReadOnly
from authors.apps.authentication.models import User
from .utils import (is_article_owner, has_reviewed, round_average,
generate_share_url)
from .filters import ArticleFilter
from authors.apps.notify.views import NotificationsView
def find_article(slug):
"""Method to check if an article exists"""
try:
return Article.objects.get(slug=slug)
except Article.DoesNotExist:
APIException.status_code = 404
raise APIException({
"message": "The article requested does not exist"
})
def find_favorite(slug):
"""Method checks if an article is available in the FavouriteModel"""
try:
return FavoriteModel.objects.get(article__slug=slug)
except FavoriteModel.DoesNotExist:
APIException.status_code = 404
raise APIException({
"message": "The article requested does not exist in your favorites"
})
def get_highlights(slug):
"""Method to get all highlights of an article by slug"""
return Highlight.objects.select_related(
'article').filter(article__slug=slug)
def format_highlight(highlights, saved_article):
"""Method to update start and end index of highlight if article
body is updated or to delete the highlight if it does not exist"""
for highlight_count in range(len(highlights)):
highlight = Highlight.objects.get(
pk=highlights[highlight_count].pk
)
section = highlights[highlight_count].section
start = highlights[highlight_count].start
end = highlights[highlight_count].end
body_segment = saved_article.body[
start:end + 1]
# Find if highlighted section still exists
highlight_result = saved_article.body.find(section)
updated_end = highlight_result + len(section) - 1
# Find if there are multiple occurences of section
section_count = saved_article.body.count(section)
highlight_data = {
"start": highlight_result,
"end": updated_end
}
# update the new start and end positions
highlight_serializer = HighlightSerializer(
instance=highlight, data=highlight_data, partial=True
)
# Compare highlighted section with article body
if section != body_segment and (
highlight_result == -1 or section_count > 1):
Highlight.objects.get(
pk=highlights[highlight_count].pk).delete()
if section_count == 1:
highlight_serializer.is_valid(raise_exception=True)
highlight_serializer.save()
def find_image(id, slug):
"""Method to find an image by id"""
return ArticleImage.objects.filter(pk=id).select_related(
'article').filter(article__slug=slug)
def get_images(slug):
"""Method to get all images for an article"""
return ArticleImage.objects.select_related(
'article').filter(article__slug=slug)
class ArticleView(APIView):
"""Class that contains the method that retrieves all articles and
creates an article"""
permission_classes = (IsAuthenticated | ReadOnly,)
filter_fields = ('author', 'title',)
pagination_class = LimitOffsetPagination
def get(self, request):
"""Method to get all articles"""
# Functionality to search articles by description, author and title
if request.GET.get('search'):
search_parameter = request.GET.get('search')
searched_articles = Article.objects.filter(Q(
title__icontains=search_parameter) | Q(
description__icontains=search_parameter) | Q(
author__username__icontains=search_parameter))
# filter the model for tags by converting query parameters into a list and
# comparing that query list with list of tags in every instance of the object
if not searched_articles:
tag_list = search_parameter.split(",")
searched_articles = Article.objects.filter(
tags__name__in=tag_list
)
searched_articles.distinct()
search_serializer = ArticleSerializer(
searched_articles, many=True)
return Response({"articles": search_serializer.data})
# Functionality to filter articles by author and title
articles = Article.objects.all()
article_filter = ArticleFilter()
filtered_articles = article_filter.filter_queryset(
request, articles, self)
# loop through articles and generate a tags list using the values from the model
if filtered_articles.exists():
for article in filtered_articles:
article.tags = list(article.tags.names())
paginator = self.pagination_class()
page = paginator.paginate_queryset(filtered_articles, request)
if filtered_articles:
serializer = ArticleSerializer(page, many=True)
page_results = paginator.get_paginated_response(serializer.data)
# rename key 'results' to 'articles'
response = OrderedDict([('articles', v) if k == 'results' else (k, v) for k, v in page_results.data.items()])
return Response(response, status=200)
else:
return Response({"message": "No article found", "articles": []},
status=200)
@swagger_auto_schema(request_body=ArticleSerializer,
responses={201: ArticleSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"})
def post(self, request):
"""Method to create an article"""
article = request.data.get('article')
# Create an article from the above data
serializer = ArticleSerializer(data=article)
if serializer.is_valid(raise_exception=True):
article_saved = serializer.save(author=self.request.user)
NotificationsView.send_notification(
"@{0} has posted a new article at {1}".format(
self.request.user.username,
os.getenv('DOMAIN') + '/api/articles/' + serializer.data.get('slug') + '/'
),
serializer.data,
'new-article'
)
return Response({
"success": "Article '{}' created successfully".format(
article_saved.title),
"article": serializer.data
}, status=201)
class RetrieveArticleView(APIView):
"""Class with get, put and delete methods"""
permission_classes = (IsAuthenticated | ReadOnly,)
def is_owner(self, current_user_id, article_author_id):
if article_author_id == current_user_id:
return True
def get(self, request, slug):
"""Method to get a specific article"""
article = find_article(slug)
article.tags = list(article.tags.names())
serializer = ArticleSerializer(article, many=False)
return Response({"article": serializer.data})
@swagger_auto_schema(request_body=ArticleSerializer,
responses={200: ArticleSerializer(),
400: "Bad Request",
404: "Not Found",
403: "Forbidden"})
def put(self, request, slug):
"""Method to update a specific article"""
saved_article = find_article(slug)
highlights = get_highlights(slug)
data = request.data.get('article')
serializer = ArticleSerializer(
instance=saved_article, data=data, partial=True)
if serializer.is_valid(raise_exception=True):
if self.is_owner(saved_article.author.id, request.user.id) is True:
article_saved = serializer.save()
# Delete/Update highlights affected by updates on article body
format_highlight(highlights, saved_article)
return Response({
"success": "Article '{}' updated successfully".format(
article_saved.title),
"article": serializer.data
})
response = {"message": "Only the owner can edit this article."}
return Response(response, status=403)
def delete(self, request, slug):
"""Method to delete a specific article and all its images"""
article = find_article(slug)
images = get_images(slug)
if self.is_owner(article.author.id, request.user.id) is True:
for image in range(len(images)):
cloudinary.uploader.destroy(images[image].public_id)
article.delete()
return Response(
{"message": "Article `{}` has been deleted.".format(slug)},
status=200)
response = {"message": "Only the owner can delete this article."}
return Response(response, status=403)
class ArticleImageView(APIView):
"""Class with methods to upload an image and retrieve all images of an
article"""
permission_classes = (IsAuthenticated | ReadOnly,)
@swagger_auto_schema(request_body=ArticleImageSerializer,
responses={200: ArticleImageSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"},
)
def post(self, request, slug):
"""Method to upload an image"""
article = find_article(slug)
if article.author.id != request.user.id:
return Response({
"message": "Only the owner of this article can upload images."
}, status=403)
if request.FILES:
try:
response = cloudinary.uploader.upload(
request.FILES['file'],
allowed_formats=[
'png', 'jpg', 'jpeg', 'gif'
]
)
except Exception as e:
APIException.status_code = 400
raise APIException({
"errors": str(e)
})
image_url = response.get('secure_url')
public_id = response.get('public_id')
height = response.get('height')
width = response.get('width')
serializer = ArticleImageSerializer(
data={
"image_url": image_url, "public_id": public_id,
"height": height, "width": width
}
)
if serializer.is_valid(raise_exception=True):
serializer.save(article=article)
response = {
"message": "Image for article `{}` uploaded successfully."
.format(slug),
"image_url": image_url, "height": height, "width": width
}
return Response(response, status=200)
else:
response = {
"message": "Please select an image."
}
return Response(response, status=400)
def get(self, request, slug):
"""Method to get all images of an article"""
find_article(slug)
images = get_images(slug)
serializer = ArticleImageSerializer(images, many=True)
return Response(
{
"images": serializer.data,
"imagesCount": images.count()
}
)
class ArticleImageDetailView(APIView):
"""Class with methods to get and delete a specific image"""
permission_classes = (IsAuthenticated | ReadOnly,)
def get(self, request, slug, id):
"""Method to get a specific image by its id"""
find_article(slug)
image = find_image(id, slug)
image_serializer = ArticleImageSerializer(image, many=True)
return Response(
{
"image": image_serializer.data
}
)
def delete(self, request, slug, id):
"""Method to delete a specific image by its id"""
article = find_article(slug)
image = find_image(id, slug)
if not image:
return Response({
"message": "The requested image does not exist."
}, status=404)
if article.author.id == request.user.id:
cloudinary.uploader.destroy(image[0].public_id)
image.delete()
return Response({
"message": "Image `{}` for article `{}` has been deleted."
.format(id, slug)
}, status=200)
response = {"message": "Only the owner can delete this image."}
return Response(response, status=403)
class ReviewView(APIView):
permission_classes = (IsAuthenticated | ReadOnly,)
@swagger_auto_schema(request_body=ReviewsSerializer,
responses={200: ReviewsSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"},)
def post(self, request, slug):
saved_article = find_article(slug)
if saved_article.author.pk == self.request.user.pk:
APIException.status_code = status.HTTP_400_BAD_REQUEST
raise APIException(
{"message": "You cannot review your own article"})
if has_reviewed(saved_article, self.request.user):
APIException.status_code = status.HTTP_403_FORBIDDEN
raise APIException(
{"message": "You have already reviewed this article"})
review = request.data.get('review')
serializer = ReviewsSerializer(data=review)
if serializer.is_valid(raise_exception=True):
serializer.save(article=saved_article,
reviewed_by=self.request.user)
return Response(
{
"success": "Review for '{}' created successfully".format(saved_article.title),
"Review": serializer.data
},
status=201
)
def get(self, request, slug):
try:
saved_article = find_article(slug)
average_rating = ReviewsModel.average_rating(saved_article.pk)
reviews = ReviewsModel.objects.filter(article__slug=slug)
serializer = ReviewsSerializer(reviews, many=True)
return Response(
{
"Average Rating": round_average(average_rating.get('rating_value__avg')),
"reviews": serializer.data},
)
except TypeError:
APIException.status_code = status.HTTP_404_NOT_FOUND
raise APIException(
{"errors": "There are no reviews for that article"})
@swagger_auto_schema(request_body=ReviewsSerializer,
responses={200: ReviewsSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"})
def put(self, request, slug, username=None):
try:
if username is None:
raise TypeError
saved_article = find_article(slug)
review = ReviewsModel.objects.get(
article=saved_article, reviewed_by__username=username)
if review and self.request.user.username == username:
data = request.data.get('review')
serializer = ReviewsSerializer(
instance=review, data=data, partial=True)
if serializer.is_valid(raise_exception=True):
review_saved = serializer.save()
return Response(
{"message": "Review for '{}' has been updated.".format(slug),
"Review": serializer.data
}, status=200)
return Response(
{"message": "You are Unauthorized to edit that review"},
                status=403
)
except ObjectDoesNotExist:
APIException.status_code = status.HTTP_404_NOT_FOUND
raise APIException(
{"errors": "That Review does not exist"})
except TypeError:
APIException.status_code = status.HTTP_400_BAD_REQUEST
raise APIException({"errors": "Invalid Url"})
except Exception as e:
APIException.status_code = status.HTTP_400_BAD_REQUEST
raise APIException({"errors": e.detail})
def delete(self, request, slug, username=None):
try:
if username is None:
raise TypeError
saved_article = find_article(slug)
review = ReviewsModel.objects.get(
article=saved_article, reviewed_by=self.request.user.pk)
if review and self.request.user.username == username:
review.delete()
return Response({"message": "Review for '{}' has been deleted.".format(slug)}, status=200)
raise APIException(
{"message": "You are Unauthorized to delete that review"})
except ObjectDoesNotExist:
APIException.status_code = status.HTTP_404_NOT_FOUND
raise APIException(
{"errors": "That Review does not exist"})
except TypeError:
APIException.status_code = status.HTTP_400_BAD_REQUEST
raise APIException({"errors": "Invalid Url"})
except Exception as e:
APIException.status_code = status.HTTP_400_BAD_REQUEST
raise APIException({"errors": e.detail})
class LikeArticleView(APIView):
"""
Class for POST view allowing authenticated users to like articles
"""
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(request_body=ArticleSerializer,
responses={201: ArticleSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"})
def post(self, request, slug):
"""
method for generating a like for a particular article
"""
article = find_article(slug)
liked = LikeArticles.react_to_article(request.user, article, 1)
if not liked:
return Response({
'message': 'you have reverted your'
' like for the article: {}'.format(article.title),
'article': ArticleSerializer(article).data
}, status=status.HTTP_202_ACCEPTED)
return Response({
'message': 'you liked the article: {}'.format(article.title),
'article': ArticleSerializer(article).data
},
status=status.HTTP_201_CREATED)
class DislikeArticleView(APIView):
"""
Class for POST view allowing authenticated users to dislike articles
"""
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(request_body=ArticleSerializer,
responses={201: ArticleSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"},)
def post(self, request, slug):
"""
method for generating a dislike for a particular article
"""
article = find_article(slug)
disliked = LikeArticles.react_to_article(request.user, article, 0)
if not disliked:
return Response({
'message': 'you have reverted your'
' dislike for the article: {}'.format(
article.title),
'article': ArticleSerializer(article).data
}, status=status.HTTP_202_ACCEPTED)
return Response({
'message': 'you disliked the article: {}'.format(article.title),
'article': ArticleSerializer(article).data
},
status=status.HTTP_201_CREATED)
class SocialShareArticleView(APIView):
permission_classes = (IsAuthenticated | ReadOnly,)
def get(self, request, slug, provider):
shared_article = find_article(slug)
context = {'request': request}
uri = request.build_absolute_uri()
# Remove the share/provider/ after the absolute uri
article_uri = uri.rsplit('share/', 1)[0]
try:
share_link = generate_share_url(
context, provider, shared_article, article_uri)
if share_link:
return Response({
"share": {
"provider": provider,
"link": share_link
}
})
except KeyError:
return Response({
"message": "Please select a valid provider - twitter, "
"facebook, email, telegram, linkedin, reddit"
}, status=200)
class FavoriteView(APIView):
"""This views handles the logic for creating and updating
records of a user's favorite articles
"""
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(request_body=FavoriteSerializer,
responses={201: FavoriteSerializer(),
400: "Bad Request",
403: "Forbidden",
404: "Not Found"})
def post(self, request, slug):
"""
To favorite an article users only need to hit this endpoint
/api/article/<slug>/favorite without any body
:param request:
:param slug:
:return: A success message of the article marked as favorite
"""
article = find_article(slug)
try:
fav = FavoriteModel(user=request.user, article=article,
favorite=True)
fav.save()
        except Exception:
            # Saving fails when this user has already favorited the article;
            # keep the original response so the endpoint stays idempotent.
            return Response({"message": "Added to favorites"})
return Response({"article": FavoriteSerializer(fav).data,
"message": "Added to favorites"}, status=201)
def delete(self, request, slug):
"""
When a user unmarks an article from being favorite, the record is
deleted from the favorite model
:param request:
:param slug:
:return:
"""
article = find_article(slug)
find_favorite(slug)
FavoriteModel.objects.get(article=article.id,
user=request.user).delete()
return Response({"message": "Removed from favorites"}, status=200)
class FavoriteListView(APIView):
"""Lists all the articles that a user has marked as favorite"""
permission_classes = (IsAuthenticated,)
def get(self, request):
"""
Gets all articles for that user that they have marked as favorite
:param request:
:return:
"""
favs = FavoriteModel.objects.filter(user=request.user)
return Response({"articles": FavoriteSerializer(favs, many=True).data,
"count": favs.count()}, status=200)
class HighlightView(APIView):
"""Class with methods to highlight and retrieve all highlights"""
permission_classes = (IsAuthenticated,)
def post(self, request, slug):
"""Method for highlighting an article"""
article = find_article(slug)
body_length = len(article.body)
article_id = article.id
user = self.request.user
highlight = request.data.get('highlight')
# Create a highlight from the above data
serializer = HighlightSerializer(data=highlight)
if serializer.is_valid(raise_exception=True):
start = highlight['start']
end = highlight['end']
section = article.body[start:end+1]
try:
comment = highlight['comment']
except KeyError:
comment = ''
if start >= end:
return Response({
"message": "Start position cannot be equal to"
" or greater than end position"
}, status=400)
if end > body_length - 1:
return Response({
"message": "End position is greater"
" than the article size of {}".format(body_length - 1)
}, status=400)
# check if highlight exists
highlight = Highlight.objects.filter(article=article_id,
user=user, start=start,
end=end,
comment=comment)
# If highlight or comment exists unhighlight or uncomment
if highlight.exists():
if comment == '':
message = "Highlight has been removed"
else:
message = "Comment has been removed"
highlight.delete()
return Response({"message": message})
if comment == '':
message = "Highlight has been added"
else:
message = "Comment has been added"
serializer.save(article=article, user=self.request.user,
section=section)
return Response({
"message": message,
"highlight": serializer.data
}, status=201)
def get(self, request, slug):
"""Method to retrieve all highlights for an article by slug"""
find_article(slug)
serializer = HighlightSerializer(get_highlights(slug), many=True)
return Response({
"highlights": serializer.data,
"highlightsCount": get_highlights(slug).count()
}, status=200)
class UserLikesArticleView(APIView):
"""Fetches a boolean for whether a user already liked an article or
not"""
def get(self, request, slug):
article = find_article(slug)
liked = LikeArticles.objects.filter(article=article, user=request.user)
if len(liked) > 0:
return Response({
'message': 'You have reacted to this article before',
'liked': UserLikesArticleSerialzer(liked, many=True).data
}, 200)
return Response({
'message': 'You have not reacted to this article'
})
class AllUserLikesArticleView(APIView):
"""Fetches all articles that a user has liked not"""
def get(self, request):
liked = LikeArticles.objects.filter(user=request.user)
if len(liked) > 0:
return Response({
'message': 'You have reacted to these articles',
'liked': UserLikesArticleSerialzer(liked, many=True).data
}, 200)
return Response({
'message': 'You have not reacted to any article'
})
| 1.96875
| 2
|
tidb/tests/test_tidb.py
|
davidlrosenblum/integrations-extras
| 1
|
12780503
|
import pytest
# test transforming tidb check config to openmetrics check config
from datadog_checks.base.utils.tagging import GENERIC_TAGS
from datadog_checks.tidb import TiDBCheck
from .conftest import EXPECTED_PD, EXPECTED_TIDB, EXPECTED_TIKV
@pytest.mark.unit
def test_create_check_instance_transform(tidb_instance):
check = TiDBCheck("test_config_transform", {}, [tidb_instance])
assert check.instance.get('prometheus_url') == 'http://localhost:10080/metrics'
assert check.instance.get('namespace') == 'tidb_cluster'
assert check.instance.get('tags') == ['tidb_cluster_name:test', 'tidb_cluster_component:tidb']
mapper = check.instance.get('labels_mapper')
for label in GENERIC_TAGS:
assert mapper.get(label) == label + "_in_app"
@pytest.mark.unit
def test_tidb_mock_metrics(aggregator, mock_tidb_metrics, tidb_instance):
check = TiDBCheck("test_tidb_mock_metrics", {}, [tidb_instance])
_check_and_assert(aggregator, EXPECTED_TIDB, check)
@pytest.mark.unit
def test_pd_mock_metrics(aggregator, mock_pd_metrics, pd_instance):
check = TiDBCheck("test_pd_mock_metrics", {}, [pd_instance])
_check_and_assert(aggregator, EXPECTED_PD, check)
@pytest.mark.unit
def test_tikv_mock_metrics(aggregator, mock_tikv_metrics, tikv_instance):
check = TiDBCheck("test_tidb_mock_metrics", {}, [tikv_instance])
_check_and_assert(aggregator, EXPECTED_TIKV, check)
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_cluster_metrics(aggregator, pd_instance, tikv_instance, tidb_instance):
check = TiDBCheck("test_cluster_metrics", {}, [tidb_instance])
_check_and_assert(aggregator, EXPECTED_TIDB, check)
check = TiDBCheck("test_cluster_metrics", {}, [pd_instance])
_check_and_assert(aggregator, EXPECTED_PD, check)
check = TiDBCheck("test_cluster_metrics", {}, [tikv_instance])
_check_and_assert(aggregator, EXPECTED_TIKV, check)
def _check_and_assert(agg, expected, c):
c.check(c.instance)
for name, tags in expected['metrics'].items():
agg.assert_metric(name, tags=tags)
for name, tags in expected['service_check'].items():
agg.assert_service_check(name, status=TiDBCheck.OK, tags=tags)
# since tidb cluster metrics cannot be listed thoroughly, we disable all completeness assertions here
# agg.assert_all_metrics_covered()
# agg.assert_metrics_using_metadata(get_metadata_metrics(), check_metric_type=False)
| 1.992188
| 2
|
tests/integration/pagure/test_service.py
|
mmuzila/ogr
| 0
|
12780504
|
import pytest
from requre.online_replacing import record_requests_for_all_methods
from tests.integration.pagure.base import PagureTests
from ogr.exceptions import OgrException
@record_requests_for_all_methods()
class Service(PagureTests):
def test_project_create(self):
"""
        Remove https://pagure.io/"name" before data regeneration.
        If you are not the owner of the repo, create your own.
"""
name = "new-ogr-testing-repo-jscotka"
project = self.service.get_project(repo=name, namespace=None)
assert not project.exists()
new_project = self.service.project_create(repo=name)
assert new_project.exists()
assert new_project.repo == name
project = self.service.get_project(repo=name, namespace=None)
assert project.exists()
def test_project_create_with_description(self):
"""
        Remove https://pagure.io/"name" before data regeneration.
        If you are not the owner of the repo, create your own.
"""
name = "new-ogr-testing-repo-with-description"
description = "The description of the newly created project."
project = self.service.get_project(repo=name, namespace=None)
assert not project.exists()
new_project = self.service.project_create(repo=name, description=description)
assert new_project.exists()
assert new_project.repo == name
assert new_project.get_description() == description
project = self.service.get_project(repo=name, namespace=None)
assert project.exists()
assert new_project.get_description() == description
def test_project_create_in_the_group(self):
"""
Remove https://pagure.io/packit-service/new-ogr-testing-repo-in-the-group
        before data regeneration if you have the rights to remove it; otherwise
        create your own suffix.
"""
name = "new-ogr-testing-repo-in-the-group-jscotka"
namespace = "packit-service"
project = self.service.get_project(repo=name, namespace=namespace)
assert not project.exists()
new_project = self.service.project_create(repo=name, namespace=namespace)
assert new_project.exists()
assert new_project.repo == name
project = self.service.get_project(repo=name, namespace=namespace)
assert project.exists()
def test_project_create_invalid_namespace(self):
name = "new-ogr-testing-repo"
namespace = "nonexisting"
with pytest.raises(OgrException, match=r".*Namespace doesn't exist.*"):
self.service.project_create(repo=name, namespace=namespace)
project = self.service.get_project(repo=name, namespace=namespace)
assert not project.exists()
def test_project_create_unauthorized_namespace(self):
name = "new-ogr-testing-repo"
namespace = "fedora-magazine"
with pytest.raises(
OgrException, match=r".*Cannot create project in given namespace.*"
):
self.service.project_create(repo=name, namespace=namespace)
project = self.service.get_project(repo=name, namespace=namespace)
assert not project.exists()
| 2.421875
| 2
|
huaweicloud-sdk-classroom/huaweicloudsdkclassroom/v3/model/judgement_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
| 64
|
12780505
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class JudgementResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'output': 'str',
'file_id': 'str',
'image_id': 'str',
'case_count': 'int',
'executed_count': 'int',
'testcases': 'list[JudgementCaseResult]'
}
attribute_map = {
'output': 'output',
'file_id': 'file_id',
'image_id': 'image_id',
'case_count': 'case_count',
'executed_count': 'executed_count',
'testcases': 'testcases'
}
def __init__(self, output=None, file_id=None, image_id=None, case_count=None, executed_count=None, testcases=None):
"""JudgementResult - a model defined in huaweicloud sdk"""
self._output = None
self._file_id = None
self._image_id = None
self._case_count = None
self._executed_count = None
self._testcases = None
self.discriminator = None
self.output = output
self.file_id = file_id
self.image_id = image_id
self.case_count = case_count
self.executed_count = executed_count
self.testcases = testcases
@property
def output(self):
"""Gets the output of this JudgementResult.
        Output result in standard (text) form
:return: The output of this JudgementResult.
:rtype: str
"""
return self._output
@output.setter
def output(self, output):
"""Sets the output of this JudgementResult.
        Output result in standard (text) form
:param output: The output of this JudgementResult.
:type: str
"""
self._output = output
@property
def file_id(self):
"""Gets the file_id of this JudgementResult.
        ID of the file-form output; the file details can be downloaded by this file ID
:return: The file_id of this JudgementResult.
:rtype: str
"""
return self._file_id
@file_id.setter
def file_id(self, file_id):
"""Sets the file_id of this JudgementResult.
        ID of the file-form output; the file details can be downloaded by this file ID
:param file_id: The file_id of this JudgementResult.
:type: str
"""
self._file_id = file_id
@property
def image_id(self):
"""Gets the image_id of this JudgementResult.
        ID of the image-form output; the image details can be downloaded by this image ID
:return: The image_id of this JudgementResult.
:rtype: str
"""
return self._image_id
@image_id.setter
def image_id(self, image_id):
"""Sets the image_id of this JudgementResult.
        ID of the image-form output; the image details can be downloaded by this image ID
:param image_id: The image_id of this JudgementResult.
:type: str
"""
self._image_id = image_id
@property
def case_count(self):
"""Gets the case_count of this JudgementResult.
        Total number of test cases in the case-form output
:return: The case_count of this JudgementResult.
:rtype: int
"""
return self._case_count
@case_count.setter
def case_count(self, case_count):
"""Sets the case_count of this JudgementResult.
        Total number of test cases in the case-form output
:param case_count: The case_count of this JudgementResult.
:type: int
"""
self._case_count = case_count
@property
def executed_count(self):
"""Gets the executed_count of this JudgementResult.
        Number of executed test cases in the case-form output
:return: The executed_count of this JudgementResult.
:rtype: int
"""
return self._executed_count
@executed_count.setter
def executed_count(self, executed_count):
"""Sets the executed_count of this JudgementResult.
        Number of executed test cases in the case-form output
:param executed_count: The executed_count of this JudgementResult.
:type: int
"""
self._executed_count = executed_count
@property
def testcases(self):
"""Gets the testcases of this JudgementResult.
        Results of the executed test cases in the case-form output
:return: The testcases of this JudgementResult.
:rtype: list[JudgementCaseResult]
"""
return self._testcases
@testcases.setter
def testcases(self, testcases):
"""Sets the testcases of this JudgementResult.
        Results of the executed test cases in the case-form output
:param testcases: The testcases of this JudgementResult.
:type: list[JudgementCaseResult]
"""
self._testcases = testcases
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JudgementResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
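# Illustrative sketch (not part of the generated SDK): how the openapi_types /
# attribute_map machinery documented above is typically used; the field values
# below are made up.
#
#   result = JudgementResult(output="42", case_count=3, executed_count=3)
#   result.to_dict()
#   # -> {'output': '42', 'file_id': None, 'image_id': None,
#   #     'case_count': 3, 'executed_count': 3, 'testcases': None}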
| 2.421875
| 2
|
fish_diff.py
|
seigo-pon/ai_tutorial
| 0
|
12780506
|
<reponame>seigo-pon/ai_tutorial
import cv2
import os
import shutil
img_last = None
idx = 0
save_dir = './fish'
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
cap = cv2.VideoCapture('./fish.mp4')
while True:
success, frame = cap.read()
if not success:
break
frame = cv2.resize(frame, (640, 360))
    # binarize the frame
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (15, 15), 0)
img_b = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)[1]
    if img_last is not None:
        # difference from the previous frame
frame_diff = cv2.absdiff(img_last, img_b)
cnts = cv2.findContours(frame_diff, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        # save each changed region as a cropped image
for pt in cnts:
x, y, w, h = cv2.boundingRect(pt)
if w < 100 or w > 500:
continue
img = frame[y:y+h, x:x+w]
cv2.imwrite(os.path.join(save_dir, f'{idx}.jpg'), img)
idx += 1
img_last = img_b
cap.release()
| 2.515625
| 3
|
compyler/node.py
|
dgisolfi/Compyler
| 0
|
12780507
|
<filename>compyler/node.py
#!/usr/bin/python3
# 2019-2-12
# <NAME>
class Node:
def __init__(self, name, parent, nid, kind, line, pos):
self.__name = name
self.__children = []
self.__parent = parent
# id for treelib to use
self.__nid = nid
# to hold kind of node (leaf or branch)
self.__kind = kind
self.__line = line
self.__position = pos
# Getters for all attributes
@property
def name(self):
return self.__name
@name.setter
def name(self, name):
self.__name = name
@property
def children(self):
return self.__children
@property
def parent(self):
return self.__parent
@property
def nid(self):
return self.__nid
@property
def kind(self):
return self.__kind
@property
def line(self):
return self.__line
@property
def position(self):
return self.__position
    # Setters
    def setParent(self, parent):
        self.__parent = parent

    def append(self, child):
        self.__children.append(child)
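# Illustrative sketch (not part of the original module): building a tiny tree
# with this Node class, assuming the plain setter methods defined above.
#
#   root = Node('Block', None, 0, 'branch', 1, 0)
#   leaf = Node('PrintStatement', root, 1, 'leaf', 1, 4)
#   root.append(leaf)
#   assert leaf.parent is root and root.children == [leaf]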
| 2.859375
| 3
|
ex6.py
|
ayesumon123/python-exercises
| 0
|
12780508
|
<filename>ex6.py
types_of_people = 10
x = f"There are {types_of_people} types of people."
binary = "binary"
do_not = "don't"
y = f"Those who know {binary} and those who {do_not}."
print(x)
print(y)
print(f"I said: {x}")
print(f"I also said: '{y}'")
hilarious = False
| 3.203125
| 3
|
examples/__init__.py
|
mirakels/pandevice
| 34
|
12780509
|
<reponame>mirakels/pandevice
__author__ = 'btorres-gil'
| 1.09375
| 1
|
test/test_pulljson.py
|
Teradata/PyTd
| 133
|
12780510
|
<filename>test/test_pulljson.py
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from teradata import pulljson
import unittest
import sys
if sys.version_info[0] == 2:
from StringIO import StringIO # @UnresolvedImport #@UnusedImport
else:
from io import StringIO # @UnresolvedImport @UnusedImport @Reimport
class TestJSONPullParser (unittest.TestCase):
def testNextEvent(self):
stream = StringIO("""{"key1":"value", "key2":100, "key3":null,
"key4": true, "key5":false, "key6":-201.50E1, "key7":{"key8":"value2",
"key9":null}, "key10":["value3", 10101010101010101010101, null,
{} ] }""")
reader = pulljson.JSONPullParser(stream)
# Start of object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
# Key1 - "value"
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key1")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, "value")
self.assertEqual(event.valueType, pulljson.STRING)
# Key2 - 100
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key2")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, 100)
self.assertEqual(event.valueType, pulljson.NUMBER)
# Key3 - null
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key3")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertIsNone(event.value)
self.assertEqual(event.valueType, pulljson.NULL)
# Key4 - true
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key4")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertTrue(event.value)
self.assertEqual(event.valueType, pulljson.BOOLEAN)
# Key5 - false
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key5")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertFalse(event.value)
self.assertEqual(event.valueType, pulljson.BOOLEAN)
# Key6
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key6")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, -2015)
self.assertEqual(event.valueType, pulljson.NUMBER)
# Key7
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key7")
# Start of key7 object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
# Key8 - value2
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key8")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, "value2")
self.assertEqual(event.valueType, pulljson.STRING)
# Key9 - null
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key9")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertIsNone(event.value)
# End of key7 object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
# Key10 - array[0] - value3
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key10")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
# Key10 - array[0] - value3
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
self.assertEqual(event.value, "value3")
self.assertEqual(event.valueType, pulljson.STRING)
self.assertEqual(event.arrayIndex, 0)
# Key10 - array[1] - 10101010101010101010101
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
self.assertEqual(event.value, 10101010101010101010101)
self.assertEqual(event.valueType, pulljson.NUMBER)
self.assertEqual(event.arrayIndex, 1)
# Key10 - array[2] - null
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
self.assertIsNone(event.value)
self.assertEqual(event.valueType, pulljson.NULL)
self.assertEqual(event.arrayIndex, 2)
# Key10 - array[3] - object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
self.assertEqual(event.arrayIndex, 3)
# Key10 - array[3] - object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
self.assertEqual(event.arrayIndex, 3)
# End of key 10 array.
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_ARRAY)
# End of object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
event = reader.nextEvent()
self.assertIsNone(event)
def testDocumentIncomplete(self):
stream = StringIO('{"key":"value"')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key")
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_INCOMPLETE_ERROR,
cm.exception.msg)
def testEmptyName(self):
stream = StringIO('{:"value"}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testExtraWhiteSpace(self):
stream = StringIO('{\n\t "key"\n\t\t: "\t value\n"} ')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, "\t value\n")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
event = reader.nextEvent()
self.assertIsNone(event)
def testEscapeCharacter(self):
stream = StringIO('{"\\"ke\\"y\\\\" : "va\\"l\\"ue"} ')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, '"ke"y\\')
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, 'va"l"ue')
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
event = reader.nextEvent()
self.assertIsNone(event)
def testEmptyArray(self):
stream = StringIO('[]')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_ARRAY)
event = reader.nextEvent()
self.assertIsNone(event)
def testMissingColon(self):
stream = StringIO('{"key" "value"}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testCommaInsteadOfColon(self):
stream = StringIO('{"key","value"}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testColonInsteadOfComma(self):
stream = StringIO('["key":"value"]')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testNumberLiteral(self):
stream = StringIO('1')
reader = pulljson.JSONPullParser(stream)
with self.assertRaises(pulljson.JSONParseError) as cm:
reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testStringLiteral(self):
stream = StringIO('"This is a test"')
reader = pulljson.JSONPullParser(stream)
with self.assertRaises(pulljson.JSONParseError) as cm:
reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testObjectMissingValue(self):
stream = StringIO('{"key":}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testArrayMissingValue(self):
stream = StringIO('[1, ,2}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testArrayInObject(self):
stream = StringIO('{[]}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testReadObject(self):
stream = StringIO(
'{"key1":[0,1,2,3,4,{"value":"5"}], "key2":\
{"key1":[0,1,2,3,4,{"value":"5"}]}}')
reader = pulljson.JSONPullParser(stream)
obj = reader.readObject()
self.assertEqual(len(obj), 2)
for i in range(0, 2):
self.assertEqual(len(obj["key1"]), 6)
for i in range(0, 5):
self.assertEqual(obj["key1"][i], i)
self.assertEqual(obj["key1"][5]["value"], "5")
if i == 1:
obj = obj["key2"]
self.assertEqual(len(obj), 1)
def testReadArray(self):
stream = StringIO('[0,1,2,3,4,[0,1,2,3,4,[0,1,2,3,4]],[0,1,2,3,4]]')
reader = pulljson.JSONPullParser(stream)
arr = reader.readArray()
self.assertEqual(len(arr), 7)
for i in range(0, 5):
self.assertEqual(arr[i], i)
for i in range(0, 5):
self.assertEqual(arr[5][i], i)
for i in range(0, 5):
self.assertEqual(arr[5][5][i], i)
for i in range(0, 5):
self.assertEqual(arr[6][i], i)
def testArraySyntaxError(self):
stream = StringIO('[[0,1][0,1]]')
reader = pulljson.JSONPullParser(stream)
with self.assertRaises(pulljson.JSONParseError) as cm:
reader.readArray()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testIterateArray(self):
stream = StringIO(
'[{"key0}":["}\\"","\\"}","}"]}, {"key1}":["}","\\"}","}"]}, '
'{"key2}":["}","}","\\"}"]}]')
reader = pulljson.JSONPullParser(stream)
i = 0
for x in reader.expectArray():
self.assertEqual(len(x["key" + str(i) + "}"]), 3)
i += 1
if __name__ == '__main__':
unittest.main()
| 1.929688
| 2
|
tests/test_user.py
|
samsonosiomwan/SQL-model
| 0
|
12780511
|
<reponame>samsonosiomwan/SQL-model<filename>tests/test_user.py
import unittest
import sys
sys.path.append('src/models')
from user import User
from datetime import datetime
class TestUser(unittest.TestCase):
def setUp(self):
self.user = User()
def test_all(self):
self.assertIsInstance(self.user.all(),list)
self.assertIsNotNone(self.user.all())
def test_get(self):
self.assertEqual(len(self.user.get(1)), 6)
self.assertIsNotNone(self.user.get(1))
def test_create(self):
self.assertIsInstance(self.user.create('samson1','vero','ola'),tuple)
self.assertEqual(len(self.user.create('samson1','vero','ola')), 6)
self.assertIsNotNone(self.user.create('samson1','vero','ola'))
def test_update(self):
update_time = datetime.now()
self.assertIsNotNone(self.user.update(33,'samson1','vero','ola'))
def test_destroy(self):
user_table_length = len(self.user.all())
self.assertIsNotNone(self.user.destroy(1))
        self.assertEqual(len(self.user.all()), user_table_length - 1)
def tearDown(self):
self.user = None
| 2.765625
| 3
|
tests/eth2/core/beacon/state_machines/forks/test_serenity_block_processing.py
|
dendisuhubdy/trinity
| 0
|
12780512
|
<gh_stars>0
from eth.constants import ZERO_HASH32
from eth_utils import ValidationError
from eth_utils.toolz import concat, first, mapcat
import pytest
from eth2._utils.bls import bls
from eth2.beacon.helpers import compute_start_slot_at_epoch, get_domain
from eth2.beacon.signature_domain import SignatureDomain
from eth2.beacon.state_machines.forks.serenity.block_processing import (
process_eth1_data,
process_randao,
)
from eth2.beacon.state_machines.forks.serenity.blocks import SerenityBeaconBlock
from eth2.beacon.state_machines.forks.serenity.states import SerenityBeaconState
from eth2.beacon.tools.builder.initializer import create_mock_validator
from eth2.beacon.tools.builder.proposer import _generate_randao_reveal
from eth2.beacon.types.blocks import BeaconBlock, BeaconBlockBody
from eth2.beacon.types.eth1_data import Eth1Data
from eth2.beacon.types.states import BeaconState
def test_randao_processing(
sample_beacon_block_params,
sample_beacon_block_body_params,
sample_beacon_state_params,
keymap,
config,
):
proposer_pubkey, proposer_privkey = first(keymap.items())
state = SerenityBeaconState.create(**sample_beacon_state_params).mset(
"validators",
tuple(
create_mock_validator(proposer_pubkey, config)
for _ in range(config.TARGET_COMMITTEE_SIZE)
),
"balances",
(config.MAX_EFFECTIVE_BALANCE,) * config.TARGET_COMMITTEE_SIZE,
"randao_mixes",
tuple(ZERO_HASH32 for _ in range(config.EPOCHS_PER_HISTORICAL_VECTOR)),
)
epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
slot = compute_start_slot_at_epoch(epoch, config.SLOTS_PER_EPOCH)
randao_reveal = _generate_randao_reveal(
privkey=proposer_privkey, slot=slot, state=state, config=config
)
block_body = BeaconBlockBody.create(**sample_beacon_block_body_params).set(
"randao_reveal", randao_reveal
)
block = SerenityBeaconBlock.create(**sample_beacon_block_params).set(
"body", block_body
)
new_state = process_randao(state, block, config)
updated_index = epoch % config.EPOCHS_PER_HISTORICAL_VECTOR
original_mixes = state.randao_mixes
updated_mixes = new_state.randao_mixes
assert all(
updated == original if index != updated_index else updated != original
for index, (updated, original) in enumerate(zip(updated_mixes, original_mixes))
)
def test_randao_processing_validates_randao_reveal(
sample_beacon_block_params,
sample_beacon_block_body_params,
sample_beacon_state_params,
sample_fork_params,
keymap,
config,
):
proposer_pubkey, proposer_privkey = first(keymap.items())
state = SerenityBeaconState.create(**sample_beacon_state_params).mset(
"validators",
tuple(
create_mock_validator(proposer_pubkey, config)
for _ in range(config.TARGET_COMMITTEE_SIZE)
),
"balances",
(config.MAX_EFFECTIVE_BALANCE,) * config.TARGET_COMMITTEE_SIZE,
"randao_mixes",
tuple(ZERO_HASH32 for _ in range(config.EPOCHS_PER_HISTORICAL_VECTOR)),
)
epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
message_hash = (epoch + 1).to_bytes(32, byteorder="little")
domain = get_domain(state, SignatureDomain.DOMAIN_RANDAO, config.SLOTS_PER_EPOCH)
randao_reveal = bls.sign(message_hash, proposer_privkey, domain)
block_body = BeaconBlockBody.create(**sample_beacon_block_body_params).set(
"randao_reveal", randao_reveal
)
block = SerenityBeaconBlock.create(**sample_beacon_block_params).set(
"body", block_body
)
with pytest.raises(ValidationError):
process_randao(state, block, config)
HASH1 = b"\x11" * 32
HASH2 = b"\x22" * 32
def _expand_eth1_votes(args):
block_hash, vote_count = args
return (Eth1Data.create(block_hash=block_hash),) * vote_count
@pytest.mark.parametrize(
("original_votes", "block_data", "expected_votes"),
(
((), HASH1, ((HASH1, 1),)),
(((HASH1, 5),), HASH1, ((HASH1, 6),)),
(((HASH2, 5),), HASH1, ((HASH2, 5), (HASH1, 1))),
(((HASH1, 10), (HASH2, 2)), HASH2, ((HASH1, 10), (HASH2, 3))),
),
)
def test_process_eth1_data(
original_votes,
block_data,
expected_votes,
sample_beacon_state_params,
sample_beacon_block_params,
sample_beacon_block_body_params,
config,
):
eth1_data_votes = tuple(mapcat(_expand_eth1_votes, original_votes))
state = BeaconState.create(**sample_beacon_state_params).set(
"eth1_data_votes", eth1_data_votes
)
block_body = BeaconBlockBody.create(**sample_beacon_block_body_params).mset(
"eth1_data", Eth1Data.create(block_hash=block_data)
)
block = BeaconBlock.create(**sample_beacon_block_params).set("body", block_body)
updated_state = process_eth1_data(state, block, config)
updated_votes = updated_state.eth1_data_votes
expanded_expected_votes = tuple(mapcat(_expand_eth1_votes, expected_votes))
assert tuple(updated_votes) == expanded_expected_votes
@pytest.mark.parametrize(("slots_per_eth1_voting_period"), ((16),))
@pytest.mark.parametrize(
("vote_offsets"), # a tuple of offsets against the majority threshold
(
# no eth1_data_votes
(),
# a minority of eth1_data_votes (single)
(-2,),
# a plurality of eth1_data_votes (multiple but not majority)
(-2, -2),
# almost a majority!
(0,),
# a majority of eth1_data_votes
(1,),
(7,),
# NOTE: we are accepting more than one block per slot if
# there are multiple majorities so no need to test this
),
)
def test_ensure_update_eth1_vote_if_exists(genesis_state, config, vote_offsets):
    # half of the voting period is one vote short of a strict majority
threshold = config.SLOTS_PER_ETH1_VOTING_PERIOD // 2
data_votes = tuple(
concat(
(Eth1Data.create(block_hash=(i).to_bytes(32, "little")),)
* (threshold + offset)
for i, offset in enumerate(vote_offsets)
)
)
state = genesis_state
for vote in data_votes:
state = process_eth1_data(
state,
BeaconBlock.create(body=BeaconBlockBody.create(eth1_data=vote)),
config,
)
if not vote_offsets:
assert state.eth1_data == genesis_state.eth1_data
# we should update the 'latest' entry if we have a majority
for offset in vote_offsets:
if offset <= 0:
assert genesis_state.eth1_data == state.eth1_data
else:
assert state.eth1_data == data_votes[0]
| 1.882813
| 2
|
etc/defaults/pvAccessCPP.py
|
dls-controls/ADCore
| 0
|
12780513
|
<reponame>dls-controls/ADCore
# Builder definitions for pvAccessCPP
import iocbuilder
from iocbuilder import Device
class pvAccessCPP(Device):
LibFileList = ['pvAccess', 'pvAccessIOC', 'pvAccessCA']
DbdFileList = ['PVAServerRegister']
AutoInstantiate = True
| 1.601563
| 2
|
src/blockdiag/imagedraw/__init__.py
|
flying-foozy/blockdiag
| 155
|
12780514
|
<filename>src/blockdiag/imagedraw/__init__.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
from blockdiag.utils.logging import warning
drawers = {}
def init_imagedrawers(debug=False):
for drawer in pkg_resources.iter_entry_points('blockdiag_imagedrawers'):
try:
module = drawer.load()
if hasattr(module, 'setup'):
module.setup(module)
except Exception as exc:
if debug:
warning('Failed to load %s: %r' % (drawer.module_name, exc))
def install_imagedrawer(ext, drawer):
drawers[ext] = drawer
def create(_format, filename, **kwargs):
if len(drawers) == 0:
init_imagedrawers(debug=kwargs.get('debug'))
_format = _format.lower()
if _format in drawers:
drawer = drawers[_format](filename, **kwargs)
else:
msg = 'failed to load %s image driver' % _format
raise RuntimeError(msg)
if 'linejump' in kwargs.get('filters', []):
from blockdiag.imagedraw.filters.linejump import LineJumpDrawFilter
jumpsize = kwargs.get('jumpsize', 0)
drawer = LineJumpDrawFilter(drawer, jumpsize)
return drawer
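# Illustrative sketch (not part of the original module): the register/create flow
# above. 'png' and MyPNGDrawer are hypothetical here; real drawers are discovered
# through the 'blockdiag_imagedrawers' entry point and call install_imagedrawer()
# from their setup() hook.
#
#   install_imagedrawer('png', MyPNGDrawer)   # MyPNGDrawer(filename, **kwargs)
#   drawer = create('PNG', 'out.png', filters=['linejump'], jumpsize=2)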
| 2.109375
| 2
|
Lab1/dbManager.py
|
yzghurovskyi/RelationalDBsLabs
| 0
|
12780515
|
import math
import os
from random import random
import psycopg2
import psycopg2.extras
from config.config import config
from models.Club import Club
from models.Player import Player
from models.Position import Position
from models.Tournament import Tournament
from faker import Faker
class FootballDatabase(object):
def __init__(self):
self.conn = None
def exec_script_file(self, script_file_name: str) -> None:
        script_file = open(os.path.join(os.path.dirname(__file__), 'scripts', script_file_name), 'r')
with self.get_cursor() as cur:
cur.execute(script_file.read())
self.conn.commit()
def get_cursor(self):
return self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
def connect(self) -> None:
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
self.conn = psycopg2.connect(**params)
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def close_connection(self) -> None:
if self.conn is not None:
self.conn.close()
print('Database connection closed.')
def generate_random_players(self):
fake = Faker()
script = """INSERT INTO players(first_name, last_name, date_of_birth, is_injured, height, position_id, club_id)
VALUES(%s, %s, %s, %s, %s, %s, (SELECT club_id FROM clubs ORDER BY random() LIMIT 1));"""
with self.get_cursor() as cur:
for i in range(1000):
cur.execute(script, [fake.first_name_male(),
fake.last_name_male(),
fake.date_of_birth(tzinfo=None, minimum_age=17, maximum_age=35),
random() > 0.5,
math.ceil(random() * 39 + 160),
math.ceil(random() * 3 + 1)])
def generate_random_clubs(self):
fake = Faker()
clubs_amount = 100
club_names = fake.words(nb=clubs_amount, ext_word_list=None, unique=True)
script = """INSERT INTO clubs(name, creation_date, number_of_trophies) VALUES (%s, %s, %s);"""
with self.get_cursor() as cur:
for i in range(clubs_amount):
cur.execute(script, [club_names[i],
fake.date_of_birth(tzinfo=None, minimum_age=5, maximum_age=200),
math.ceil(random() * 29 + 1)])
def generate_random_tournaments(self):
fake = Faker()
script = """INSERT INTO tournaments(name, description) VALUES (%s, %s);"""
with self.get_cursor() as cur:
for i in range(20):
cur.execute(script, [fake.word(), fake.text()])
def text_search_by_words(self, words: list) -> list:
search_words = ' & '.join(words)
script = """SELECT id, ts_headline('english', description, q) description, name
FROM (SELECT tournament_id id, description, name, q
FROM tournaments, to_tsquery('english', %s) q
WHERE tsv @@ q) AS t;"""
with self.get_cursor() as cur:
cur.execute(script, [search_words])
tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name'], description=t['description']) for t in tournaments]
def text_search_by_phrase(self, phrase: str) -> list:
script = """SELECT id, ts_headline('english', description, q) description, name
FROM (SELECT tournament_id id, description, name, q
FROM tournaments, phraseto_tsquery('english', %s) q
WHERE tsv @@ q) AS t;"""
with self.get_cursor() as cur:
cur.execute(script, [phrase])
tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name'], description=t['description']) for t in tournaments]
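    # Illustrative sketch (not part of the original class): using the full-text
    # search helpers above; it assumes a reachable PostgreSQL instance and that
    # the tournaments table carries the tsv column queried in the scripts.
    #
    #   db = FootballDatabase()
    #   db.connect()
    #   db.text_search_by_words(['champions', 'league'])   # AND of both words
    #   db.text_search_by_phrase('group stage')            # exact phrase
    #   # both return lists of Tournament objects with highlighted descriptions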
def advanced_player_search(self,
min_height: int,
max_height: int,
min_number: int,
max_number: int,
position_id: int) -> list:
script = """
SELECT p.first_name, p.last_name, p.height, c.name as club , c.number_of_trophies, pos.name as position
FROM players p
JOIN clubs c
ON p.club_id = c.club_id
JOIN positions pos
ON p.position_id = pos.position_id
WHERE (p.height BETWEEN %s AND %s)
AND (c.number_of_trophies BETWEEN %s AND %s)
AND p.position_id = %s;"""
with self.get_cursor() as cur:
cur.execute(script, [min_height, max_height, min_number, max_number, position_id])
rows = cur.fetchall()
return [(Club(name=r['club'], number_of_trophies=r['number_of_trophies']),
Player(first_name=r['first_name'], last_name=r['last_name'], height=r['height']),
Position(name=r['position'])) for r in rows]
# region Positions operations
def get_positions(self):
script = """
SELECT position_id, name
FROM positions"""
with self.get_cursor() as cur:
cur.execute(script)
positions = cur.fetchall()
return [Position(id=p['position_id'], name=p['name']) for p in positions]
# endregion
# region Players operations
def get_player(self, player_id: int) -> Player:
script = """
SELECT * FROM players
WHERE player_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [player_id])
db_player = cur.fetchone()
return Player(id=db_player['player_id'], first_name=db_player['first_name'], last_name=db_player['last_name'],
date_of_birth=db_player['date_of_birth'], is_injured=db_player['is_injured'],
height=db_player['height'], club_id=db_player['club_id'], position_id=db_player['position_id'])
def get_players(self) -> list:
with self.get_cursor() as cur:
cur.execute('SELECT player_id, first_name, last_name FROM players')
db_players = cur.fetchall()
return [Player(id=p['player_id'], first_name=p['first_name'], last_name=p['last_name']) for p in db_players]
def get_players_by_club(self, club_id) -> list:
script = """
SELECT p.first_name, p.last_name
FROM players AS p
WHERE p.club_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [club_id])
db_players = cur.fetchall()
return [Player(first_name=p['first_name'], last_name=p['last_name']) for p in db_players]
def get_players_free(self):
script = """
SELECT p.player_id, p.first_name, p.last_name
FROM players p
WHERE club_id is NULL"""
with self.get_cursor() as cur:
cur.execute(script)
db_players = cur.fetchall()
return [Player(first_name=p['first_name'], last_name=p['last_name'], id=p['player_id']) for p in db_players]
def add_player(self, player: Player) -> None:
insert_script = """
INSERT INTO players (first_name, last_name, date_of_birth, is_injured, position_id, height, club_id)
VALUES (%s, %s, %s, %s, %s, %s, %s)"""
insert_data = (player.first_name,
player.last_name,
player.date_of_birth,
player.is_injured,
player.position_id,
player.height,
player.club_id)
with self.get_cursor() as cur:
cur.execute(insert_script, insert_data)
self.conn.commit()
def update_player(self, player: Player) -> None:
update_script = """
UPDATE players
SET (first_name, last_name, date_of_birth, is_injured, position_id, height ,club_id) =
(%s, %s, %s, %s, %s, %s, %s)
WHERE player_id = %s;"""
update_data = (player.first_name, player.last_name,
player.date_of_birth, player.is_injured,
player.position_id, player.height, player.club_id, player.id)
with self.get_cursor() as cur:
cur.execute(update_script, update_data)
self.conn.commit()
def update_players_club(self, club_id: int, player_ids: list):
update_player = """
UPDATE players
SET club_id = %s
WHERE player_id IN %s"""
if player_ids:
with self.get_cursor() as cur:
cur.execute(update_player, (club_id, tuple(player_ids),))
self.conn.commit()
def delete_player(self, player_id: int) -> None:
delete_script = """DELETE FROM players WHERE player_id=%s;"""
        with self.get_cursor() as cur:
cur.execute(delete_script, [player_id])
self.conn.commit()
# endregion
# region Club operations
def get_club(self, club_id: int) -> Club:
with self.get_cursor() as cur:
            cur.execute('SELECT * FROM clubs WHERE club_id = %s', [club_id])
c = cur.fetchone()
return Club(id=c['club_id'],
name=c['name'],
creation_date=c['creation_date'],
number_of_trophies=c['number_of_trophies'])
def get_clubs(self) -> list:
with self.get_cursor() as cur:
cur.execute('SELECT club_id as id, name FROM clubs')
db_clubs = cur.fetchall()
return [Club(id=c['id'], name=c['name']) for c in db_clubs]
def add_club(self, club: Club) -> int:
insert_script = """
INSERT INTO clubs (name, creation_date, number_of_trophies)
VALUES (%s, %s, %s) RETURNING club_id;"""
insert_data = (club.name, club.creation_date, club.number_of_trophies)
with self.get_cursor() as cur:
cur.execute(insert_script, insert_data)
new_id = cur.fetchone()[0]
self.conn.commit()
return new_id
def update_club(self, club: Club) -> None:
update_script = """
UPDATE clubs
SET (name, creation_date, number_of_trophies) = (%s, %s, %s)
WHERE club_id = %s;"""
update_data = (club.name, club.creation_date, club.number_of_trophies, club.id)
with self.get_cursor() as cur:
cur.execute(update_script, update_data)
self.conn.commit()
def delete_club(self, club_id: int) -> None:
delete_script = """DELETE FROM clubs WHERE club_id=%s;"""
with self.get_cursor() as cur:
cur.execute(delete_script, [club_id])
self.conn.commit()
def get_clubs_by_tournament(self, tournament_id: int):
script = """
SELECT c.club_id as id, c.name as name FROM clubs c
JOIN clubs_tournaments ct
ON ct.club_id = c.club_id
WHERE ct.tournament_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [tournament_id])
clubs = cur.fetchall()
self.conn.commit()
return [Club(id=c['id'], name=c['name']) for c in clubs]
def get_clubs_not_in_tournament(self, tournament_id: int):
script = """
SELECT c.club_id as id, c.name as name FROM clubs c
WHERE c.club_id NOT IN(
SELECT c.club_id FROM clubs c
JOIN clubs_tournaments ct
ON ct.club_id = c.club_id
WHERE ct.tournament_id = %s)"""
with self.get_cursor() as cur:
cur.execute(script, [tournament_id])
clubs = cur.fetchall()
return [Club(id=c['id'], name=c['name']) for c in clubs]
def add_clubs_to_tournament(self, tournament_id: int, club_ids: list):
data = [(cid, tournament_id) for cid in club_ids]
script = """INSERT INTO clubs_tournaments(club_id, tournament_id) VALUES %s"""
with self.get_cursor() as cur:
psycopg2.extras.execute_values(cur, script, data, template=None, page_size=100)
self.conn.commit()
# endregion
# region Tournament operations
def get_tournament(self, tournament_id: int) -> Tournament:
with self.get_cursor() as cur:
            cur.execute('SELECT * FROM tournaments WHERE tournament_id = %s', [tournament_id])
t = cur.fetchone()
return Tournament(id=t['tournament_id'], name=t['name'], description=t['description'])
def get_tournaments(self) -> list:
with self.get_cursor() as cur:
cur.execute('SELECT tournament_id as id, name FROM tournaments')
db_tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name']) for t in db_tournaments]
def delete_tournament(self, tournament_id: int) -> None:
delete_script = """DELETE FROM tournaments WHERE tournament_id=%s;"""
with self.get_cursor() as cur:
cur.execute(delete_script, [tournament_id])
self.conn.commit()
def add_tournament(self, tournament: Tournament) -> int:
insert_script = """
INSERT INTO tournaments (name, description)
VALUES (%s, %s) RETURNING tournament_id"""
insert_data = (tournament.name, tournament.description)
with self.get_cursor() as cur:
cur.execute(insert_script, insert_data)
new_id = cur.fetchone()[0]
self.conn.commit()
return new_id
def update_tournament(self, tournament: Tournament) -> None:
update_script = """
UPDATE tournaments
SET (name, description) = (%s, %s)
WHERE tournament_id = %s;"""
update_data = (tournament.name, tournament.description, tournament.id)
with self.get_cursor() as cur:
cur.execute(update_script, update_data)
self.conn.commit()
def get_tournaments_by_club(self, club_id: int):
script = """
SELECT t.tournament_id as id, t.name as name FROM tournaments t
JOIN clubs_tournaments ct
ON t.tournament_id = ct.tournament_id
WHERE ct.club_id = %s"""
with self.get_cursor() as cur:
cur.execute(script, [club_id])
tournaments = cur.fetchall()
return [Tournament(id=t['id'], name=t['name']) for t in tournaments]
# endregion
| 2.921875
| 3
|
sonar/ping.py
|
allenai/allennlp-gallery
| 3
|
12780516
|
# -*- coding: utf-8 -*-
import requests
import time
import math
import signal
def is_ok(url: str) -> bool:
"""
Returns True if the provided URL responds with a 2XX when fetched via
a HTTP GET request.
"""
    try:
        resp = requests.get(url)
    except requests.RequestException:
        return False
    return math.floor(resp.status_code / 100) == 2
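# Illustrative sketch: a minimal use of the helper above.
#
#   if is_ok("http://localhost:8080"):
#       print("proxy is answering with 2XX responses")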
def scan():
"""
Broadcasts the availability of the proxy's HTTP server once both the
API and UI are ready for traffic.
This script exists solely to ease confusion locally, as both Flask and
the HTTP server bundled with `create-react-app` output logs telling the
user about the ports they're bound to (even though they're inaccessible).
"""
print("")
print("⚓️ Ahoy!")
print("")
print(
"Your application is starting and will be available at " +
"http://localhost:8080 when it's ready."
)
print("")
# If someone tries to cancel the `docker-compose up` invocation, docker
# will send a SIGTERM to the program. We need to handle this and set a
# value that allows the loop to be broken.
term = False
def handle_interrupt(signal_number, stack_frame):
        nonlocal term
term = True
signal.signal(signal.SIGTERM, handle_interrupt)
last_check = time.perf_counter()
is_app_live = False
    while not is_app_live:
if term is True:
break
# We don't use `time.sleep()`, as that'd prevent us from being able
# to break the loop quickly in the event of a SIGTERM.
now = time.perf_counter()
if (now - last_check >= 5):
last_check = now
if not is_app_live:
is_app_live = is_ok("http://app:8000")
if is_app_live:
print("")
print("✨ Your local environment is ready:")
print("")
print(" http://localhost:8080")
print("")
print("⛵️ Smooth sailing!")
print("")
if __name__ == "__main__":
scan()
| 3.015625
| 3
|
src/automata.py
|
ezequielbrrt/ModeloSIR-AutomataCelular
| 0
|
12780517
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import subprocess
import sys
# create the population
datos = open("Datos/Poblacion.txt","w")
datos.close()
datos = open("Datos/Estados.txt","w")
datos.close()
# generate coordinates
contador = 0
for x in range(1,2000):
longitud = np.random.uniform(-108,-85,1)
latitud = np.random.uniform(14.5,25,1)
lon = longitud[0]
lat = latitud[0]
    # apply geographic bounds
if lat < 16.3 and lon < -92.38:
pass
elif lat < 25 and lat > 18.119 and lon < -90.4 and lon > -97 :
pass
elif lon > -88 and lat > 16:
pass
elif lat > 24 and lon > -91:
pass
elif lat < 23.7 and lon < -105.5:
pass
elif lat < 18.27 and lon < -101:
pass
elif lat > 20.6 and lon > -98:
pass
elif lat < 24.39 and lon < -106.7:
pass
elif lat < 20.4 and lon < -105.3:
pass
elif lat < 18 and lon > -91:
pass
elif lat < 17.399 and lon < -98:
pass
elif lat < 19.7 and lon < -103.6:
pass
else:
contador = contador + 1
datos = open("Datos/Poblacion.txt","a")
datos.write(str(lat)+","
+str(lon)+"\n")
datos.close()
porcentajes = open("Datos/Datos.txt","r").read()
unidad = 0.7
inf = (float(porcentajes) * 0.7)/float(100)  # red, state 2
sano = unidad - inf  # yellow, state 0
# generate the state of each individual
s = 0.3  # green, state 1
r = 0.0  # blue, state 3
v = np.random.choice(4, contador, p=[sano, s, inf, r])
for i in v:
data = open("Datos/Estados.txt","a")
data.write(str(i)+"\n")
data.close()
mapa = subprocess.Popen([sys.executable, 'src/mapa.py'])
| 2.921875
| 3
|
retention_dashboard/models.py
|
uw-it-aca/retention-dashboard
| 0
|
12780518
|
<filename>retention_dashboard/models.py
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.db import models
from django.db.models import Q, F
class Week(models.Model):
QUARTER_CHOICES = ((1, 'Winter'),
(2, 'Spring'),
(3, 'Summer'),
(4, 'Autumn'))
number = models.IntegerField()
quarter = models.PositiveSmallIntegerField(
default=1, choices=QUARTER_CHOICES)
year = models.IntegerField()
def json_data(self):
display_string = "{} {}: Week {}".format(self.get_quarter_display(),
self.year,
self.number)
return {"value": self.id,
"year": self.year,
"quarter": self.get_quarter_display(),
"number": self.number,
"text": display_string}
@classmethod
def term_to_quarter_number(cls, term):
term = term.lower()
if term == "winter":
return 1
elif term == "spring":
return 2
elif term == "summer":
return 3
elif term == "autumn":
return 4
else:
raise ValueError(f"Unable to determine quarter number for "
f"term={term}")
@classmethod
def sis_term_to_quarter_number(cls, sis_term_id):
term = None
try:
term = sis_term_id.split("-")[1].lower()
except IndexError:
raise ValueError(f"Unable to determine term for "
f"sis_term_id={sis_term_id}")
try:
return Week.term_to_quarter_number(term)
except ValueError:
raise ValueError(f"Unable to determine quarter number for "
f"sis_term_id={sis_term_id}")
class UploadTypes():
premajor = 1
eop = 2
international = 3
iss = 4
tacoma = 5
athletic = 6
class Sport(models.Model):
sport_code = models.IntegerField()
@property
def sport_desc(self):
descs = {
1: "BASEBALL-MEN",
2: "SOFTBALL-WOMEN",
3: "BASKETBALL-MEN",
4: "BASKETBALL-WMN",
5: "CREW-MENV",
6: "CREW-WOMENV",
7: "FOOTBALL-MEN",
8: "FTBL-WOMEN",
9: "GOLF-MEN",
10: "GOLF-WOMEN",
11: "GYMN-MEN",
12: "GYMN-WOMEN",
13: "SOCR-MEN",
14: "SOCR-WOMEN",
15: "SWIM-MEN",
16: "SWIM-WOMEN",
17: "TENS-MEN",
18: "TENS-WOMEN",
19: "TRACK-MEN",
20: "TRACK-WOMEN",
22: "VLYB-WOMEN",
23: "CROSS COUNTRY-M",
24: "CROSS COUNTRY-W",
25: "TRACK-INDOOR M",
26: "TRACK-INDOOR W",
27: "CREWN-MEN",
28: "CREWN-WOMEN",
29: "NONPARTICIPANTS",
35: "NO ATH STATUS",
40: "PERM INJURY FBL",
41: "WIDE RECEIV FBL",
42: "OFFENSE LNE FBL",
43: "TEND KICKER FBL",
44: "QUARTERBACK FBL",
45: "RUNING BACK FBL",
46: "DEFENS LINE FBL",
47: "LINEBACKERS FBL",
48: "DEFENS BACK FBL",
49: "CORNER BACK FBL",
51: "INACTIVE BASEBL",
52: "INACTIVE SOFTBL",
53: "INACTIVE BASKET M",
54: "INACTIVE BASKET W",
55: "INACTIVE CREWV M",
56: "INACTIVE CREWV W",
57: "INACTIVE FTBALL",
59: "INACTIVE GOLF M",
60: "INACTIVE GOLF W",
62: "INACTIVE GYMN",
63: "INACTIVE SOC",
64: "INACT SOCCER W",
65: "INACTIVE SWIM M",
66: "INACTIVE SWIM W",
67: "INACTIVE TENNIS M",
68: "INACTIVE TENNIS W",
69: "INACTIVE TRACK M",
70: "INACTIVE TRACK W",
72: "INACTIVE VOLBL",
73: "INACTIVE CCNTRY M",
74: "INACTIVE CCNTRY W",
75: "INACT TR IND M",
76: "INACT TR IND W",
77: "INACTIVE CREMN M",
78: "INACTIVE CREWN W",
96: "REC W A AID",
97: "REC NO A AID",
98: "N REC W ATH AID",
99: "N REC NO A AID",
30: "SAND VOLLEYBL-W",
80: "INACT SAND VLB"
}
return descs[self.sport_code]
@classmethod
def get_sport_by_type(cls, datapoint_type, week):
dps = DataPoint.objects.filter(type=datapoint_type) \
.filter(week=week) \
.annotate(sport_code=F('sports__sport_code')) \
.order_by('sport_code') \
.filter(~Q(sport_code=None))
sports = []
for dp in dps:
for sport in dp.sports.all():
if (sport.sport_code not in
[sport["sport_code"] for sport in sports]):
sports.append(
{'sport_code': sport.sport_code,
'sport_desc': sport.sport_desc})
return sorted(sports, key=lambda x: x["sport_desc"])
@classmethod
def get_all_sports(cls, week_number, quarter, year):
week = Week.objects.filter(
number=week_number,
quarter=Week.term_to_quarter_number(quarter),
year=year).get()
prem = cls.get_sport_by_type(1, week)
eop = cls.get_sport_by_type(2, week)
inter = cls.get_sport_by_type(3, week)
iss = cls.get_sport_by_type(4, week)
tacoma = cls.get_sport_by_type(5, week)
athletic = cls.get_sport_by_type(6, week)
return {"Premajor": list(prem),
"EOP": list(eop),
"International": list(inter),
"ISS": list(iss),
"Tacoma": list(tacoma),
"Athletics": list(athletic)}
class DataPoint(models.Model):
TYPE_CHOICES = ((UploadTypes.premajor, "Premajor"),
(UploadTypes.eop, "EOP"),
(UploadTypes.international, "International"),
(UploadTypes.iss, "ISS"),
(UploadTypes.tacoma, "Tacoma"),
(UploadTypes.athletic, "Athletics"))
type = models.PositiveSmallIntegerField(choices=TYPE_CHOICES)
week = models.ForeignKey("Week", on_delete=models.PROTECT)
student_name = models.TextField()
student_number = models.IntegerField()
netid = models.CharField(max_length=12)
campus_code = models.CharField(max_length=2, null=True)
class_code = models.CharField(max_length=2, null=True)
premajor = models.BooleanField()
eop = models.BooleanField(default=False)
iss = models.BooleanField(default=False)
international = models.BooleanField(default=False)
is_stem = models.BooleanField(default=False)
priority_score = models.FloatField(null=True)
activity_score = models.FloatField(null=True)
assignment_score = models.FloatField(null=True)
grade_score = models.FloatField(null=True)
signin_score = models.FloatField(default=0.0)
upload = models.ForeignKey("Upload", on_delete=models.CASCADE)
advisor = models.ForeignKey("Advisor", on_delete=models.PROTECT, null=True)
sports = models.ManyToManyField("Sport")
has_a_term = models.BooleanField(default=False)
has_b_term = models.BooleanField(default=False)
has_full_term = models.BooleanField(default=False)
def get_first_last_name(self):
try:
parts = self.student_name.split(",", 1)
return parts[1], parts[0]
except IndexError:
try:
parts = self.student_name.split(" ", 1)
return parts[1], parts[0]
except IndexError:
return "", self.student_name
@staticmethod
def get_data_type_by_text(type_str):
try:
return [t for t in list(DataPoint.TYPE_CHOICES)
if t[1] == type_str][0][0]
except IndexError:
raise ValueError("Unkown type {}".format(type_str))
@staticmethod
def get_data_by_type_week(type, week):
types = [item for item in DataPoint.TYPE_CHOICES
if type in item]
if types:
type_int = [item for item in
DataPoint.TYPE_CHOICES
if type in item][0][0]
data = DataPoint.objects.filter(
type=type_int,
week=week).prefetch_related('advisor')
return data
else:
return []
@staticmethod
def filter_by_ranges(data_queryset, ranges, field):
LOW_MIN = -5
AVG_MIN = -3
HIGH_MIN = 3
HIGH_MAX = 5
field_lt = field + "__lt"
field_gt = field + "__gt"
field_lte = field + "__lte"
field_gte = field + "__gte"
queries = []
if "low" in ranges:
queries.append(Q(**{field_lte: AVG_MIN,
field_gte: LOW_MIN}))
if "avg" in ranges:
queries.append(Q(**{field_lt: HIGH_MIN,
field_gt: AVG_MIN}))
if "high" in ranges:
queries.append(Q(**{field_lte: HIGH_MAX,
field_gte: HIGH_MIN}))
query = queries.pop()
for item in queries:
query |= item
return data_queryset.filter(query)
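    # Illustrative sketch (not part of the original model): combining the range
    # filter above with the other helpers; the week object is assumed to exist.
    #
    #   qs = DataPoint.get_data_by_type_week("EOP", week)
    #   qs = DataPoint.filter_by_ranges(qs, ["low", "high"], "priority_score")
    #   # keeps rows with -5 <= priority_score <= -3 or 3 <= priority_score <= 5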
@staticmethod
def filter_by_text(data_queryset, text):
data_queryset = \
data_queryset.filter(Q(student_name__icontains=text)
| Q(student_number__icontains=text)
| Q(netid__icontains=text))
return data_queryset
@staticmethod
def filter_by_summer(data_queryset, summer_terms):
queries = []
if "a" in summer_terms:
queries.append(Q(has_a_term=True))
if "b" in summer_terms:
queries.append(Q(has_b_term=True))
if "full" in summer_terms:
queries.append(Q(has_full_term=True))
query = queries.pop()
for item in queries:
query &= item
return data_queryset.filter(query)
@staticmethod
def filter_by_premajor(data_queryset, is_premajor):
return data_queryset.filter(premajor=is_premajor)
@staticmethod
def filter_by_stem(data_queryset, is_stem):
return data_queryset.filter(is_stem=is_stem)
@staticmethod
def filter_by_class_standing(data_queryset, class_standing_filter):
return data_queryset.filter(class_code=class_standing_filter)
@staticmethod
def filter_by_sports(data_queryset, sport_code_filter):
return data_queryset.filter(sports__sport_code=sport_code_filter)
@staticmethod
def filter_by_advisor(data_queryset, advisor_netid, advisor_type):
advisor_type_id = DataPoint.get_data_type_by_text(advisor_type)
return (data_queryset
.filter(advisor__advisor_netid=advisor_netid)
.filter(advisor__advisor_type=advisor_type_id))
def get_summer_string(self):
term_list = []
if self.has_a_term:
term_list.append("A")
if self.has_b_term:
term_list.append("B")
if self.has_full_term:
term_list.append("Full")
return ', '.join(map(str, term_list))
def get_class_desc(self):
class_codes_map = {
0: "Pending",
1: "Freshman",
2: "Sophomore",
3: "Junior",
4: "Senior",
5: "5th-Year",
6: "Non-Matriculated",
8: "Graduate",
11: "1st Year Professional",
12: "2nd Year Professional",
13: "3rd Year Professional",
14: "4th Year Professional",
}
if self.class_code is not None:
return class_codes_map.get(int(self.class_code))
def json_data(self):
first, last = self.get_first_last_name()
resp = {"student_first_name": first,
"student_last_name": last,
"student_number": self.student_number,
"netid": self.netid,
"priority_score": self.priority_score,
"activity_score": self.activity_score,
"assignment_score": self.assignment_score,
"grade_score": self.grade_score,
"signin_score": self.signin_score,
"is_premajor": self.premajor,
"is_eop": self.eop,
"is_iss": self.iss,
"is_international": self.international,
"is_stem": self.is_stem,
"is_athlete": self.sports.exists(),
"summer_term_string": self.get_summer_string(),
"class_desc": self.get_class_desc(),
"campus_code": self.campus_code
}
if self.advisor is not None:
resp["advisor_name"] = self.advisor.advisor_name
resp["advisor_netid"] = self.advisor.advisor_netid
return resp
@classmethod
def get_class_standing_by_type(cls, datapoint_type, week):
dps = DataPoint.objects.filter(type=datapoint_type) \
.filter(week=week) \
.order_by("class_code")
class_standings = {}
for dp in dps:
if dp.class_code:
class_standings[dp.class_code] = \
{"class_code": int(dp.class_code),
"class_desc": dp.get_class_desc()}
return sorted(class_standings.values(),
key=lambda i: i['class_code'])
@classmethod
def get_all_class_standings(cls, week_number, quarter, year):
week = Week.objects.filter(
number=week_number,
quarter=Week.term_to_quarter_number(quarter),
year=year).get()
prem = cls.get_class_standing_by_type(1, week)
eop = cls.get_class_standing_by_type(2, week)
inter = cls.get_class_standing_by_type(3, week)
iss = cls.get_class_standing_by_type(4, week)
tacoma = cls.get_class_standing_by_type(5, week)
athletic = cls.get_class_standing_by_type(6, week)
return {"Premajor": list(prem),
"EOP": list(eop),
"International": list(inter),
"ISS": list(iss),
"Tacoma": list(tacoma),
"Athletics": list(athletic)}
class Upload(models.Model):
file = models.TextField()
type = models.PositiveSmallIntegerField(choices=DataPoint.TYPE_CHOICES)
uploaded_by = models.CharField(max_length=12)
created_on = models.DateTimeField(auto_now_add=True)
week = models.ForeignKey("Week", on_delete=models.PROTECT)
def json_data(self):
return {"id": self.id,
"created_on": self.created_on.strftime("%d %B, %Y, %I%p"),
"uploaded_by": self.uploaded_by,
"type": self.get_type_display(),
"year": self.week.year,
"week": self.week.number,
"quarter": self.week.get_quarter_display()}
class Meta:
unique_together = ('type', 'week',)
class Advisor(models.Model):
advisor_name = models.TextField()
advisor_netid = models.CharField(max_length=12)
advisor_type = models.PositiveSmallIntegerField(
choices=DataPoint.TYPE_CHOICES)
class Meta:
unique_together = ('advisor_netid', 'advisor_type')
@classmethod
def get_advisor_by_type(cls, advisor_type):
return Advisor.objects.filter(advisor_type=advisor_type) \
.order_by('advisor_name') \
.filter(~Q(advisor_name="")) \
.values('advisor_name', 'advisor_netid')
@classmethod
def get_all_advisors(cls):
prem = cls.get_advisor_by_type(1)
eop = cls.get_advisor_by_type(2)
inter = cls.get_advisor_by_type(3)
iss = cls.get_advisor_by_type(4)
tacoma = cls.get_advisor_by_type(5)
athletic = cls.get_advisor_by_type(6)
return {"Premajor": list(prem),
"EOP": list(eop),
"International": list(inter),
"ISS": list(iss),
"Tacoma": list(tacoma),
"Athletics": list(athletic)}
| 2.375
| 2
|
dataset.py
|
Assassinsarms/Deep-Active-Learning-Network-for-Medical-Image-Segmentation
| 3
|
12780519
|
<gh_stars>1-10
import numpy as np
import torch
import torch.utils.data
from torchvision import datasets, models, transforms
class Dataset(torch.utils.data.Dataset):
def __init__(self, img_paths, mask_paths):
self.img_paths = img_paths
self.mask_paths = mask_paths
def __len__(self):
return len(self.img_paths)
def __getitem__(self, idx):
img_path = self.img_paths[idx]
mask_path = self.mask_paths[idx]
np_img = np.load(img_path)
np_mask = np.load(mask_path)
np_img = np_img.transpose((2, 0, 1))
        # Build three binary masks from the label values; this appears to follow
        # the BraTS convention: WT (whole tumour) = {1, 2, 4},
        # TC (tumour core) = {1, 4}, ET (enhancing tumour) = {4}.
        WT_Label = np_mask.copy()
WT_Label[np_mask == 1] = 1.
WT_Label[np_mask == 2] = 1.
WT_Label[np_mask == 4] = 1.
TC_Label = np_mask.copy()
TC_Label[np_mask == 1] = 1.
TC_Label[np_mask == 2] = 0.
TC_Label[np_mask == 4] = 1.
ET_Label = np_mask.copy()
ET_Label[np_mask == 1] = 0.
ET_Label[np_mask == 2] = 0.
ET_Label[np_mask == 4] = 1.
np_label = np.empty((160, 160, 3))
np_label[:, :, 0] = WT_Label
np_label[:, :, 1] = TC_Label
np_label[:, :, 2] = ET_Label
np_label = np_label.transpose((2, 0, 1))
np_label = np_label.astype("float32")
np_img = np_img.astype("float32")
return np_img, np_label
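if __name__ == "__main__":
    # Minimal usage sketch; the glob pattern, directory layout and batch size
    # are assumptions, not part of the original repository.
    from glob import glob
    img_paths = sorted(glob("data/images/*.npy"))
    mask_paths = sorted(glob("data/masks/*.npy"))
    loader = torch.utils.data.DataLoader(Dataset(img_paths, mask_paths),
                                         batch_size=4, shuffle=True)
    for images, labels in loader:
        # images: (B, C, 160, 160), labels: (B, 3, 160, 160)
        print(images.shape, labels.shape)
        break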
| 2.359375
| 2
|
visualizer/backend/utils.py
|
CMU-Light-Curtains/SafetyEnvelopes
| 0
|
12780520
|
# This file is compatible with both Python 2 and 3
import base64
import cv2
import json
import numpy as np
from flask import Response
import time
import functools
from collections import deque
class Stream(deque):
"""
A stream stores an output sequence stream of data. It inherits from deque.
Stream contains oldest-newest data from left to right.
Stream has a "capacity" -- if the stream is full, it will drop the oldest data.
vizstream will monitor the stream and will send data to the browser whenever the
stream is updated and its timestamp changes.
"""
def __init__(self, capacity=1, fps=10):
"""
Args:
capacity: (int) maximum capacity of stream.
"""
self.capacity = capacity
self.fps = fps
self.timestamp = 0
super(Stream, self).__init__(maxlen=self.capacity)
def publish(self, **kwargs):
item = dict(data=kwargs, timestamp=self.timestamp)
self.timestamp += 1
self.append(item)
def reset(self):
self.clear()
self.timestamp = 0
def vizstream(app, stream, astype):
if astype == 'scene_cloud':
url = '/api/stream_scene_cloud'
data2msg = data2msg_scene_cloud
elif astype == 'lc_curtain':
url = '/api/stream_lc_curtain'
data2msg = data2msg_lc_curtain
# elif astype == 'camera_image':
# url = '/api/stream_camera_image'
# data2msg = data2msg_camera_image
# elif astype == 'lidar_cloud':
# url = '/api/stream_lidar_cloud'
# data2msg = data2msg_lidar_cloud
# elif astype == 'dt_boxes':
# url = '/api/stream_dt_boxes'
# data2msg = data2msg_dt_boxes
# elif astype == 'entropy_map':
# url = '/api/stream_entropy_map'
# data2msg = data2msg_entropy_map
# elif astype == 'arrows':
# url = '/api/stream_arrows'
# data2msg = data2msg_arrows
else:
raise Exception("astype={} not valid".format(astype))
def generator():
sent_timestamp = None
while True:
if len(stream) == 0:
sent_timestamp = None
elif sent_timestamp != stream[-1]["timestamp"]:
sent_timestamp = stream[-1]["timestamp"]
data = stream[-1]["data"]
msg = data2msg(**data)
yield "data:{}\n\n".format(msg)
time.sleep(1.0 / stream.fps)
@app.route(url, methods=['GET', 'POST'])
@functools.wraps(data2msg)
def route_fn():
return Response(generator(), mimetype="text/event-stream")
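# Minimal wiring sketch (the Flask app, the random point array and the port are
# assumptions, not taken from this repository): create a stream, register it as
# a server-sent-event route, and publish a scene cloud to it.
#
#   from flask import Flask
#   app = Flask(__name__)
#   scene_stream = Stream(capacity=1, fps=10)
#   vizstream(app, scene_stream, astype='scene_cloud')
#   scene_stream.publish(scene_points=np.random.rand(100, 6).astype(np.float32))
#   app.run(port=5000, threaded=True)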
########################################################################################################################
# region data2msg functions
########################################################################################################################
def data2msg_scene_cloud(scene_points, se_design_pts=None, downsample=False, int16_factor=100):
"""
Args:
scene_points (np.ndarray, dtype=float32, shape=(N, 6)): scene points
        se_design_pts (Optional(np.ndarray, dtype=float32, shape=(C, 2))): design points of the safety envelope
"""
# the next line downsamples the scene points. it selects one from every three points.
if downsample:
scene_points = scene_points[::3, :]
# convert to int16
scene_points = scene_points * int16_factor
scene_points = scene_points.astype(np.int16)
scene_pc_str = base64.b64encode(scene_points.tobytes()).decode("utf-8")
send_dict = dict(scene_pc_str=scene_pc_str)
if se_design_pts is not None:
# convert to int16
se_design_pts = se_design_pts * int16_factor
se_design_pts = se_design_pts.astype(np.int16)
se_pc_str = base64.b64encode(se_design_pts.tobytes()).decode("utf-8")
send_dict["se_pc_str"] = se_pc_str
json_str = json.dumps(send_dict)
return json_str
# def data2msg_camera_image(data: Frame):
# image_str = data.cam["image_str"]
# image_dtype = data.cam["datatype"]
# image_b64 = base64.b64encode(image_str).decode("utf-8")
# image_b64 = f"data:image/{image_dtype};base64,{image_b64}"
# return image_b64
# def data2msg_lidar_cloud(data, int16_factor=100):
# points = data # (N, 3)
# # convert to int16
# points = points * int16_factor
# points = points.astype(np.int16)
# pc_str = base64.b64encode(points.tobytes()).decode("utf-8")
# return pc_str
def data2msg_lc_curtain(lc_image, lc_cloud, score=None, int16_factor=100):
"""
Args:
lc_image: light curtain image.
- (np.ndarray, dtype=float32, shape=(H, C, 4)).
- Axis 2 corresponds to (x, y, z, i):
- x : x in cam frame.
- y : y in cam frame.
- z : z in cam frame.
- i : intensity of LC cloud, lying in [0, 255].
lc_cloud: light curtain point cloud.
- (np.ndarray, dtype=float32, shape=(N, 4)).
            - Axis 1 corresponds to (x, y, z, i):
- x : x in cam frame.
- y : y in cam frame.
- z : z in cam frame.
- i : intensity of LC cloud, lying in [0, 1].
score (Optional(float)): score to be displayed in kittiviewer
"""
# boundary
lc_image = lc_image[:, :, :3] # (H, C, 3)
ys = lc_image[:, :, 1] # (H, C)
ys[np.isnan(ys)] = 0 # replacing NaNs with zeros shouldn't affect the columnwise min or max of y
top_inds = np.argmin(ys, axis=0) # (C,)
bot_inds = np.argmax(ys, axis=0) # (C,)
top_xyz = lc_image[top_inds, np.arange(len(top_inds)), :] # (C, 3)
bot_xyz = lc_image[bot_inds, np.arange(len(bot_inds)), :] # (C, 3)
boundary = np.stack([top_xyz, bot_xyz], axis=1) # (C, 2, 3)
mask = np.isfinite(boundary).all(axis=(1, 2)) # (C,)
boundary = boundary[mask] # (C', 2, 3)
# intersection points
isect_pts = lc_cloud[lc_cloud[:, 3] > 0.05] # (N', 4)
# convert to int16
boundary = (boundary * int16_factor).astype(np.int16)
isect_pts = (isect_pts * int16_factor).astype(np.int16)
boundary_str = base64.b64encode(boundary.tobytes()).decode("utf-8")
isect_pts_str = base64.b64encode(isect_pts.tobytes()).decode("utf-8")
send_dict = dict(boundary=boundary_str, isect_pts=isect_pts_str, score=score)
json_str = json.dumps(send_dict)
return json_str
# def data2msg_dt_boxes(data):
# dt_boxes = data["detections"]
# json_str = json.dumps(dt_boxes)
# return json_str
# def data2msg_entropy_map(data):
# confidence_map = data["confidence_map"]
# entropy_heatmap = _create_entropy_heatmap(confidence_map)
# image_str = cv2.imencode('.png', entropy_heatmap)[1].tostring()
# image_b64 = base64.b64encode(image_str).decode("utf-8")
# image_b64 = f"data:image/png;base64,{image_b64}"
# return image_b64
# def data2msg_arrows(data):
# tails = list([float(e) for e in data["tails"].ravel()])
# heads = list([float(e) for e in data["heads"].ravel()])
# arrows = dict(tails=tails, heads=heads)
# json_str = json.dumps(arrows)
# return json_str
# endregion
########################################################################################################################
# region Helper functions
########################################################################################################################
# def _create_confidence_heatmap(confidence_map):
# # Take the mean of confidences for the 0-degrees and 90-degrees anchors
# conf_scores = confidence_map[:, :, 2:] # (Y, X, K)
# conf_scores = conf_scores.mean(axis=2) # (Y, X)
# # Rescale between 0 and 1.
# # conf_scores = conf_scores - conf_scores.min()
# # conf_scores = conf_scores / conf_scores.max()
# heatmap = cv2.applyColorMap((conf_scores * 255).astype(np.uint8), cv2.COLORMAP_HOT)
# return heatmap
# def _create_entropy_heatmap(confidence_map):
# p = confidence_map[:, :, 2:] # (Y, X, K)
# p = p.clip(1e-5, 1-1e-5) # (Y, X, K)
# entropy = -p * np.log2(p) - (1-p) * np.log2(1-p) # (Y, X, K)
# entropy = entropy.mean(axis=2) # (Y, X)
# heatmap = cv2.applyColorMap((entropy * 255).astype(np.uint8), cv2.COLORMAP_HOT)
# return heatmap
# endregion
########################################################################################################################
| 3.015625
| 3
|
network.py
|
Ammaruit/pwoc
| 10
|
12780521
|
<filename>network.py
import tensorflow as tf
import tensorflow_addons as tfa
from modules import FeaturePyramidNetwork, SceneFlowEstimator, ContextNetwork, OcclusionEstimator, CostVolumeLayer
class Network(tf.keras.Model):
def __init__(self, occlusion=True, mean_pixel=None):
super(Network, self).__init__()
self.occlusion = occlusion
self.mean_pixel = tf.Variable((0.0, 0.0, 0.0), trainable=False, name='mean_pixel', dtype=tf.float32)
if mean_pixel:
self.mean_pixel.assign(mean_pixel)
with tf.name_scope('model'):
self.correlation_layer = CostVolumeLayer()
with tf.name_scope('feature_pyramid_network'):
self.feature_pyramid_network = FeaturePyramidNetwork(tf.keras.Input(shape=(None, None, 3), dtype=tf.float32))
self.sceneflow_estimators = []
for (d, l) in zip([367, 307, 275, 243, 211], [6, 5, 4, 3, 2]):
with tf.name_scope('scene_flow_estimator_' + str(l)):
self.sceneflow_estimators.append(SceneFlowEstimator(tf.keras.Input(shape=(None, None, d), dtype=tf.float32), level=l, highest_resolution=(l==2)))
with tf.name_scope('context_network'):
self.context_network = ContextNetwork(tf.keras.Input(shape=(None, None, 36), dtype=tf.float32))
if occlusion:
self.occlusion_estimators = []
for (d, l) in zip([392, 258, 194, 130, 66], [6, 5, 4, 3, 2]):
with tf.name_scope('occlusion_estimator_'+str(l)):
self.occlusion_estimators.append(OcclusionEstimator(tf.keras.Input(shape=(None, None, d), dtype=tf.float32), level=l, highest_resolution=(l==2)))
def call(self, inputs, training=False, mask=None):
input_shape = tf.shape(inputs[0])
input_h, input_w = input_shape[1], input_shape[2]
h_fix = tf.cast(tf.round(tf.cast(input_h, tf.float32) / 64.) * 64, tf.int32)
w_fix = tf.cast(tf.round(tf.cast(input_w, tf.float32) / 64.) * 64, tf.int32)
new_size = tf.convert_to_tensor([h_fix, w_fix])
nl1 = tf.image.resize(inputs[0], new_size) - self.mean_pixel
nr1 = tf.image.resize(inputs[1], new_size) - self.mean_pixel
nl2 = tf.image.resize(inputs[2], new_size) - self.mean_pixel
nr2 = tf.image.resize(inputs[3], new_size) - self.mean_pixel
pyramid_l1 = self.feature_pyramid_network(nl1)
pyramid_r1 = self.feature_pyramid_network(nr1)
pyramid_l2 = self.feature_pyramid_network(nl2)
pyramid_r2 = self.feature_pyramid_network(nr2)
up_flow, up_feature = None, None
features, flow = None, None
occ_features_up, occ_mask_up = [], []
flows = [] # multi-scale output
# for each relevant pyramid level
for i, (fl1, fr1, fl2, fr2) in enumerate(zip(pyramid_l1, pyramid_r1, pyramid_l2, pyramid_r2)):
level = 6 - i
first_iteration = (i==0)
last_iteration = (level==2)
if first_iteration:
wr1 = fr1
wl2 = fl2
wr2 = fr2
else:
                # Careful! dense_image_warp expects flow of shape (B x) H x W x [v,u] and SUBTRACTS the displacement. --> adjust scene flow accordingly
disparity_displacement = tf.stack([tf.zeros_like(up_flow[:,:,:,2]), up_flow[:,:,:,2]], axis=-1) * 20.0 / (2.0 ** level)
wr1 = tfa.image.dense_image_warp(fr1, disparity_displacement)
flow_displacement = -up_flow[:,:,:,1::-1] * 20.0 / (2.0 ** level)
wl2 = tfa.image.dense_image_warp(fl2, flow_displacement)
cross_displacement = tf.stack([-up_flow[:,:,:,1], up_flow[:,:,:,3] - up_flow[:,:,:,0]], axis=-1) * 20.0 / (2.0 ** level)
wr2 = tfa.image.dense_image_warp(fr2, cross_displacement)
if self.occlusion:
occ_masks = []
for warped in [wr1, wl2, wr2]:
occ_inputs = tf.concat([fl1, warped], axis=-1)
if not first_iteration: # all but the first iteration
occ_inputs = tf.concat([occ_inputs, occ_features_up.pop(0), occ_mask_up.pop(0)], axis=-1)
if last_iteration:
occ_mask = self.occlusion_estimators[i](occ_inputs)
else:
occ_mask, feat_up, mask_up = self.occlusion_estimators[i](occ_inputs)
occ_features_up.append(feat_up)
occ_mask_up.append(mask_up)
occ_masks.append(occ_mask)
wr1 *= occ_masks[0]
wl2 *= occ_masks[1]
wr2 *= occ_masks[2]
cvr1 = self.correlation_layer(fl1, wr1, dimension=1)
cvl2 = self.correlation_layer(fl1, wl2, dimension=2)
cvr2 = self.correlation_layer(fl1, wr2, dimension=2)
input_list = [cvr1, cvl2, cvr2, fl1]
if not first_iteration: # all but the first iteration
input_list.append(up_flow)
input_list.append(up_feature)
estimator_input = tf.concat(input_list, axis=-1)
if last_iteration:
features, flow = self.sceneflow_estimators[i](estimator_input)
else:
flow, up_flow, up_feature = self.sceneflow_estimators[i](estimator_input)
flows.append(tf.identity(flow, name='prediction'+str(level)))
residual_flow = self.context_network(tf.concat([features, flow], axis=-1))
refined_flow = flow + residual_flow
flows.append(refined_flow)
prediction = tf.multiply(tf.image.resize(refined_flow, size=(input_h, input_w)), 20.0, name='final_prediction')
if training:
return prediction, flows
else:
return prediction
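if __name__ == "__main__":
    # Minimal smoke-test sketch; the 256x512 resolution, batch size and random
    # inputs are assumptions. The network resizes internally to multiples of 64.
    net = Network(occlusion=True)
    frames = [tf.random.uniform((1, 256, 512, 3)) for _ in range(4)]
    prediction = net(frames, training=False)
    print(prediction.shape)  # expected: (1, 256, 512, C) scene-flow channels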
| 2.515625
| 3
|
platform/polycommon/polycommon/unique_urls.py
|
erexer/polyaxon
| 0
|
12780522
|
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_user_url(username: str) -> str:
return "/{}".format(username)
def get_project_url(unique_name: str) -> str:
values = unique_name.split(".")
return "{}/{}".format(get_user_url(values[0]), values[1])
def get_user_project_url(username: str, project_name: str) -> str:
return "{}/{}".format(get_user_url(username), project_name)
def get_run_url(unique_name: str) -> str:
values = unique_name.split(".")
project_url = get_user_project_url(username=values[0], project_name=values[1])
return f"{project_url}/runs/{values[-1]}"
def get_run_health_url(unique_name: str) -> str:
run_url = get_run_url(unique_name=unique_name)
return f"{run_url}/_heartbeat"
def get_run_reconcile_url(unique_name: str) -> str:
run_url = get_run_url(unique_name=unique_name)
return "{}/_reconcile".format(run_url)
| 2.296875
| 2
|
functions/data.py
|
arknano/meeku_bot.py
| 2
|
12780523
|
import os
import json
local_path = os.path.dirname(__file__)
def load_config_file(path):
    with open(os.path.join(local_path, os.pardir, path)) as file:
        return json.load(file)
def load_loc():
return load_config_file('config/loc.json')
def load_responses_config():
return load_config_file('config/responses.json')
def load_bot_config():
return load_config_file('config/config.json')
def load_emoji_config():
    with open(os.path.join(local_path, os.pardir, 'config/emoji.json'), encoding="utf8") as file:
        return json.load(file)
def load_gif_config():
return load_config_file('config/gif.json')
def load_tokens():
return load_config_file('config/token.json')
| 2.390625
| 2
|
tests/utils.py
|
unplugstudio/mezzanine-webinars
| 1
|
12780524
|
from __future__ import unicode_literals, absolute_import
from django.contrib.auth import get_user_model
from django.test import TestCase
from django_functest import FuncWebTestMixin, ShortcutLoginMixin
User = get_user_model()
class WebTestBase(FuncWebTestMixin, ShortcutLoginMixin, TestCase):
@classmethod
def setUpTestData(cls):
super(WebTestBase, cls).setUpTestData()
cls.USER = User.objects.create_user(
username="user", password="<PASSWORD>", email="<EMAIL>"
)
| 1.96875
| 2
|
sankaku/lib/objects.py
|
Slimakoi/Sankaku.py
| 1
|
12780525
|
<gh_stars>1-10
class UserProfile:
def __init__(self, data):
self.json = data
self.artist_update_count = None
self.avatar_rating = None
self.avatar_url = None
self.comment_count = None
self.created_at = None
self.email = None
self.email_verification_status = None
self.enable_multi_factor_authentication = None
self.favs_are_private = None
self.filter_content = None
self.forum_post_count = None
self.hide_ads = None
self.id = None
self.is_verified = None
self.last_logged_in_at = None
self.level = None
self.name = None
self.note_update_count = None
self.pool_update_count = None
self.pool_upload_count = None
self.post_update_count = None
self.post_upload_count = None
self.receive_dmails = None
self.subscription_level = None
self.wiki_update_count = None
@property
def UserProfile(self):
try: self.artist_update_count = self.json["artist_update_count"]
except (KeyError, TypeError): pass
try: self.avatar_rating = self.json["avatar_rating"]
except (KeyError, TypeError): pass
try: self.avatar_url = self.json["avatar_url"]
except (KeyError, TypeError): pass
try: self.comment_count = self.json["comment_count"]
except (KeyError, TypeError): pass
try: self.created_at = self.json["created_at"]
except (KeyError, TypeError): pass
try: self.email = self.json["email"]
except (KeyError, TypeError): pass
try: self.email_verification_status = self.json["email_verification_status"]
except (KeyError, TypeError): pass
try: self.enable_multi_factor_authentication = self.json["enable_multi_factor_authentication"]
except (KeyError, TypeError): pass
try: self.favs_are_private = self.json["favs_are_private"]
except (KeyError, TypeError): pass
try: self.filter_content = self.json["filter_content"]
except (KeyError, TypeError): pass
try: self.forum_post_count = self.json["forum_post_count"]
except (KeyError, TypeError): pass
try: self.hide_ads = self.json["hide_ads"]
except (KeyError, TypeError): pass
try: self.id = self.json["id"]
except (KeyError, TypeError): pass
try: self.is_verified = self.json["is_verified"]
except (KeyError, TypeError): pass
try: self.last_logged_in_at = self.json["last_logged_in_at"]
except (KeyError, TypeError): pass
try: self.level = self.json["level"]
except (KeyError, TypeError): pass
try: self.name = self.json["name"]
except (KeyError, TypeError): pass
try: self.note_update_count = self.json["note_update_count"]
except (KeyError, TypeError): pass
try: self.pool_update_count = self.json["pool_update_count"]
except (KeyError, TypeError): pass
try: self.pool_upload_count = self.json["pool_upload_count"]
except (KeyError, TypeError): pass
try: self.post_update_count = self.json["post_update_count"]
except (KeyError, TypeError): pass
try: self.post_upload_count = self.json["post_upload_count"]
except (KeyError, TypeError): pass
try: self.receive_dmails = self.json["receive_dmails"]
except (KeyError, TypeError): pass
try: self.subscription_level = self.json["subscription_level"]
except (KeyError, TypeError): pass
try: self.wiki_update_count = self.json["wiki_update_count"]
except (KeyError, TypeError): pass
return self
class Posts:
def __init__(self, data):
self.json = data
self.id = []
self.rating = []
self.status = []
self.author = []
self.sample_url = []
self.sample_width = []
self.sample_height = []
self.preview_url = []
self.preview_width = []
self.preview_height = []
self.file_url = []
self.width = []
self.height = []
self.file_size = []
self.file_type = []
self.created_at = []
self.has_children = []
self.has_comments = []
self.has_notes = []
self.is_favorited = []
self.user_vote = []
self.md5 = []
self.parent_id = []
self.change = []
self.fav_count = []
self.recommended_posts = []
self.recommended_score = []
self.vote_count = []
self.total_score = []
self.comment_count = []
self.source = []
self.in_visible_pool = []
self.is_premium = []
self.is_rating_locked = []
self.is_note_locked = []
self.is_status_locked = []
self.redirect_to_signup = []
self.sequence = []
self.tags = []
@property
def Posts(self):
for item in self.json:
try: self.id.append(item["id"])
except (KeyError, TypeError): self.id.append(None)
try: self.rating.append(item["rating"])
except (KeyError, TypeError): self.rating.append(None)
try: self.status.append(item["status"])
except (KeyError, TypeError): self.status.append(None)
try: self.author.append(item["author"])
except (KeyError, TypeError): self.author.append(None)
try: self.sample_url.append(item["sample_url"])
except (KeyError, TypeError): self.sample_url.append(None)
try: self.sample_width.append(item["sample_width"])
except (KeyError, TypeError): self.sample_width.append(None)
try: self.sample_height.append(item["sample_height"])
except (KeyError, TypeError): self.sample_height.append(None)
try: self.preview_url.append(item["preview_url"])
except (KeyError, TypeError): self.preview_url.append(None)
try: self.preview_width.append(item["preview_width"])
except (KeyError, TypeError): self.preview_width.append(None)
try: self.preview_height.append(item["preview_height"])
except (KeyError, TypeError): self.preview_height.append(None)
try: self.file_url.append(item["file_url"])
except (KeyError, TypeError): self.file_url.append(None)
try: self.width.append(item["width"])
except (KeyError, TypeError): self.width.append(None)
try: self.height.append(item["height"])
except (KeyError, TypeError): self.height.append(None)
try: self.file_size.append(item["file_size"])
except (KeyError, TypeError): self.file_size.append(None)
try: self.file_type.append(item["file_type"])
except (KeyError, TypeError): self.file_type.append(None)
try: self.created_at.append(item["created_at"])
except (KeyError, TypeError): self.created_at.append(None)
try: self.has_children.append(item["has_children"])
except (KeyError, TypeError): self.has_children.append(None)
try: self.has_comments.append(item["has_comments"])
except (KeyError, TypeError): self.has_comments.append(None)
try: self.has_notes.append(item["has_notes"])
except (KeyError, TypeError): self.has_notes.append(None)
try: self.is_favorited.append(item["is_favorited"])
except (KeyError, TypeError): self.is_favorited.append(None)
try: self.user_vote.append(item["user_vote"])
except (KeyError, TypeError): self.user_vote.append(None)
try: self.md5.append(item["md5"])
except (KeyError, TypeError): self.md5.append(None)
try: self.parent_id.append(item["parent_id"])
except (KeyError, TypeError): self.parent_id.append(None)
try: self.change.append(item["change"])
except (KeyError, TypeError): self.change.append(None)
try: self.fav_count.append(item["fav_count"])
except (KeyError, TypeError): self.fav_count.append(None)
try: self.recommended_posts.append(item["recommended_posts"])
except (KeyError, TypeError): self.recommended_posts.append(None)
try: self.recommended_score.append(item["recommended_score"])
except (KeyError, TypeError): self.recommended_score.append(None)
try: self.vote_count.append(item["vote_count"])
except (KeyError, TypeError): self.vote_count.append(None)
try: self.total_score.append(item["total_score"])
except (KeyError, TypeError): self.total_score.append(None)
try: self.comment_count.append(item["comment_count"])
except (KeyError, TypeError): self.comment_count.append(None)
try: self.source.append(item["source"])
except (KeyError, TypeError): self.source.append(None)
try: self.in_visible_pool.append(item["in_visible_pool"])
except (KeyError, TypeError): self.in_visible_pool.append(None)
try: self.is_premium.append(item["is_premium"])
except (KeyError, TypeError): self.is_premium.append(None)
try: self.is_rating_locked.append(item["is_rating_locked"])
except (KeyError, TypeError): self.is_rating_locked.append(None)
try: self.is_note_locked.append(item["is_note_locked"])
except (KeyError, TypeError): self.is_note_locked.append(None)
try: self.is_status_locked.append(item["is_status_locked"])
except (KeyError, TypeError): self.is_status_locked.append(None)
try: self.redirect_to_signup.append(item["redirect_to_signup"])
except (KeyError, TypeError): self.redirect_to_signup.append(None)
try: self.sequence.append(item["sequence"])
except (KeyError, TypeError): self.sequence.append(None)
try: self.tags.append(Tags(item["tags"]).Tags)
except (KeyError, TypeError): self.tags.append(Tags([]).Tags)
return self
class PostsTags:
def __init__(self, data):
self.json = data
self.id = []
self.name = []
self.name_en = []
self.name_ja = []
self.type = []
self.count = []
self.post_count = []
self.pool_count = []
self.tagName = []
@property
def PostsTags(self):
for item in self.json:
try: self.id.append(item["id"])
except (KeyError, TypeError): self.id.append(None)
try: self.name.append(item["name"])
except (KeyError, TypeError): self.name.append(None)
try: self.name_en.append(item["name_en"])
except (KeyError, TypeError): self.name_en.append(None)
try: self.name_ja.append(item["name_ja"])
except (KeyError, TypeError): self.name_ja.append(None)
try: self.type.append(item["type"])
except (KeyError, TypeError): self.type.append(None)
try: self.count.append(item["count"])
except (KeyError, TypeError): self.count.append(None)
try: self.post_count.append(item["post_count"])
except (KeyError, TypeError): self.post_count.append(None)
try: self.pool_count.append(item["pool_count"])
except (KeyError, TypeError): self.pool_count.append(None)
try: self.tagName.append(item["tagName"])
except (KeyError, TypeError): self.tagName.append(None)
return self
class Tags:
def __init__(self, data):
self.json = data
self.count = []
self.id = []
self.locale = []
self.name = []
self.name_en = []
self.name_ja = []
self.pool_count = []
self.post_count = []
self.rating = []
self.tagName = []
self.type = []
self.version = []
@property
def Tags(self):
for item in self.json:
try: self.count.append(item["count"])
except (KeyError, TypeError): self.count.append(None)
try: self.id.append(item["id"])
except (KeyError, TypeError): self.id.append(None)
try: self.locale.append(item["locale"])
except (KeyError, TypeError): self.locale.append(None)
try: self.name.append(item["name"])
except (KeyError, TypeError): self.name.append(None)
try: self.name_en.append(item["name_en"])
except (KeyError, TypeError): self.name_en.append(None)
try: self.name_ja.append(item["name_ja"])
except (KeyError, TypeError): self.name_ja.append(None)
try: self.pool_count.append(item["pool_count"])
except (KeyError, TypeError): self.pool_count.append(None)
try: self.post_count.append(item["post_count"])
except (KeyError, TypeError): self.post_count.append(None)
try: self.rating.append(item["rating"])
except (KeyError, TypeError): self.rating.append(None)
try: self.tagName.append(item["tagName"])
except (KeyError, TypeError): self.tagName.append(None)
try: self.type.append(item["type"])
except (KeyError, TypeError): self.type.append(None)
try: self.version.append(item["version"])
except (KeyError, TypeError): self.version.append(None)
return self
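# Minimal usage sketch (`profile_json` and `posts_json` are assumptions standing
# in for parsed responses from the Sankaku API):
#
#   profile = UserProfile(profile_json).UserProfile
#   print(profile.name, profile.level)
#
#   posts = Posts(posts_json).Posts
#   for post_id, rating in zip(posts.id, posts.rating):
#       print(post_id, rating)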
| 2.28125
| 2
|
test.py
|
skoblov-lab/SciLK
| 10
|
12780526
|
import unittest
from typing import Sequence, Iterable, cast, Mapping
import tempfile
import os
import numpy as np
import joblib
from hypothesis import given, note
from hypothesis import settings, strategies as st
from scilk.corpora import genia
from scilk.util import intervals
from scilk.collections import _collections
import scilk
MAX_TESTS = 1000
# strategies
texts = st.text(st.characters(min_codepoint=32, max_codepoint=255), 0, 500, 1000)
def loader_caller(collection: _collections.Collection, data=None):
def caller(value: str):
return collection.translate(value)
return caller
def loader_translate(collection: _collections.Collection, data: dict):
mapping = joblib.load(data['mapping'])
def translate(value: str):
return mapping.get(value)
return translate
# test cases
class TestText(unittest.TestCase):
@staticmethod
def unparse(txt, intervals_: Sequence[intervals.Interval]):
if not len(intervals_):
return ""
codes = np.repeat([ord(" ")], intervals_[-1].stop)
for iv in intervals_:
token = intervals.extract(txt, [iv])[0]
codes[iv.start:iv.stop] = list(map(ord, token))
return "".join(map(chr, codes))
# @given(texts)
# @settings(max_examples=MAX_TESTS)
# def test_parse_text(self, txt):
# parsed = text.tointervals(text.fine_tokeniser, txt)
# mod_text = re.sub("\s", " ", txt)
# self.assertEqual(self.unparse(txt, parsed), mod_text.rstrip())
class TestGenia(unittest.TestCase):
@given(st.lists(st.text()))
@settings(max_examples=MAX_TESTS)
def test_text_boundaries(self, texts: list):
"""
        Test of the _segment_borders() function.
:return:
"""
boundaries = genia._segment_borders(texts)
note(boundaries)
self.assertTrue(all([boundaries[i][1] == boundaries[i + 1][0] for i in
range(len(boundaries) - 1)]))
self.assertTrue(all([boundaries[i][0] <= boundaries[i][1] for i in
range(len(boundaries) - 1)]))
if boundaries:
self.assertTrue(boundaries[0][0] == 0)
class TestCollection(unittest.TestCase):
def test_collection(self):
with tempfile.TemporaryDirectory() as dirpath:
scilk.SCILK_ROOT = dirpath
mapping = dict(test='OK')
mapping_path = os.path.join(dirpath, 'mapping.joblib')
joblib.dump(mapping, mapping_path)
collection = _collections.Collection()
collection.add('translate', loader_translate, dict(mapping=mapping_path))
collection.add('caller', loader_caller)
            self.assertEqual(collection.caller('test'), 'OK')
            collection.save(name='test')
            collection = _collections.Collection.load('test')
            self.assertEqual(collection.caller('test'), 'OK')
            self.assertEqual({'translate', 'caller'}, set(collection.entries))
if __name__ == '__main__':
unittest.main()
| 2.53125
| 3
|
entrypoint.py
|
brenshanny/project_lob
| 1
|
12780527
|
import os
import logging
import argparse
import sys
from .controllers.hot_lobs import HotLobMonitor
from .controllers.cold_lobs import ColdLobMonitor
if __name__ == "__main__":
# Check for config file
if "PROJECT_LOB_CONFIG" not in os.environ:
print("PROJECT_LOB_CONFIG must be an environment variable set to "
"the path of a JSON config file.")
sys.exit()
config_path = os.environ["PROJECT_LOB_CONFIG"]
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("-hm", "--hot-monitor", help="Run the hot lob monitor",
action="store_true")
parser.add_argument("-cm", "--cold-monitor", help="Run the cold lob monitor",
action="store_true")
args = parser.parse_args()
if not args.hot_monitor and not args.cold_monitor:
print("Must specify a program to run, hot monitor or cold"
" controller!")
sys.exit()
if args.hot_monitor and args.cold_monitor:
print("Cannot run both hot and cold monitoring programs."
" Please specify one")
sys.exit()
# Setup logging
logger = logging.getLogger('project_lob')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('lobster_log.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
# Run monitor
if args.hot_monitor:
logger.info("Creating hot lob monitor!")
monitor = HotLobMonitor(config_path)
if args.cold_monitor:
logger.info("Creating cold lob monitor!")
monitor = ColdLobMonitor(config_path)
monitor.run()
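# Example invocation (the config path and package name are assumptions; the
# relative imports above require running this file as a module):
#   PROJECT_LOB_CONFIG=/path/to/config.json python -m project_lob.entrypoint --hot-monitor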
| 2.375
| 2
|
pynrc/maths/fast_poly.py
|
JarronL/pyNRC
| 6
|
12780528
|
#from __future__ import absolute_import, division, print_function, unicode_literals
# import numpy as np
# from numpy.polynomial import legendre
from webbpsf_ext.maths import jl_poly, jl_poly_fit, fit_bootstrap
| 1.125
| 1
|
tests/datatypes/test-NMTOKENS.py
|
thorstenb/pyxb
| 0
|
12780529
|
<filename>tests/datatypes/test-NMTOKENS.py
from pyxb.exceptions_ import *
import unittest
import pyxb.binding.datatypes as xsd
class Test_NMTOKENS (unittest.TestCase):
def testBasicLists (self):
v = xsd.NMTOKENS([ "one", "_two", "three" ])
self.assertEqual(3, len(v))
self.assertTrue(isinstance(v[0], xsd.NMTOKEN))
self.assertEqual("one", v[0])
def testStringLists (self):
v = xsd.NMTOKENS("one _two three")
self.assertEqual(3, len(v))
self.assertEqual("one", v[0])
self.assertTrue(isinstance(v[0], xsd.NMTOKEN))
self.assertRaises(BadTypeValueError, xsd.NMTOKENS, 'string with b@d id')
if __name__ == '__main__':
unittest.main()
| 2.484375
| 2
|
dataset-scripts/simulated_import.py
|
cnasikas/smpc-analytics
| 12
|
12780530
|
<reponame>cnasikas/smpc-analytics<gh_stars>10-100
import argparse
import sys
import json
import pandas as pd
import os
from subprocess import Popen, PIPE, STDOUT
from huepy import *
import hashlib
CURRENT_FILE_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
class ProcessError(Exception):
def __init__(self, message=''):
self.message = message
def __str__(self):
return self.message
def execute(command, stdout, stdin, stderr, verbose=False):
if verbose:
# print('[INFO] Running: ' + ' '.join(command))
print(run('Running: ' + ' '.join(command) +' from '+ CURRENT_FILE_DIRECTORY))
process = Popen(command, stdout=stdout, stdin = stdin, stderr = stderr, cwd = CURRENT_FILE_DIRECTORY)
out, err = process.communicate();
rc = process.returncode
if rc != 0:
if verbose:
print(out)
raise ProcessError()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', help = 'CSV file to be imported')
parser.add_argument('--table', help= 'Optional table name')
parser.add_argument('--attributes', help = 'Optional argument. A subset of the CSV columns only which will be imported. Semi-colon separated column names.')
parser.add_argument('--verbose', help = 'See executed commands in verbose output', action = 'store_true')
args = parser.parse_args()
secrec_filename = 'simulated_import_' + str(os.getpid())
secrec_source = secrec_filename + '.sc'
    secrec_executable = secrec_filename + '.sb'
build_secrec_script(args.file, args.table, args.verbose, args.attributes, secrec_source)
try:
execute(['../sharemind-scripts/compile.sh',os.path.relpath(secrec_source, CURRENT_FILE_DIRECTORY)], stdout=PIPE, stdin=PIPE, stderr=STDOUT, verbose=args.verbose)
except ProcessError as e:
print(bad('Error in secrec compilation'))
sys.exit(-1)
try:
execute(['../sharemind-scripts/run.sh', os.path.relpath(secrec_executable, CURRENT_FILE_DIRECTORY)], stdout=PIPE, stdin=PIPE, stderr=STDOUT, verbose=args.verbose)
except ProcessError as e:
print(bad('Error in secrec execution'))
sys.exit(-1)
print(good('Data successfully imported.'))
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def quote(x):
if is_number(x):
return str(x)
else:
return '"' + x + '"'
def build_secrec_script(data, table, verbose, columns, secrec_source = 'simulated_import.sc'):
indentation = ' '
imports = '''
import shared3p;
import shared3p_random;
import shared3p_sort;
import stdlib;
import shared3p_table_database;
import table_database;
domain pd_shared3p shared3p;
'''
main_f = '''
void main(){
'''
directory, basename = os.path.split(data)
basename = os.path.splitext(basename)[0]
table_name = basename
if table != None:
table_name = table
df=pd.read_csv(data,sep=',')
if columns != None:
df = df [columns.split(';')]
main_f += '''
string datasource = "DS1";
string table = ''' + quote(table_name) + ''';
uint64 rows = ''' + quote(len(df.index)) + ''';
uint64 columns = ''' + quote(len(df.columns)) + ''';
'''
imported_array = []
for index, row in df.iterrows():
imported_array += [row[i] for i in df.columns]
data_type = 'float64'
main_f += '''
pd_shared3p ''' + data_type + '''[[2]] imported_array = reshape({''' + ','.join(map(str,imported_array)) + '''}, rows, columns);
print("Opening connection to db: ", datasource);
tdbOpenConnection(datasource);
print("Table: " + table);
// Check if a table exists
if (tdbTableExists(datasource, table)) {
// Delete existing table
print("Deleting existing table: ", table);
tdbTableDelete(datasource, table);
}
print("Creating new table: ", table);
uint64 nrows = shape(imported_array)[0];
uint64 ncols = shape(imported_array)[1];
uint64 parameters = tdbVmapNew();
'''
i = 0
for attribute in df.columns:
main_f += '''
pd_shared3p ''' + data_type + ''' v''' + str(i) + ''';
tdbVmapAddType(parameters, "types", v''' + str(i) + ''');
tdbVmapAddString(parameters, "names", ''' + quote(attribute) + ''');
'''
i += 1
main_f += '''
tdbTableCreate(datasource, table, parameters);
print("Inserting data to table " + table + "...");
pd_shared3p ''' + data_type + '''[[1]] row;
for (uint i = 0; i < nrows; ++i) {
row = imported_array[i,:];
tdbInsertRow(datasource, table, row);
}
print("Done inserting in table " + table + "\\n\\n");
tdbCloseConnection(datasource);
}'''
with open(secrec_source, 'w') as output:
output.write(imports)
output.write(main_f)
if verbose:
print(good('Secrec import script generated at '+secrec_source))
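# Example invocation (the CSV path, table name and column list are hypothetical):
#   python simulated_import.py measurements.csv --table measurements \
#       --attributes "age;bmi" --verbose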
if __name__ == '__main__':
main()
| 2.171875
| 2
|
main.py
|
yaxollum/physics-calculator
| 0
|
12780531
|
import math
from math import pi,sqrt,radians,degrees
import sys
g=9.8
G=6.67e-11
e=1.6021917e-19
ke=8.9875e9
me=9.1095e-31
mp=1.67261e-27
mn=1.674929e-27
blocked=["exec","sys","eval","PROCESS","CMD","RESULT","block","import","math","from",]
def sin(deg):
return math.sin(radians(deg))
def cos(deg):
return math.cos(radians(deg))
def tan(deg):
return math.tan(radians(deg))
def asin(n):
return degrees(math.asin(n))
def acos(n):
return degrees(math.acos(n))
def atan(n):
return degrees(math.atan(n))
def sq(n):
return n*n
ans=0
def PROCESS(CMD):
global ans
for block in blocked:
if block in CMD:
print('The keyword "'+block+'" has been blocked for security reasons.')
return
RESULT=eval(CMD)
if type(RESULT).__name__=="float" and abs(RESULT)>=1e-3 and abs(RESULT)<=1e9:
print("%.6f"%RESULT)
elif type(RESULT).__name__=="float" and abs(RESULT)>=1e9:
print("%e"%RESULT)
else:
print(RESULT)
ans=RESULT
print("Welcome to YAXO Physics Calculator!\n")
while True:
try:
PROCESS(input(">>> "))
except KeyboardInterrupt:
break
except:
print(sys.exc_info()[0].__name__)
input("\nThank you for using YAXO Physics Calculator! (Press Return to exit)")
| 3.296875
| 3
|
src/server/db/__init__.py
|
ralfstefanbender/Studifix2
| 0
|
12780532
|
print("db package (Mapper) wird initialisiert...")
| 1.15625
| 1
|
2_7.py
|
JeffreyAsuncion/PCEP_training_2020_12
| 0
|
12780533
|
<reponame>JeffreyAsuncion/PCEP_training_2020_12<filename>2_7.py<gh_stars>0
# Looping your code with while
# we will store the current greatest number here
max = -999999999
# get the first value *
number = int(input("Enter value or -1 to stop: "))
# if the number is not equal to -1 we will continue
while number != -1:
# is the number greater than max?
if number > max:
# yes - update max
max = number
# get next number
number = int(input("Enter value or -1 to stop: "))
# print the largest number
print("The largest number is ", max)
| 4.125
| 4
|
pybar/daq/readout_utils.py
|
laborleben/pyBAR
| 10
|
12780534
|
<filename>pybar/daq/readout_utils.py
import logging
import os
import numpy as np
import tables as tb
class NameValue(tb.IsDescription):
name = tb.StringCol(256, pos=0)
    value = tb.StringCol(4 * 1024, pos=1)  # second column; pos=0 was duplicated with the name column
def save_configuration_dict(h5_file, configuation_name, configuration, **kwargs):
'''Stores any configuration dictionary to HDF5 file.
Parameters
----------
h5_file : string, file
Filename of the HDF5 configuration file or file object.
configuation_name : str
Configuration name. Will be used for table name.
configuration : dict
Configuration dictionary.
'''
def save_conf():
try:
h5_file.remove_node(h5_file.root.configuration, name=configuation_name)
except tb.NodeError:
pass
try:
configuration_group = h5_file.create_group(h5_file.root, "configuration")
except tb.NodeError:
configuration_group = h5_file.root.configuration
scan_param_table = h5_file.create_table(configuration_group, name=configuation_name, description=NameValue, title=configuation_name)
row_scan_param = scan_param_table.row
for key, value in dict.iteritems(configuration):
row_scan_param['name'] = key
row_scan_param['value'] = str(value)
row_scan_param.append()
scan_param_table.flush()
if isinstance(h5_file, tb.file.File):
save_conf()
else:
if os.path.splitext(h5_file)[1].strip().lower() != ".h5":
h5_file = os.path.splitext(h5_file)[0] + ".h5"
with tb.open_file(h5_file, mode="a", title='', **kwargs) as h5_file:
save_conf()
def convert_data_array(array, filter_func=None, converter_func=None): # TODO: add copy parameter, otherwise in-place
'''Filter and convert raw data numpy array (numpy.ndarray).
Parameters
----------
array : numpy.array
Raw data array.
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
data_array : numpy.array
Data numpy array of specified dimension (converter_func) and content (filter_func)
'''
# if filter_func != None:
# if not hasattr(filter_func, '__call__'):
# raise ValueError('Filter is not callable')
if filter_func:
array = array[filter_func(array)]
# if converter_func != None:
# if not hasattr(converter_func, '__call__'):
# raise ValueError('Converter is not callable')
if converter_func:
array = converter_func(array)
return array
def convert_data_iterable(data_iterable, filter_func=None, converter_func=None): # TODO: add concatenate parameter
'''Convert raw data in data iterable.
Parameters
----------
data_iterable : iterable
Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status).
filter_func : function
Function that takes array and returns true or false for each item in array.
converter_func : function
Function that takes array and returns an array or tuple of arrays.
Returns
-------
data_list : list
Data list of the form [(converted data, timestamp_start, timestamp_stop, status), (...), ...]
'''
data_list = []
for item in data_iterable:
data_list.append((convert_data_array(item[0], filter_func=filter_func, converter_func=converter_func), item[1], item[2], item[3]))
return data_list
def data_array_from_data_iterable(data_iterable):
'''Convert data iterable to raw data numpy array.
Parameters
----------
data_iterable : iterable
Iterable where each element is a tuple with following content: (raw data, timestamp_start, timestamp_stop, status).
Returns
-------
data_array : numpy.array
concatenated data array
'''
try:
data_array = np.concatenate([item[0] for item in data_iterable])
except ValueError: # length is 0
data_array = np.empty(0, dtype=np.uint32)
return data_array
def is_tdc_from_channel(channel=4): # function factory
'''Selecting TDC data from given channel.
Parameters
----------
channel : int
Channel number (4 is default channel on Single Chip Card).
Returns
-------
Function.
Usage:
1 Selecting TDC data from channel 4 (combine with is_tdc_word):
filter_tdc_data_from_channel_4 = logical_and(is_tdc_word, is_tdc_from_channel(4))
tdc_data_from_channel_4 = data_array[filter_tdc_data_from_channel_4(data_array)]
'''
if channel >= 1 and channel < 8:
def f(value):
return np.equal(np.right_shift(np.bitwise_and(value, 0x70000000), 28), channel)
f.__name__ = "is_tdc_from_channel_" + str(channel) # or use inspect module: inspect.stack()[0][3]
return f
else:
raise ValueError('Invalid channel number')
def convert_tdc_to_channel(channel):
''' Converts TDC words at a given channel to common TDC header (0x4).
'''
def f(value):
filter_func = logical_and(is_tdc_word, is_tdc_from_channel(channel))
select = filter_func(value)
value[select] = np.bitwise_and(value[select], 0x0FFFFFFF)
value[select] = np.bitwise_or(value[select], 0x40000000)
f.__name__ = "convert_tdc_to_channel_" + str(channel)
return value
return f
def is_data_from_channel(channel=4): # function factory
'''Selecting FE data from given channel.
Parameters
----------
channel : int
Channel number (4 is default channel on Single Chip Card).
Returns
-------
Function.
Usage:
1 Selecting FE data from channel 4 (combine with is_fe_word):
filter_fe_data_from_channel_4 = logical_and(is_fe_word, is_data_from_channel(4))
fe_data_from_channel_4 = data_array[filter_fe_data_from_channel_4(data_array)]
    2 Selecting data from channel 4:
        filter_data_from_channel_4 = is_data_from_channel(4)
        data_from_channel_4 = data_array[filter_data_from_channel_4(fe_data_array)]
    3 Selecting data from channel 4:
data_from_channel_4 = is_data_from_channel(4)(fe_raw_data)
Other usage:
f_ch4 = functoools.partial(is_data_from_channel, channel=4)
l_ch4 = lambda x: is_data_from_channel(x, channel=4)
'''
if channel >= 0 and channel < 16:
def f(value):
return np.equal(np.right_shift(np.bitwise_and(value, 0x0F000000), 24), channel)
f.__name__ = "is_data_from_channel_" + str(channel) # or use inspect module: inspect.stack()[0][3]
return f
else:
raise ValueError('Invalid channel number')
def logical_and(f1, f2): # function factory
'''Logical and from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function.
Usage:
filter_func=logical_and(is_data_record, is_data_from_channel(4)) # new filter function
filter_func(array) # array that has Data Records from channel 4
'''
def f(value):
return np.logical_and(f1(value), f2(value))
f.__name__ = "(" + f1.__name__ + "_and_" + f2.__name__ + ")"
return f
def logical_or(f1, f2): # function factory
'''Logical or from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function.
'''
def f(value):
return np.logical_or(f1(value), f2(value))
f.__name__ = "(" + f1.__name__ + "_or_" + f2.__name__ + ")"
return f
def logical_not(f):  # function factory
    '''Logical not from a function.
    Parameters
    ----------
    f : function
        Function that takes array and returns true or false for each item in array.
    Returns
    -------
    Function.
    '''
    # the inner function must not be named f, otherwise it shadows the argument
    # and calls itself recursively
    def not_f(value):
        return np.logical_not(f(value))
    not_f.__name__ = "not_" + f.__name__
    return not_f
def logical_xor(f1, f2): # function factory
'''Logical xor from functions.
Parameters
----------
f1, f2 : function
Function that takes array and returns true or false for each item in array.
Returns
-------
Function.
'''
def f(value):
return np.logical_xor(f1(value), f2(value))
f.__name__ = "(" + f1.__name__ + "_xor_" + f2.__name__ + ")"
return f
def true(value):
return np.True_
def false(value):
return np.False_
def is_trigger_word(value):
return np.equal(np.bitwise_and(value, 0x80000000), 0x80000000)
def is_tdc_word(value):
return np.logical_and(np.equal(np.bitwise_and(value, 0x80000000), 0), np.greater(np.bitwise_and(value, 0x70000000), 0))
def is_fe_word(value):
return np.equal(np.bitwise_and(value, 0xF0000000), 0)
def is_data_header(value):
return np.equal(np.bitwise_and(value, 0x00FF0000), 0b111010010000000000000000)
def is_address_record(value):
return np.equal(np.bitwise_and(value, 0x00FF0000), 0b111010100000000000000000)
def is_value_record(value):
return np.equal(np.bitwise_and(value, 0x00FF0000), 0b111011000000000000000000)
def is_service_record(value):
return np.equal(np.bitwise_and(value, 0x00FF0000), 0b111011110000000000000000)
def is_data_record(value):
return np.logical_and(np.logical_and(np.less_equal(np.bitwise_and(value, 0x00FE0000), 0x00A00000), np.less_equal(np.bitwise_and(value, 0x0001FF00), 0x00015000)), np.logical_and(np.not_equal(np.bitwise_and(value, 0x00FE0000), 0x00000000), np.not_equal(np.bitwise_and(value, 0x0001FF00), 0x00000000)))
def get_trigger_data(value, mode=0):
'''Returns 31bit trigger counter (mode=0), 31bit timestamp (mode=1), 15bit timestamp and 16bit trigger counter (mode=2)
'''
if mode == 2:
return np.right_shift(np.bitwise_and(value, 0x7FFF0000), 16), np.bitwise_and(value, 0x0000FFFF)
else:
return np.bitwise_and(value, 0x7FFFFFFF)
def get_address_record_address(value):
'''Returns the address in the address record.
'''
return np.bitwise_and(value, 0x0000EFFF)
def get_address_record_type(value):
'''Returns the type in the address record.
'''
return np.right_shift(np.bitwise_and(value, 0x00008000), 14)
def get_value_record(value):
'''Returns the value in the value record.
'''
return np.bitwise_and(value, 0x0000FFFF)
def get_col_row_tot_array_from_data_record_array(array): # TODO: max ToT
'''Convert raw data array to column, row, and ToT array.
Parameters
----------
array : numpy.array
Raw data array.
Returns
-------
Tuple of arrays.
'''
def get_col_row_tot_1_array_from_data_record_array(value):
return np.right_shift(np.bitwise_and(value, 0x00FE0000), 17), np.right_shift(np.bitwise_and(value, 0x0001FF00), 8), np.right_shift(np.bitwise_and(value, 0x000000F0), 4)
def get_col_row_tot_2_array_from_data_record_array(value):
return np.right_shift(np.bitwise_and(value, 0x00FE0000), 17), np.add(np.right_shift(np.bitwise_and(value, 0x0001FF00), 8), 1), np.bitwise_and(value, 0x0000000F)
col_row_tot_1_array = np.column_stack(get_col_row_tot_1_array_from_data_record_array(array))
col_row_tot_2_array = np.column_stack(get_col_row_tot_2_array_from_data_record_array(array))
# interweave array here
col_row_tot_array = np.vstack((col_row_tot_1_array.T, col_row_tot_2_array.T)).reshape((3, -1), order='F').T # http://stackoverflow.com/questions/5347065/interweaving-two-numpy-arrays
# remove ToT > 14 (late hit, no hit) from array, remove row > 336 in case we saw hit in row 336 (no double hit possible)
try:
col_row_tot_array_filtered = col_row_tot_array[col_row_tot_array[:, 2] < 14] # [np.logical_and(col_row_tot_array[:,2]<14, col_row_tot_array[:,1]<=336)]
except IndexError:
# logging.warning('Array is empty')
return np.array([], dtype=np.dtype('>u4')), np.array([], dtype=np.dtype('>u4')), np.array([], dtype=np.dtype('>u4'))
return col_row_tot_array_filtered[:, 0], col_row_tot_array_filtered[:, 1], col_row_tot_array_filtered[:, 2] # column, row, ToT
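# Minimal usage sketch (`raw_data` is an assumption standing in for one chunk of
# raw readout words): keep only FE-I4 Data Records and unpack them into
# column / row / ToT arrays in a single call.
#
#   col, row, tot = convert_data_array(
#       raw_data,
#       filter_func=logical_and(is_fe_word, is_data_record),
#       converter_func=get_col_row_tot_array_from_data_record_array)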
def get_col_row_array_from_data_record_array(array):
col, row, _ = get_col_row_tot_array_from_data_record_array(array)
return col, row
def get_row_col_array_from_data_record_array(array):
col, row, _ = get_col_row_tot_array_from_data_record_array(array)
return row, col
def get_tot_array_from_data_record_array(array):
_, _, tot = get_col_row_tot_array_from_data_record_array(array)
return tot
def get_occupancy_mask_from_data_record_array(array, occupancy):
pass # TODO:
def get_col_row_iterator_from_data_records(array): # generator
for item in np.nditer(array): # , flags=['multi_index']):
yield np.right_shift(np.bitwise_and(item, 0x00FE0000), 17), np.right_shift(np.bitwise_and(item, 0x0001FF00), 8)
if np.not_equal(np.bitwise_and(item, 0x0000000F), 15):
yield np.right_shift(np.bitwise_and(item, 0x00FE0000), 17), np.add(np.right_shift(np.bitwise_and(item, 0x0001FF00), 8), 1)
def get_row_col_iterator_from_data_records(array): # generator
for item in np.nditer(array, flags=['multi_index']):
yield np.right_shift(np.bitwise_and(item, 0x0001FF00), 8), np.right_shift(np.bitwise_and(item, 0x00FE0000), 17)
if np.not_equal(np.bitwise_and(item, 0x0000000F), 15):
yield np.add(np.right_shift(np.bitwise_and(item, 0x0001FF00), 8), 1), np.right_shift(np.bitwise_and(item, 0x00FE0000), 17)
def get_col_row_tot_iterator_from_data_records(array): # generator
for item in np.nditer(array, flags=['multi_index']):
yield np.right_shift(np.bitwise_and(item, 0x00FE0000), 17), np.right_shift(np.bitwise_and(item, 0x0001FF00), 8), np.right_shift(np.bitwise_and(item, 0x000000F0), 4) # col, row, ToT1
if np.not_equal(np.bitwise_and(item, 0x0000000F), 15):
yield np.right_shift(np.bitwise_and(item, 0x00FE0000), 17), np.add(np.right_shift(np.bitwise_and(item, 0x0001FF00), 8), 1), np.bitwise_and(item, 0x0000000F) # col, row+1, ToT2
def get_tot_iterator_from_data_records(array): # generator
for item in np.nditer(array, flags=['multi_index']):
yield np.right_shift(np.bitwise_and(item, 0x000000F0), 4) # ToT1
if np.not_equal(np.bitwise_and(item, 0x0000000F), 15):
yield np.bitwise_and(item, 0x0000000F) # ToT2
def build_events_from_raw_data(array):
idx = np.where(is_trigger_word(array))[-1]
if idx.shape[0] == 0:
return [array]
else:
return np.split(array, idx)
def interpret_pixel_data(data, dc, pixel_array, invert=True):
'''Takes the pixel raw data and interprets them. This includes consistency checks and pixel/data matching.
The data has to come from one double column only but can have more than one pixel bit (e.g. TDAC = 5 bit).
Parameters
----------
data : numpy.ndarray
The raw data words.
dc : int
The double column where the data is from.
pixel_array : numpy.ma.ndarray
The masked numpy.ndarrays to be filled. The masked is set to zero for pixels with valid data.
invert : boolean
Invert the read pixel data.
'''
# data validity cut, VR has to follow an AR
index_value = np.where(is_address_record(data))[0] + 1 # assume value record follows address record
index_value = index_value[is_value_record(data[index_value])] # delete all non value records
    index_address = index_value - 1  # calculate address record indices that are followed by a value record
# create the pixel address/value arrays
address = get_address_record_address(data[index_address])
value = get_value_record(data[index_address + 1])
# split array for each bit in pixel data, split is done on decreasing address values
address_split = np.array_split(address, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
value_split = np.array_split(value, np.where(np.diff(address.astype(np.int32)) < 0)[0] + 1)
if len(address_split) > 5:
pixel_array.mask[dc * 2, :] = True
pixel_array.mask[dc * 2 + 1, :] = True
logging.warning('Invalid pixel data for DC %d', dc)
return
    mask = np.empty_like(pixel_array.data)  # BUG in numpy: pixel_array is de-masked if .data is not used
mask[:] = len(address_split)
for bit, (bit_address, bit_value) in enumerate(zip(address_split, value_split)): # loop over all bits of the pixel data
# error output, pixel data is often corrupt for FE-I4A
if len(bit_address) == 0:
logging.warning('No pixel data for DC %d', dc)
continue
if len(bit_address) != 42:
logging.warning('Some pixel data missing for DC %d', dc)
if (np.any(bit_address > 672)):
            raise RuntimeError('Pixel data corrupt for DC %d' % dc)
# set pixel that occurred in the data stream
pixel = []
for i in bit_address:
pixel.extend(range(i - 15, i + 1))
pixel = np.array(pixel)
# create bit set array
value_new = bit_value.view(np.uint8) # interpret 32 bit numpy array as uint8 to be able to use bit unpacking; byte unpacking is not supported yet
if invert:
value_new = np.invert(value_new) # read back values are inverted
value_new = np.insert(value_new[::4], np.arange(len(value_new[1::4])), value_new[1::4]) # delete 0 padding
value_bit = np.unpackbits(value_new, axis=0)
if len(address_split) == 5: # detect TDAC data, here the bit order is flipped
bit_set = len(address_split) - bit - 1
else:
bit_set = bit
pixel_array.data[dc * 2, pixel[pixel >= 336] - 336] = np.bitwise_or(pixel_array.data[dc * 2, pixel[pixel >= 336] - 336], np.left_shift(value_bit[pixel >= 336], bit_set))
pixel_array.data[dc * 2 + 1, pixel[pixel < 336]] = np.bitwise_or(pixel_array.data[dc * 2 + 1, pixel[pixel < 336]], np.left_shift(value_bit[pixel < 336], bit_set)[::-1])
mask[dc * 2, pixel[pixel >= 336] - 336] = mask[dc * 2, pixel[pixel >= 336] - 336] - 1
mask[dc * 2 + 1, pixel[pixel < 336]] = mask[dc * 2 + 1, pixel[pixel < 336]] - 1
pixel_array.mask[np.equal(mask, 0)] = False
| 2.53125
| 3
|
tests/test_utils.py
|
eric-erki/autokeras
| 1
|
12780535
|
from autokeras.generator import DefaultClassifierGenerator
from autokeras.utils import *
from tests.common import get_processed_data
def test_model_trainer():
model = DefaultClassifierGenerator(3, (28, 28, 3)).generate().produce_model()
train_data, test_data = get_processed_data()
ModelTrainer(model, train_data, test_data, False).train_model(max_iter_num=3)
| 2.34375
| 2
|
python/ray/serve/tests/test_config_files/pizza.py
|
jianoaix/ray
| 0
|
12780536
|
<reponame>jianoaix/ray
from enum import Enum
from typing import List, Dict, TypeVar
import ray
from ray import serve
import starlette.requests
from ray.serve.drivers import DAGDriver
from ray.serve.deployment_graph import InputNode
RayHandleLike = TypeVar("RayHandleLike")
class Operation(str, Enum):
ADDITION = "ADD"
MULTIPLICATION = "MUL"
@serve.deployment(ray_actor_options={"num_cpus": 0.15})
class Router:
def __init__(self, multiplier: RayHandleLike, adder: RayHandleLike):
self.adder = adder
self.multiplier = multiplier
def route(self, op: Operation, input: int) -> int:
if op == Operation.ADDITION:
return ray.get(self.adder.add.remote(input))
elif op == Operation.MULTIPLICATION:
return ray.get(self.multiplier.multiply.remote(input))
@serve.deployment(
user_config={
"factor": 3,
},
ray_actor_options={"num_cpus": 0.15},
)
class Multiplier:
def __init__(self, factor: int):
self.factor = factor
def reconfigure(self, config: Dict):
self.factor = config.get("factor", -1)
def multiply(self, input_factor: int) -> int:
return input_factor * self.factor
@serve.deployment(
user_config={
"increment": 2,
},
ray_actor_options={"num_cpus": 0.15},
)
class Adder:
def __init__(self, increment: int):
self.increment = increment
def reconfigure(self, config: Dict):
self.increment = config.get("increment", -1)
def add(self, input: int) -> int:
return input + self.increment
@serve.deployment(ray_actor_options={"num_cpus": 0.15})
def create_order(amount: int) -> str:
return f"{amount} pizzas please!"
async def json_resolver(request: starlette.requests.Request) -> List:
return await request.json()
# Overwritten by user_config
ORIGINAL_INCREMENT = 1
ORIGINAL_FACTOR = 1
with InputNode() as inp:
operation, amount_input = inp[0], inp[1]
multiplier = Multiplier.bind(ORIGINAL_FACTOR)
adder = Adder.bind(ORIGINAL_INCREMENT)
router = Router.bind(multiplier, adder)
amount = router.route.bind(operation, amount_input)
order = create_order.bind(amount)
serve_dag = DAGDriver.bind(order, http_adapter=json_resolver)
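# Hedged usage note (not part of the test file): once serve_dag is deployed, the
# DAGDriver's HTTP endpoint expects a JSON body like ["ADD", 2] (see json_resolver
# above). The result depends on whether the user_config shown above has been applied:
# with the original increment of 1 this yields "3 pizzas please!", with increment 2
# it yields "4 pizzas please!".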
| 2.546875
| 3
|
datafreezer/__init__.py
|
thirawr/django-datafreezer-sample
| 0
|
12780537
|
default_app_config = 'datafreezer.apps.DatafreezerConfig'
| 1.09375
| 1
|
myia/abstract/__init__.py
|
strint/myia
| 222
|
12780538
|
<reponame>strint/myia
"""Abstract data and type/shape inference."""
from .aliasing import *
from .amerge import *
from .data import *
from .infer import *
from .loop import *
from .macro import *
from .ref import *
from .to_abstract import *
from .utils import *
| 0.746094
| 1
|
gui/feature_tool_add.py
|
mdvandamme/PoussePousseEditData
| 0
|
12780539
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
 Create new point. Synchronize with layer and file
-------------------
begin : 2018-07-11
git sha : $Format:%H$
author : <NAME>
***************************************************************************/
"""
from qgis.gui import QgsMapTool
from qgis.core import QgsMapLayer
from PyQt4.QtGui import QCursor
from PyQt4.QtCore import Qt
import util_table
import util_layer
import util_io
class FeatureToolAdd(QgsMapTool):
def __init__(self, canvas):
        QgsMapTool.__init__(self, canvas)
self.canvas = canvas
self.cursor = QCursor(Qt.CrossCursor)
def activate(self):
self.canvas.setCursor(self.cursor)
def setTable(self, table):
self.table = table
def setLayer(self, layer):
self.layer = layer
def setUrl(self, url):
self.url = url
def canvasReleaseEvent(self, mouseEvent):
"""
Each time the mouse is clicked on the map canvas, perform
the following tasks:
...
"""
layerGrille = None
for layer in self.canvas.layers():
if layer.type() == QgsMapLayer.VectorLayer:
if (layer.name() == util_layer.CONST_NOM_LAYER_GRILLE):
layerGrille = layer
p = mouseEvent.pos()
# Determine the location of the click in real-world coords
layerPoint = self.toLayerCoordinates( layerGrille, p )
# =============================================================================
        # Add to the layer, the table and the file
        # Add the row to the table
self.table = util_table.addLigne(self.table, layerPoint.x(), layerPoint.y())
        # Save to the file
util_io.addLigne(self.url, layerPoint.x(), layerPoint.y())
        # Synchronize with the layer
self.layer = util_layer.addPointLayer(self.layer, layerPoint.x(), layerPoint.y())
# ====================================================================
        # A quick refresh
        # QgsMapLayerRegistry.instance().addMapLayer(self.layer)
        self.canvas.refresh()
| 2.734375
| 3
|
credential_test.py
|
davidngatia/Locker
| 0
|
12780540
|
import unittest
from credential import Credential
class TestCredential(unittest.TestCase):
"""
    Test class that defines test cases for the credential class behaviours.
Args:
unittest.TestCase:TestCase class that helps in creating test cases
"""
def setUp(self):
"""
        Set up method to run before each test case.
"""
self.new_credential = Credential("Twitter","Davidngatia","4321")
def tearDown(self):
"""
tearDown method that does clean up after each test case has run.
"""
Credential.credential_list = []
def test_init(self):
"""
test_init test case to test if the object is initialized properly
"""
self.assertEqual(self.new_credential.account_type,"Twitter")
self.assertEqual(self.new_credential.user_name,"Davidngatia")
self.assertEqual(self.new_credential.password,"<PASSWORD>")
def test_generate_password(self):
"""
method that generates a password
"""
self.assertEqual(Credential.password(),Credential.password)
def test_save_credential(self):
"""
test save credential test case to test if the contact object is saved into the credential_list
"""
self.new_credential.save_credential()
self.assertEqual(len(Credential.credential_list),1)
def test_save_multiple_credentials(self):
"""
test_save_multiple_credentials to check if it can save multiple credentials objects to our credential_list
"""
self.new_credential.save_credential()
test_credential=Credential("Facebook","Davidngatia","09876")
test_credential.save_credential()
self.assertEqual(len(Credential.credential_list),2)
def test_display_credentials(self):
"""
method that returns a list of all credentials save_credential
"""
self.assertEqual(Credential.display_credentials(),Credential.credential_list)
if __name__ == '__main__':
unittest.main()
| 3.890625
| 4
|
practice/ai/astar-search/n-puzzle/n-puzzle.py
|
zeyuanxy/HackerRank
| 4
|
12780541
|
import copy
def main():
directions = [[-1, 0, 'UP'], [0, -1, 'LEFT'], [0, 1, 'RIGHT'], [1, 0, 'DOWN']]
n = int(input())
input_grid = []
for i in range(n * n):
x = int(input())
input_grid.append(x)
queue = []
answer_routes = None
mem = set()
queue.append([input_grid, [], 1000])
while len(queue) > 0:
grid, routes, score = queue.pop(0)
if score == 0:
answer_routes = routes
break
x, y = 0, 0
for i in range(n * n):
if grid[i] == 0:
                x, y = i // n, i % n
possible_moves = []
for direction in directions:
next_x, next_y = x + direction[0], y + direction[1]
if next_x < 0 or next_x >= n or next_y < 0 or next_y >= n:
continue
new_grid = copy.deepcopy(grid)
new_grid[x * n + y] = grid[next_x * n + next_y]
new_grid[next_x * n + next_y] = 0
hashed_new_grid = ''.join(str(x) for x in new_grid)
if hashed_new_grid in mem:
continue
mem.add(hashed_new_grid)
new_score = 0
for i in range(n * n):
new_score += abs(new_grid[i] - i)
new_routes = copy.deepcopy(routes)
new_routes.append(direction[2])
queue.append([new_grid, new_routes, new_score])
queue.sort(key = lambda x: x[2])
    print(len(answer_routes))
    for direction in answer_routes:
        print(direction)
if __name__ == "__main__":
main()
| 3.3125
| 3
|
routers.py
|
LeeLeah/permov
| 10
|
12780542
|
<reponame>LeeLeah/permov
from handlers import Index
from handlers import Img
from handlers import Search
from handlers import User
from handlers import SomePage
from handlers import Admin
from handlers import VIP
route = [
(r"/",Index.index),
(r"/([0-9]+)/?",Img.category),
(r"/addLink/?",Img.addLink),
(r"/addCate/?",VIP.addCate),
(r"/Admin/addviptime/?",Admin.AddVipTime),
(r"/Admin/suggest/?",Admin.AdminSuggest),
(r"/addKeyword/?",Img.addKeyword),
(r"/album/?",Index.album),
(r"/clearMessage/?",User.ClearMessage),
(r"/comment/?",Img.comment),
(r"/delComment/?",User.delComment),
(r"/FollowUser/?",User.FollowUserAction),
(r"/FollowCate/?",User.FollowCate),
(r"/img/([0-9a-zA-Z\_\-]+)/?",Img.img),
(r"/nextimg/(.+)/?",Img.next),
(r"/message/?",SomePage.Message),
(r"/myAlbum/?",VIP.myAlbum),
(r"/myAlbum/edit/([0-9]+)/?",VIP.editMyAlbum),
(r"/search/?",Search.search),
(r"/tag/(.+)/?",Search.tags),
(r"/login/?",User.login),
(r"/logout/?",User.logout),
(r"/qq/?",User.QQ),
(r"/QuitFollow/?",User.QuitFollow),
(r"/QuitCate/?",User.QuitCate),
(r"/reg/?",User.Reg),
(r"/rule/?",SomePage.Rule),
(r"/suggest/?",SomePage.Suggest),
(r"/upload/?",Index.qiniu),
(r"/upjson/?",Index.upjson),
(r"/user/?",User.home),
(r"/user/(.+)/?",User.UserPage),
(r"/vip/?",SomePage.VIP),
(r"/([0-9a-zA-Z\_\-]+)/edit/?",User.editImg),
]
| 1.859375
| 2
|
ptr_lib.py
|
alexBoava/ptr_lib
| 0
|
12780543
|
<gh_stars>0
#
# Copyright (c) 2014 R.Pissardini <rodrigo AT pissardini DOT com>
# Copyright (c) 2018 R.Pissardini <rodrigo AT pissardini DOT com> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from math import *
import datetime
import numpy as np
#Computation of distance
def diff_simple(ai,af):
return sqrt(pow(af-ai,2))
def cartesian_distance (x, y, xf,yf):
distance = sqrt(((xf-x)**2)+ ((yf-y)**2))
return distance
def spheric_cosines(lat1,lon1,lat2,lon2,earth_radius):
delta_lat = lat2 - lat1;
delta_lon = lon2 - lon1;
distance = acos(sin(radians(lat1))\
* sin(radians(lat2)) +cos(radians(lat1))*\
cos(radians(lat2)) * cos(radians(delta_lon)))* earth_radius
return distance
def harvesine (lat1, lon1, lat2,lon2, earth_radius):
delta_lat = lat2 - lat1
delta_lon = lon2 - lon1
alpha = delta_lat * 0.5;
beta = delta_lon * 0.5;
a = sin(radians(alpha))* sin(radians(alpha))+\
cos(radians(lat1))*cos(radians(lat2)) *\
sin(radians(beta)) * sin(radians(beta));
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = earth_radius * c
return distance
def equirec_approximation (lat1, lon1, lat2,lon2, earth_radius): # Equirectangular approximation
    x = (lon2 - lon1) * cos((lat1 + lat2) / 2)  # note: lat/lon assumed in radians here
y = lat2 - lat1
d = sqrt(x * x + y * y) * earth_radius
return d
#Computation of angles
def angle_between_coordinates (xi, yi, xf, yf):
angle = atan2(yf -yi, xf -xi)
return angle
def bearing (lat1,lon1,lat2,lon2):
y = sin(lon2-lon1) * cos(lat2)
x = cos(lat1)*sin(lat2)-\
sin(lat1)*cos(lat2)*\
cos(lon2-lon1)
b = atan2(y,x) #radians
return b
#Computation of new coordinates
def polar_coordinates (x, y, distance, angle): #angle in degrees
xf = (distance * cos(radians(angle))) + x
yf = (distance * sin(radians(angle))) + y
return [xf,yf]
## Length of latitude and longitude
def lenght_latitude_longitude(value): #value in degrees (0.0)
lat = radians(value)
m1 = 111132.92
m2 = -559.82
m3 = 1.175
m4 = -0.0023
p1 = 111412.84
p2 = -93.5
p3 = 0.118
latlen = m1 + (m2 * cos(2 * lat)) + (m3 * cos(4 * lat)) +\
(m4 * cos(6 * lat))
longlen = (p1 * cos(lat)) + (p2 * cos(3 * lat)) +\
(p3 * cos(5 * lat))
return [latlen,longlen]
#Computation of date and time
def day_of_year(year,month,day):
doy = datetime.datetime(year, month, day).timetuple().tm_yday
return doy
def julian_date(year,month,day,hour,minute,second):
MJD0 = 2400000.5
b = 0
if (month <= 2):
month +=12
year -= 1
if ((10000*year+100*month+day) <= 15821004):
b = -2 + get_intpart (year+4716) - 1179
else:
b = get_intpart(year* 0.0025)- get_intpart(year* 0.01)+\
get_intpart(year *0.25)
mjdmidnight = 365 *year - 679004 + b + (30.6001*(month+1)) + day
fracofday = ((hour+ (minute/60)+ (second/3600)))/24
return MJD0 + mjdmidnight + fracofday
#Transformations
##Transformations between quaternions and Euler's angles
def quat2euler(qw,qx,qy,qz):
qw2 = qw * qw
qx2 = qx * qx
qy2 = qy * qy
qz2 = qz * qz
test= qx * qy + qz * qw
Y = 0.0
Z = 0.0
X = 0.0
if (test > 0.499):
Y = 360/pi * atan2(qx,qw)
Z = 90
X = 0
return [X,Y,Z]
if (test < -0.499):
Y = -360/pi*atan2(qx,qw)
Z = -90
X = 0
return [X,Y,Z]
h = atan2(2 * qy * qw - 2 * qx * qz, 1 - 2 * qy * qy - 2 * qz * qz)
a = asin (2 * qx * qy + 2 * qz * qw)
b = atan2(2 * qx * qw - 2 * qy * qz, 1 - 2 * qx * qx - 2 * qz * qz)
Y = h * 180/pi
Z = a * 180/pi
X = b * 180/pi
return [X, Y, Z]
def euler2quat(X,Y,Z):
h = Y * pi/360
a = Z * pi/360
b = X * pi/360
c1 = cos(h)
c2 = cos(a)
c3 = cos(b)
s1 = sin(h)
s2 = sin(a)
s3 = sin(b)
qw = ((c1 * c2 * c3 - s1 * s2 * s3)* 100000)/100000
qx = ((s1 * s2 * c3 + c1 * c2 * s3)* 100000)/100000
qy = ((s1 * c2 * c3 + c1 * s2 * s3)* 100000)/100000
qz = ((c1 * s2 * c3 - s1 * c2 * s3)* 100000)/100000
return [qw, qx, qy, qz]
##rotation of coordinates
def rotation_coordinates(x, y, angle): #angle in degrees
xf = x * cos(radians(angle))+ y * sin(radians(angle))
yf = -x * sin(radians(angle))+ y * cos(radians(angle));
return [xf,yf]
#Transformations between reference systems
def geodetic2cartesian(lat,lon,h, a =6378137, b=6356752.314140347): #SIRGAS
e2 = (pow(a,2) -pow(b,2))/pow(a,2)
N = a/(pow(1. -e2 * pow(sin(radians(lat)),2), 0.5))
X = (N+h) * cos(radians(lat)) * cos(radians(lon))
Y = (N+h) * cos(radians(lat)) * sin(radians(lon))
Z = ((1.-e2) * N + h) * sin(radians(lat))
return [X,Y,Z]
def cartesian2geodetic (X, Y, Z, a = 6378137,b = 6356752.314140347): #SIRGAS
H = 0
v = 0
e2 = (pow(a,2) -pow(b,2))/pow(a,2)
p = pow(pow(X,2)+pow(Y,2),0.5)
lat = atan2(Z, p*(1-e2))
lat1 = 2 * pi
while fabs(lat1-lat) > 1e-15:
v = a/pow((1- e2* pow(sin(lat),2)),0.5)
H = p/cos(lat)- v
lat1 = lat
lat = atan2(Z + e2 * v * sin(lat),p)
lat = degrees(lat) #in degrees
lon = degrees(atan2(radians(Y), radians(X))) #in degrees
return [lat,lon,H]
def geodetic2enu (lat, lon, h, a = 6378137, b = 6356752.314140347):
""" Convert from geodetic to a different ENU local coordinate system.
East -- is the longitude multiplied by the radius of the small circle at that latitude
    North -- is the product of the geodetic latitude by the semi-major axis of the ellipsoid
Up -- is the geodetic height
Keyword arguments:
lat -- latitude in degrees
lon -- longitude in degrees
h -- geodetic height in meters
a -- semi-major axis (default SIRGAS)
b -- semi-minor axis (default SIRGAS)
"""
e2 = (pow(a,2) -pow(b,2))/pow(a,2)
lat = radians(lat)
v = a/pow((1- e2* pow(sin(lat),2)),0.5)
small_circle = v * cos(lat)
if (lon < 0):
lon+=360
E = radians(lon) * small_circle
N = lat * a
U = h
return [E, N, U]
def helmert_transformation (X,Y,Z,tx,ty,tz,s,rx,ry,rz,a= 6378137,b=6356752.314140347):
xp = tx + ((1 + s) * X) - (rz * Y) + (ry * Z)
yp = ty + (rz * X) + ((1 + s) * Y) - (rx * Z)
zp = tz - (ry * X) + (rx * Y) + ((1 + s) * Z)
return [xp,yp,zp]
def sad2sirgas(x,y,z): #SAD 69 to SIRGAS 2000
xf = x - 67.35
yf = y + 3.88
zf = z - 38.22
return [xf,yf,zf]
def sirgas2sad(x,y,z): #SIRGAS 2000 to SAD69
xf = x + 67.35
yf = y - 3.88
zf = z + 38.22
return [xf,yf,zf]
def corregoalegre2sirgas(x,y,z): #Córrego Alegre to SIRGAS 2000
xf = x - 206.048
yf = y + 168.279
zf = z - 3.283
return [xf,yf,zf]
def sirgas2corregoalegre(x,y,z): #SIRGAS 2000 to Córrego Alegre
xf = x + 206.048
yf = y - 168.279
zf = z + 3.283
return [xf,yf,zf]
#Conversions
def arcsec2radians (seconds):
radians = seconds * 0.000004848
return radians
def radians2arcsec (radians):
seconds = radians * 206264.806247096
return seconds
def dms2decimal (degrees, minutes, seconds, direction): #direction - N- S- W- E
    if (direction=='S' or direction=='W'):
        signal = -1
    elif (direction=='N' or direction=='E'):
        signal = 1
else:
print('[Error] Insert a correct direction [ N, S, W or E]\n')
return
decimal = signal * (int(degrees) + float(minutes) / 60 + float(seconds) / 3600)
return decimal
def decimal2dms (decimal, direction): #N- E
degrees = int(decimal)
minutes = int (abs((decimal - int(decimal)) * 60))
seconds = abs((abs((decimal - int(decimal)) * 60)-minutes)*60)
if (direction=='N'):
if (decimal <0):
direction ='S'
elif (direction =='E'):
if (decimal <0):
direction ='W'
else:
print('[Error] Insert a correct direction [N or E]\n')
return
return [degrees,minutes,seconds,direction]
#skyplot and other charts
import matplotlib.pyplot as plt
def skyplot (prn,e,az): #input lists of prn (or svid), elevation and azimuths
ax = plt.subplot(111, projection='polar')
ax.set_theta_zero_location("N")
ax.set_theta_direction(-1)
ax.set_ylim(0,90)
ax.set_yticks(np.arange(0,91,30))
ax.set_yticklabels(ax.get_yticks()[::-1])
for sv, elev, azim in zip(prn, e, az):
        ax.plot(radians(azim), 90-elev,color='green', marker='o', markersize=20)
        ax.text(radians(azim), 90-elev, sv, ha='center', va='center',color='white')
plt.show()
#General functions
def get_intpart (value):
ipart = modf(value)
return ipart[1]
def get_fracpart (value):
fpart = modf(value)
return fpart[0]
def polyRegression(ts, poly=2):
'''
    polyRegression is a function to calculate the POLYNOMIAL REGRESSION of a time series up to the 5th term.
y = B0 + B1.x + B2.x^2 + B3.x^3 ... Bn.x^n
INPUT:
ts - the time series
poly - number of terms
OUTPUT:
B - Regression output parameters
ts_trend - Regression calculated coordinates. Output to construct graphs or calculate error of regression.
'''
if poly > 5:
print("\n \t \t A quantidade de termos deve ser menor ou igual a 5 \n")
else:
ts_trend=[]
lista = []
soma = 0.0
Z = ts
for i in range(poly):
lista.append([])
for i in range(poly):
for ind in range(len(ts)):
lista[i].append([float(ts.index[ind])**i])
A = np.hstack(lista)
At = A.transpose()
result1 = (At.dot(A))
result1 = np.linalg.inv(result1)
result2 = (At.dot(Z))
B = result2.dot(result1)
for x in range(len(ts)):
soma = 0.0
for i in range(len(B)):
soma = soma + (B[i] * pow(x,i))
ts_trend.append(soma)
return B, ts_trend
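# Hedged usage sketch (illustrative values only, assuming the S/W-negative sign
# convention of dms2decimal above):
# print(cartesian_distance(0, 0, 3, 4))             # -> 5.0
# print(dms2decimal(23, 30, 0, 'W'))                # -> -23.5
# print(geodetic2cartesian(-23.55, -46.63, 760.0))  # ECEF [X, Y, Z] on the SIRGAS ellipsoid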
| 1.945313
| 2
|
lessons/troubleshooting-chain/get-phone-ip-from-ext.py
|
wouyang628/nrelabs-curriculum
| 1
|
12780544
|
#!/usr/bin/python2.7
import requests
import xml.etree.ElementTree as ET
import sys
import json
def out(*args):
if sys.stdout.isatty():
print ', '.join(args[0]["hosts"])
else:
print json.dumps(args[0])
def myfunc(*args):
host = args[0]["host"][0]
port = args[0]["port"][0]
username = args[0]["username"][0]
secret = args[0]["password"][0]
phone = args[0]["phone"][0]
baseurl = "http://" + host + ":" + port + "/mxml"
params = {"action": "login", "username": username, "secret": secret}
resp = requests.get(baseurl, params=params)
cookies = resp.cookies
resp = requests.get(baseurl, cookies=cookies, params={"action": "PJSIPShowRegistrationInboundContactStatuses"})
contacts = str(resp.content)
root = ET.fromstring(contacts)
contacts = root.findall(".//*[@event='ContactStatusDetail']")
result = None
for contact in contacts:
if contact.get("aor") == phone:
result = contact.get("viaaddress").split(":")[0]
break
if result:
out({"hosts": [result]})
myfunc_args = dict()
if not sys.stdin.isatty():
for line in sys.stdin:
lined = json.loads(line)
for k, v in lined.items():
if k in myfunc_args:
myfunc_args[k] += v
else:
myfunc_args[k] = v
for arg in sys.argv[1:]:
kvp = arg.split("--", 1)[1].split("=", 1)
k = kvp[0]
v = kvp[1].split(",")
if k in myfunc_args:
myfunc_args[k] += v
else:
myfunc_args[k] = v
myfunc(myfunc_args)
| 2.578125
| 3
|
Chapter09/c9_52_impact_of_correlation_on_efficient_frontier_notWorking.py
|
John-ye666/Python-for-Finance-Second-Edition
| 236
|
12780545
|
<filename>Chapter09/c9_52_impact_of_correlation_on_efficient_frontier_notWorking.py
"""
Name : c9_52_impact_of_correlation_on_efficient_frontier.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
from matplotlib.finance import quotes_historical_yahoo_ochl as getData
import matplotlib.pyplot as plt
import numpy as np, pandas as pd, scipy as sp
from numpy.linalg import inv, pinv
begYear,endYear = 2012,2016
stocks=['IBM','WMT']
def ret_monthly(ticker): # function 1
x = getData(ticker,(begYear,1,1),(endYear,12,31),asobject=True,adjusted=True)
logret=np.log(x.aclose[1:]/x.aclose[:-1])
date=[]
d0=x.date
for i in range(0,np.size(logret)):
date.append(''.join([d0[i].strftime("%Y"),d0[i].strftime("%m")]))
y=pd.DataFrame(logret,date,columns=[ticker])
return y.groupby(y.index).sum()
def std_f(ticker):
x=ret_monthly(ticker)
return sp.std(x)
def objFunction(W, R, target_ret):
stock_mean=np.mean(R,axis=0)
port_mean=np.dot(W,stock_mean) # portfolio mean
#cov=np.cov(R.T) # var-cov matrix
cov=cov0
port_var=np.dot(np.dot(W,cov),W.T) # portfolio variance
penalty = 2000*abs(port_mean-target_ret)# penalty 4 deviation
return np.sqrt(port_var) + penalty # objective function
R0=ret_monthly(stocks[0]) # starting from 1st stock
n_stock=len(stocks) # number of stocks
std1=std_f(stocks[0])
std2=std_f(stocks[1])
for jj in sp.arange(1):
k=0.1*std1*std2
#cov0=sp.array([[0.00266285,0.00037303],[0.00037303,0.0021296]])
#cov0=sp.array([[std1**2,k],[k,std2**2]])
cov0=sp.array([[std1**2,0.00037303],[0.00037303,std2**2]])
for i in xrange(1,n_stock): # merge with other stocks
x=ret_monthly(stocks[i])
R0=pd.merge(R0,x,left_index=True,right_index=True)
R=np.array(R0)
out_mean,out_std,out_weight=[],[],[]
stockMean=np.mean(R,axis=0)
for r in np.linspace(np.min(stockMean),np.max(stockMean),num=100):
W = np.ones([n_stock])/n_stock # starting from equal weights
b_ = [(0,1)
for i in range(n_stock)] # bounds, here no short
c_ = ({'type':'eq', 'fun': lambda W: sum(W)-1. })#constraint
result=sp.optimize.minimize(objFunction,W,(R,r),method='SLSQP',constraints=c_, bounds=b_)
if not result.success: # handle error raise
        raise BaseException(result.message)
out_mean.append(round(r,4)) # 4 decimal places
std_=round(np.std(np.sum(R*result.x,axis=1)),6)
out_std.append(std_)
out_weight.append(result.x)
plt.title('Efficient Frontier')
plt.xlabel('Standard Deviation of the portfolio (Risk)')
plt.ylabel('Return of the portfolio')
plt.figtext(0.5,0.75,str(n_stock)+' stock are used: ')
plt.figtext(0.5,0.7,' '+str(stocks))
plt.figtext(0.5,0.65,'Time period: '+str(begYear)+' ------ '+str(endYear))
plt.plot(out_std,out_mean,'--')
plt.show()
| 2.4375
| 2
|
setup.py
|
asottile/editdistance-s
| 8
|
12780546
|
import platform
import sys
from setuptools import setup
if platform.python_implementation() == 'CPython':
try:
import wheel.bdist_wheel
except ImportError:
cmdclass = {}
else:
class bdist_wheel(wheel.bdist_wheel.bdist_wheel):
def finalize_options(self) -> None:
self.py_limited_api = f'cp3{sys.version_info[1]}'
super().finalize_options()
cmdclass = {'bdist_wheel': bdist_wheel}
else:
cmdclass = {}
setup(cffi_modules=['editdistance_s_build.py:ffibuilder'], cmdclass=cmdclass)
| 1.78125
| 2
|
mail_sender.py
|
PaulinaKomorek/UAM
| 0
|
12780547
|
import smtplib
from email.message import EmailMessage
def send_mail(mail: str, name: str):
user = ''
password = ''
text="Hello " + name + ", \n your account has been created succesfully"
msg = EmailMessage()
msg.set_content(text)
msg['Subject'] = "Confirmation"
msg['From'] = ""
msg['To'] = mail
s = smtplib.SMTP("", 465)
s.ehlo()
s.starttls()
s.login(user, password)
s.send_message(msg)
s.close()
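# Hedged usage sketch: the SMTP host, sender address and credentials above are left
# blank on purpose, so this only works after filling them in for a real account.
# send_mail("new.user@example.com", "Alina")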
| 2.9375
| 3
|
HACKERRANK/PROBLEM_SOLVING/DATA_STRUCTURES/TREES/BINARY_SEARCH_TREE_INSERTION.py
|
WarlonZeng/Big4-Hackerrank
| 1
|
12780548
|
<gh_stars>1-10
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)"""
def insert(root,val):
    # Enter your code here.
if root == None:
return Node(val)
else:
if val <= root.data:
current = insert(root.left, val)
root.left = current
else:
current = insert(root.right, val)
root.right = current
return root
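# Minimal local-testing sketch: the Node class is normally supplied by the HackerRank
# harness, so this hypothetical definition exists only to exercise insert() offline.
# class Node:
#     def __init__(self, data):
#         self.data = data
#         self.left = None
#         self.right = None
# root = None
# for v in [4, 2, 3, 1, 7, 6]:
#     root = insert(root, v)   # builds the BST one value at a time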
| 3.984375
| 4
|
cybox/test/common/contributor_test.py
|
siemens/python-cybox
| 0
|
12780549
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.common import Contributor
from cybox.test import EntityTestCase
class TestContributor(EntityTestCase, unittest.TestCase):
klass = Contributor
_full_dict = {
'role': "Lead Tester",
'name': "<NAME>",
'email': "<EMAIL>",
'phone': "(123) 456-7890",
'organization': "Doc Brown Enterprises(tm)",
'date': {
'start_date': "1955-11-05",
'end_date': "1985-11-05",
},
'contribution_location': "Hill Valley, CA",
}
if __name__ == "__main__":
unittest.main()
| 2.21875
| 2
|
bin/delete_downstream.py
|
Lhior/TXPipe
| 9
|
12780550
|
"""
This script prints out the commands to delete all files generated by a pipeline,
downstream of a specified stage.
If one stage was wrong, and you need to re-run everything it affected, this script
will print out the commands to delete the relevant files so that re-running the pipeline
with resume=True will re-run the correct stages.
"""
import sys
sys.path.append('.')
import ceci
import txpipe
import yaml
import collections
import os
# start from a config file and a stage to delete
config = yaml.safe_load(open(sys.argv[1]))
stage_to_delete = sys.argv[2]
# get the stages we need
stage_names = [s['name'] for s in config['stages']]
pipeline = ceci.Pipeline(config['stages'], None)
stages = [ceci.PipelineStage.get_stage(stage_name) for stage_name in stage_names]
# build the mapping tag => stages depending on that tag
dependencies = collections.defaultdict(list)
for stage in stages:
for tag in stage.input_tags():
dependencies[tag].append(stage)
# initialize with deleting one stage and the tags it makes
tags_to_delete = ceci.PipelineStage.get_stage(stage_to_delete).output_tags()
stages_to_delete = {ceci.PipelineStage.get_stage(stage_to_delete)}
# loop through nstage times (the maximum it could be)
for i in range(len(stage_names)):
# take all tags we currently know we have to delete
for tag in tags_to_delete[:]:
# find out which stages to clear because they need
# this tag which we are deleting
deps = set(dependencies[tag])
for s in stages:
if s in deps:
# if we need to delete this stage,
# add its outputs to the tags to delete
tags_to_delete += s.output_tags()
# and it to the stages to delete
stages_to_delete.add(s)
tags_to_delete = list(set(tags_to_delete))
# now at the end we delete all tags output by stage to delete
for s in stages_to_delete:
for f in pipeline.find_outputs(s, config).values():
print(f"rm -f {f}")
| 2.9375
| 3
|