| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
factory_boss/generator.py
|
mariushelf/factory_boss
| 1
|
12776051
|
<reponame>mariushelf/factory_boss<filename>factory_boss/generator.py
from graphlib import TopologicalSorter
from typing import Dict, List
from factory_boss.entity import Entity
from factory_boss.instance import Instance, InstanceValue
from factory_boss.reference_resolver import ReferenceResolver
from factory_boss.relation_maker import RelationMaker
class Generator:
def __init__(self, spec: Dict):
self.spec = spec
self.resolver = ReferenceResolver()
def generate(
self, output_with_related_objects: bool = True
) -> Dict[str, List[Dict]]:
""" Generate a dictionary from entity name to list of generated instances """
self.complete_relation_specs(self.spec["entities"])
instances = self.make_instances()
instances = self.make_relations(instances)
self.resolver.resolve_references(instances)
plan = self.make_plan(instances)
self.execute_plan(plan)
dicts = self.instances_to_dict(
instances, with_related_objects=output_with_related_objects
)
return dicts
def complete_relation_specs(self, entities: Dict[str, Entity]):
""" Create value specs for the remote side of each relation. """
for ename, entity in entities.items():
for relation in entity.relations():
if relation.relation_strategy != "none":
target = entities[relation.target_entity]
remote_relation = relation.make_remote_spec(ename)
if remote_relation:
target.add_field(relation.remote_name, remote_relation)
def make_instances(self) -> List[Instance]:
""" Generate all `Instance`s including `InstanceValue`s """
n = 3 # generate 3 instances of each entity # TODO make configurable
instances: List[Instance] = []
for ename, ent in self.spec["entities"].items():
for i in range(n):
instance = ent.make_instance(overrides={})
instances.append(instance)
return instances
def make_relations(self, instances: List[Instance]) -> List[Instance]:
all_instances = instances
new_instances = all_instances
relation_maker = RelationMaker([], self.spec["entities"])
while new_instances:
relation_maker.add_known_instances(new_instances)
new_instances = relation_maker.make_relations(new_instances)
all_instances += new_instances
return all_instances
def make_plan(self, instances) -> List[InstanceValue]:
""" Return evaluation order of instance values """
sorter = TopologicalSorter()
for instance in instances:
for ivalue in instance.instance_values.values():
references = ivalue.resolved_references().values()
dependencies = [ref.resolved_target for ref in references]
sorter.add(ivalue, *dependencies)
plan = sorter.static_order()
plan = list(plan)
return plan
def execute_plan(self, plan):
for ivalue in plan:
ivalue.make_value()
def instances_to_dict(
self, instances: List[Instance], with_related_objects: bool = True
) -> Dict[str, List[Dict]]:
dicts: Dict[str, List[Dict]] = {}
for instance in instances:
ename = instance.entity.name
idict = instance.to_dict(with_related_objects)
try:
dicts[ename].append(idict)
except KeyError:
dicts[ename] = [idict]
return dicts
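The evaluation plan above is built with graphlib.TopologicalSorter from the standard library (Python 3.9+). A small self-contained sketch of the same pattern used in make_plan, with plain strings standing in for InstanceValue objects and a made-up dependency graph:
from graphlib import TopologicalSorter

# Each node is registered together with the nodes it depends on,
# mirroring sorter.add(ivalue, *dependencies) in Generator.make_plan().
sorter = TopologicalSorter()
sorter.add("person.full_name", "person.first_name", "person.last_name")
sorter.add("person.first_name")
sorter.add("person.last_name")

# static_order() yields every dependency before its dependents.
plan = list(sorter.static_order())
print(plan)  # e.g. ['person.first_name', 'person.last_name', 'person.full_name']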
| 2.53125
| 3
|
xendbg/xen/vcpu.py
|
nspin/pyxendbg
| 0
|
12776052
|
<reponame>nspin/pyxendbg
from xendbg.xen._bindings import ffi, lib
class VCPU:
def __init__(self, domain, vcpu_id):
self.domain = domain
self.vcpu_id = vcpu_id
def get_context(self):
return self.domain.xen.xenctrl.vcpu_getcontext(self.domain.domid, self.vcpu_id)
def set_context(self, ctx):
return self.domain.xen.xenctrl.vcpu_setcontext(self.domain.domid, self.vcpu_id, ctx)
| 1.945313
| 2
|
creational/monostate/logic/__init__.py
|
Kozak24/Patterns
| 0
|
12776053
|
<gh_stars>0
from .character import Character, Archetype
| 1.070313
| 1
|
spoopy/refactored/classification/feature/feature_predictor.py
|
rodrigobressan/PADify
| 12
|
12776054
|
<gh_stars>10-100
import pickle
from typing import List, Tuple
import numpy as np
import os
from sklearn.metrics import accuracy_score
from refactored.classification.classifier import BaseClassifier
from refactored.feature_extraction.cnn_model import CnnModel
from refactored.io_utils import save_txt
from refactored.preprocessing.handler.datahandler import DataHandler, DiskHandler
from refactored.preprocessing.property.property_extractor import PropertyExtractor
from tools.classifier import evaluate_hter
from tools.file_utils import file_helper
class BasePredictor:
PRED_NAME = "y_pred.npy"
PROBA_NAME = "y_pred_proba.npy"
TRUE_NAME = "y_true.npy"
MODEL_NAME = "model.sav"
RESULTS_NAME = "results.txt"
INTRA_NAME = "intra"
INTER_NAME = "inter"
FINAL_NAME = "final"
META_NAME = "meta"
def __init__(self,
features_root_path: str,
base_output_path: str,
classifiers: List[BaseClassifier],
properties: List[PropertyExtractor],
models: List[CnnModel],
data_handler: DataHandler = DiskHandler(),
train_alias: str = 'train',
test_alias: str = 'test',
target_all: str = 'all'):
self.features_root_path = features_root_path
self.base_output_path = base_output_path
self.classifiers = classifiers
self.properties = properties
self.models = models
self.data_handler = data_handler
self.train_alias = train_alias
self.test_alias = test_alias
self.target_all = target_all
self.intra_dataset_output = os.path.join(self.base_output_path, self.INTRA_NAME)
self.inter_dataset_output = os.path.join(self.base_output_path, self.INTER_NAME)
self.meta_dataset_output = os.path.join(self.base_output_path, self.META_NAME)
def _list_variations(self):
# classifiers = [svc, svm, etc]
for classifier in self.classifiers:
# prop = [depth, illum, etc]
for prop in self.properties:
# models = [resnet, vgg, etc]
for model in self.models:
yield [model, prop, classifier]
def _save_artifacts(self, classifier: BaseClassifier,
output_dir: str,
y_true: np.ndarray,
y_pred: np.ndarray,
y_pred_proba: np.ndarray,
results: np.ndarray):
file_helper.guarantee_path_preconditions(output_dir)
# save preds
np.save(os.path.join(output_dir, self.TRUE_NAME), y_true)
np.save(os.path.join(output_dir, self.PRED_NAME), y_pred)
np.save(os.path.join(output_dir, self.PROBA_NAME), y_pred_proba)
# save fitted model
model_path = os.path.join(output_dir, self.MODEL_NAME)
with open(model_path, 'wb') as f:
pickle.dump(classifier, f)
# save HTER, APCER and BPCER
results_path = os.path.join(output_dir, self.RESULTS_NAME)
result = '%.5f\n%.5f\n%.5f' % (results[0], results[1], results[2])
save_txt(results_path, result)
def _fit(self, classifier: BaseClassifier,
X_train: np.ndarray,
y_train: np.ndarray) -> BaseClassifier:
X_train = np.reshape(X_train, (X_train.shape[0], -1))
classifier.fit(X_train, y_train)
return classifier
def _predict(self, classifier: BaseClassifier,
X_train: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
y_pred = classifier.predict(X_train)
y_pred_proba = classifier.predict_proba(X_train)
return y_pred, y_pred_proba
def _fit_and_predict(self, classifier: BaseClassifier,
X_train: np.ndarray,
y_train: np.ndarray,
X_test: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_pred_proba = classifier.predict_proba(X_test)
return y_pred, y_pred_proba
def _evaluate_results(self, y_pred, y_test, names_test) -> Tuple[float, float, float, float]:
hter, apcer, bpcer = evaluate_hter.evaluate_with_values(y_pred, y_test, names_test)
acc = accuracy_score(y_test, y_pred)
return hter, apcer, bpcer, acc
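Before fitting or predicting, the predictor flattens each feature tensor to a 2-D matrix so that any scikit-learn style classifier accepts it. A minimal illustration of that reshape, with a made-up feature shape:
import numpy as np

# e.g. 8 samples of 7x7x512 CNN feature maps (hypothetical shape)
X = np.zeros((8, 7, 7, 512))
X_flat = np.reshape(X, (X.shape[0], -1))
print(X_flat.shape)  # (8, 25088)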
| 2.03125
| 2
|
Wasteland/__init__.py
|
Tonysun1/Explore-Z-Wasteland
| 1
|
12776055
|
<reponame>Tonysun1/Explore-Z-Wasteland
import app.intro
if __name__ == '__main__':
app.intro.init()
| 1.046875
| 1
|
src/sage/modular/overconvergent/__init__.py
|
bopopescu/sage
| 3
|
12776056
|
from __future__ import absolute_import
pass
from . import all
| 1.054688
| 1
|
qcarsim/constructors.py
|
QUT-Motorsport/QUTMS_VehicleSim
| 2
|
12776057
|
from qcarsim import *
def dummy_output(number):
doubled_value = dummy_calc(number)
return p_tag(doubled_value)
| 1.796875
| 2
|
Test.py
|
OdinManiac/test_repo
| 0
|
12776058
|
print("ololo")
| 1.078125
| 1
|
python/validAnagram.py
|
guozengxin/myleetcode
| 0
|
12776059
|
<reponame>guozengxin/myleetcode
# https://leetcode.com/problems/valid-anagram/
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t):
return False
chars = {}
for c in s:
if c in chars:
chars[c] += 1
else:
chars[c] = 1
for c in t:
if c not in chars:
return False
else:
chars[c] -= 1
if chars[c] < 0:
return False
return True
s = "anagram"
t = "nagaram"
print(Solution().isAnagram(s, t))
s = "rat"
t = "car"
print(Solution().isAnagram(s, t))
| 3.53125
| 4
|
agent.py
|
jackdolgin/FHDMM
| 11
|
12776060
|
<reponame>jackdolgin/FHDMM
import sys
import numpy as np
from pandas import DataFrame
import pandas as pd
from utils import softmax
pd.options.display.float_format = '{:.2f}'.format
class ContextualBandit(object):
def __init__(self):
# Contexts and their probabilities of winning
self.contexts = {'punishment': 0.2,
'neutral': 0.5,
'reward': 0.8}
self.actions = (23, 14, 8, 3)
self.n = len(self.actions)
self.get_context()
def get_context_list(self):
return list(self.contexts.keys())
def get_context(self):
self.context = np.random.choice(list(self.contexts.keys()))
return self.context
def reward(self, action):
if action not in self.actions:
print('Error: action not in', self.actions)
sys.exit(-1)
p = self.contexts[self.context]
if np.random.rand() < p:
r = action
else:
r = -action
return r
class ContextualAgent(object):
def __init__(self, bandit, beta=None, alpha=None):
self.beta = beta
self.bandit = bandit
self.actions = self.bandit.actions
self.contexts = self.bandit.get_context_list()
self.n = bandit.n
self.alpha = alpha
self.Q = {}
# init with small random numbers to avoid ties
for context in self.contexts:
self.Q[context] = np.random.uniform(0, 1e-4, self.n)
self.log = {'context':[], 'reward':[], 'action':[],
'Q(c,23)':[], 'Q(c,14)':[], 'Q(c,8)':[], 'Q(c,3)': []}
def run(self):
context = self.bandit.get_context()
action = self.choose_action(context)
reward = self.bandit.reward(self.actions[action])
# Update action-value
self.update_action_value(context, action, reward)
# Keep track of performance
self.log['context'].append(context)
self.log['reward'].append(reward)
self.log['action'].append(self.actions[action])
self.log['Q(c,23)'].append(self.Q[context][0])
self.log['Q(c,14)'].append(self.Q[context][1])
self.log['Q(c,8)'].append(self.Q[context][2])
self.log['Q(c,3)'].append(self.Q[context][3])
def choose_action(self, context):
p = softmax(self.Q[context], self.beta)
actions = range(self.n)
action = np.random.choice(actions, p=p)
return action
def update_action_value(self, context, action, reward):
error = reward - self.Q[context][action]
self.Q[context][action] += self.alpha * error
def run_single_softmax_experiment(beta, alpha):
"""Run experiment with agent using softmax update rule."""
print('Running a contextual bandit experiment')
cb = ContextualBandit()
ca = ContextualAgent(cb, beta=beta, alpha=alpha)
trials = 360
for _ in range(trials):
ca.run()
df = DataFrame(ca.log, columns=('context', 'action', 'reward', 'Q(c,23)',
'Q(c,14)', 'Q(c,8)', 'Q(c,3)'))
# fn = 'softmax_experiment.csv'
# df.to_csv(fn, index=False)
# print('Sequence written in', fn)
# globals().update(locals()) #
return df
if __name__ == '__main__':
np.random.seed(42)
beta = 0.5
alpha = 0.1
print('Running experiment with alpha={} and beta={}'.format(alpha, beta))
run_single_softmax_experiment(beta, alpha)
# import vis
# import matplotlib.pyplot as plt
# plt.close('all')
# vis.plot_simulation_run()
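choose_action relies on a softmax helper imported from utils that is not part of this file. A plausible sketch, assuming beta acts as an inverse temperature (the repository's own implementation may differ):
import numpy as np

def softmax(q_values, beta):
    """Assumed implementation: softmax over action values with inverse temperature beta."""
    z = beta * np.asarray(q_values, dtype=float)
    z -= z.max()               # subtract the max for numerical stability
    e = np.exp(z)
    return e / e.sum()

# Higher beta concentrates probability mass on the highest-valued action.
print(softmax([0.1, 0.5, 0.2, 0.0], beta=0.5))
print(softmax([0.1, 0.5, 0.2, 0.0], beta=5.0))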
| 2.828125
| 3
|
Competitie/views_rayon_teams.py
|
RamonvdW/nhb-apps
| 1
|
12776061
|
<filename>Competitie/views_rayon_teams.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.http import Http404
from django.urls import reverse
from django.views.generic import TemplateView
from django.core.exceptions import PermissionDenied
from django.contrib.auth.mixins import UserPassesTestMixin
from Functie.rol import Rollen, rol_get_huidige_functie
from NhbStructuur.models import NhbRayon
from .models import LAAG_RK, AG_NUL, Competitie, CompetitieKlasse, DeelCompetitie, KampioenschapTeam
from .menu import menu_dynamics_competitie
TEMPLATE_COMPETITIE_RKO_TEAMS = 'competitie/rko-teams.dtl'
class RayonTeamsView(TemplateView):
""" Met deze view kan een lijst van teams getoond worden, zowel landelijk, rayon als regio """
template_name = TEMPLATE_COMPETITIE_RKO_TEAMS
subset_filter = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.rol_nu, self.functie_nu = None, None
def get_context_data(self, **kwargs):
""" called by the template system to get the context data for the template """
context = super().get_context_data(**kwargs)
if self.subset_filter:
context['subset_filter'] = True
# BB/BKO mode
try:
comp_pk = int(str(kwargs['comp_pk'][:6])) # truncating protects against garbage input
comp = Competitie.objects.get(pk=comp_pk)
except (ValueError, Competitie.DoesNotExist):
raise Http404('Competitie niet gevonden')
context['comp'] = comp
comp.bepaal_fase()
subset = kwargs['subset'][:10]
if subset == 'auto':
subset = 'alle'
if subset == 'alle':
# all rayons
context['rayon'] = 'Alle'
rk_deelcomp_pks = (DeelCompetitie
.objects
.filter(competitie=comp,
laag=LAAG_RK)
.values_list('pk', flat=True))
else:
# only the selected rayon
try:
rayon_nr = int(str(subset)) # already truncated to 10 characters earlier
context['rayon'] = NhbRayon.objects.get(rayon_nr=rayon_nr)
except (ValueError, NhbRayon.DoesNotExist):
raise Http404('Selectie wordt niet ondersteund')
rk_deelcomp_pks = (DeelCompetitie
.objects
.filter(competitie=comp,
nhb_rayon=context['rayon'])
.values_list('pk', flat=True))
context['filters'] = filters = list()
alle_filter = {'label': 'Alles'}
if subset != 'alle':
alle_filter['url'] = reverse('Competitie:rayon-teams-alle',
kwargs={'comp_pk': comp.pk,
'subset': 'alle'})
filters.append(alle_filter)
for rayon in NhbRayon.objects.all():
rayon.label = 'Rayon %s' % rayon.rayon_nr
if str(rayon.rayon_nr) != subset:
rayon.url = reverse('Competitie:rayon-teams-alle',
kwargs={'comp_pk': comp.pk, 'subset': rayon.rayon_nr})
filters.append(rayon)
# for
else:
# RKO mode
try:
rk_deelcomp_pk = int(kwargs['rk_deelcomp_pk'][:6]) # truncating protects against garbage input
deelcomp_rk = (DeelCompetitie
.objects
.select_related('competitie')
.get(pk=rk_deelcomp_pk,
laag=LAAG_RK))
except (ValueError, DeelCompetitie.DoesNotExist):
raise Http404('Competitie niet gevonden')
if deelcomp_rk.functie != self.functie_nu:
# not the responsible manager
raise PermissionDenied()
rk_deelcomp_pks = [deelcomp_rk.pk]
context['comp'] = comp = deelcomp_rk.competitie
comp.bepaal_fase()
context['deelcomp'] = deelcomp_rk
context['rayon'] = self.functie_nu.nhb_rayon
if comp.afstand == '18':
aantal_pijlen = 30
else:
aantal_pijlen = 25
totaal_teams = 0
klassen = (CompetitieKlasse
.objects
.filter(competitie=comp,
indiv=None)
.select_related('team',
'team__team_type')
.order_by('team__volgorde'))
klasse2teams = dict() # [klasse] = list(teams)
prev_sterkte = ''
prev_team = None
for klasse in klassen:
klasse2teams[klasse] = list()
if klasse.team.team_type != prev_team:
prev_sterkte = ''
prev_team = klasse.team.team_type
min_ag_str = "%05.1f" % (klasse.min_ag * aantal_pijlen)
min_ag_str = min_ag_str.replace('.', ',')
if prev_sterkte:
if klasse.min_ag > AG_NUL:
klasse.sterkte_str = "sterkte " + min_ag_str + " tot " + prev_sterkte
else:
klasse.sterkte_str = "sterkte tot " + prev_sterkte
else:
klasse.sterkte_str = "sterkte " + min_ag_str + " en hoger"
prev_sterkte = min_ag_str
# for
rk_teams = (KampioenschapTeam
.objects
.select_related('vereniging',
'vereniging__regio',
'team_type',
'klasse',
'klasse__team')
.exclude(klasse=None)
.filter(deelcompetitie__in=rk_deelcomp_pks)
.order_by('klasse__team__volgorde',
'-aanvangsgemiddelde',
'vereniging__ver_nr'))
prev_klasse = None
for team in rk_teams:
if team.klasse != prev_klasse:
team.break_before = True
prev_klasse = team.klasse
# team AG is 0.0 - 30.0 --> show as score: 000.0 .. 900.0
ag_str = "%05.1f" % (team.aanvangsgemiddelde * aantal_pijlen)
team.ag_str = ag_str.replace('.', ',')
if comp.fase <= 'D' and self.rol_nu == Rollen.ROL_RKO:
team.url_aanpassen = reverse('Vereniging:teams-rayon-koppelen',
kwargs={'team_pk': team.pk})
totaal_teams += 1
klasse2teams[team.klasse].append(team)
# for
context['rk_teams'] = klasse2teams
# find the teams that are not complete yet
rk_teams = (KampioenschapTeam
.objects
.select_related('vereniging',
'vereniging__regio',
'team_type',
'deelcompetitie')
.filter(deelcompetitie__in=rk_deelcomp_pks,
klasse=None)
.order_by('team_type__volgorde',
'-aanvangsgemiddelde',
'vereniging__ver_nr'))
is_eerste = True
for team in rk_teams:
# team AG is 0.0 - 30.0 --> show as score: 000,0 .. 900,0
ag_str = "%05.1f" % (team.aanvangsgemiddelde * aantal_pijlen)
team.ag_str = ag_str.replace('.', ',')
if comp.fase <= 'D' and self.rol_nu == Rollen.ROL_RKO:
team.url_aanpassen = reverse('Vereniging:teams-rayon-koppelen',
kwargs={'team_pk': team.pk})
team.url_verwijder = reverse('Vereniging:teams-rayon-wijzig',
kwargs={'rk_deelcomp_pk': team.deelcompetitie.pk,
'team_pk': team.pk})
totaal_teams += 1
team.break_before = is_eerste
is_eerste = False
# for
context['rk_teams_niet_af'] = rk_teams
context['totaal_teams'] = totaal_teams
menu_dynamics_competitie(self.request, context, comp_pk=comp.pk)
return context
class RayonTeamsRKOView(UserPassesTestMixin, RayonTeamsView):
""" Met deze view kan de RKO de aangemaakte teams inzien """
# class variables shared by all instances
subset_filter = False
raise_exception = True # raise PermissionDenied when test_func returns False
def test_func(self):
""" called by the UserPassesTestMixin to verify the user has permissions to use this view """
self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)
return self.rol_nu == Rollen.ROL_RKO
class RayonTeamsAlleView(UserPassesTestMixin, RayonTeamsView):
""" Met deze view kunnen de BB en BKO de aangemaakte teams inzien, met mogelijkheid tot filteren op een rayon """
# class variables shared by all instances
subset_filter = True
raise_exception = True # raise PermissionDenied when test_func returns False
def test_func(self):
""" called by the UserPassesTestMixin to verify the user has permissions to use this view """
self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)
return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO)
# end of file
| 2.0625
| 2
|
src/__init__.py
|
bwu62/subshift
| 1
|
12776062
|
<gh_stars>1-10
from .subshift import Subtitle
import glob
def listSrts():
return glob.glob("./*.srt")
| 1.867188
| 2
|
examples/test_franka.py
|
zhuyifengzju/pybullet-planning
| 1
|
12776063
|
#!/usr/bin/env python
from __future__ import print_function
import pybullet as p
from pybullet_tools.utils import add_data_path, connect, dump_body, load_model, disconnect, wait_for_user, \
get_movable_joints, get_sample_fn, set_joint_positions, get_joint_name, LockRenderer, link_from_name, get_link_pose, \
multiply, Pose, Point, interpolate_poses, HideOutput
from pybullet_tools.ikfast.franka_panda.ik import PANDA_INFO, FRANKA_URDF
from pybullet_tools.ikfast.ikfast import get_ik_joints, closest_inverse_kinematics
def test_retraction(robot, info, tool_link, distance=0.1, **kwargs):
ik_joints = get_ik_joints(robot, info, tool_link)
start_pose = get_link_pose(robot, tool_link)
end_pose = multiply(start_pose, Pose(Point(z=-distance)))
for pose in interpolate_poses(start_pose, end_pose, pos_step_size=0.01):
conf = next(closest_inverse_kinematics(robot, info, tool_link, pose, **kwargs), None)
if conf is None:
print('Failure!')
wait_for_user()
break
set_joint_positions(robot, ik_joints, conf)
wait_for_user()
# for conf in islice(ikfast_inverse_kinematics(robot, info, tool_link, pose, max_attempts=INF, max_distance=0.5), 1):
# set_joint_positions(robot, joints[:len(conf)], conf)
# wait_for_user()
#####################################
def main():
connect(use_gui=True)
add_data_path()
plane = p.loadURDF("plane.urdf")
with HideOutput():
with LockRenderer():
robot = load_model(FRANKA_URDF, fixed_base=True)
dump_body(robot)
print('Start?')
wait_for_user()
tool_link = link_from_name(robot, 'panda_hand')
joints = get_movable_joints(robot)
print('Joints', [get_joint_name(robot, joint) for joint in joints])
sample_fn = get_sample_fn(robot, joints)
for i in range(10):
print('Iteration:', i)
conf = sample_fn()
set_joint_positions(robot, joints, conf)
wait_for_user()
test_retraction(robot, PANDA_INFO, tool_link, max_distance=0.01, max_time=0.05)
disconnect()
if __name__ == '__main__':
main()
| 2.03125
| 2
|
setup.py
|
jonathantumulak/docker-push-ssh
| 70
|
12776064
|
<filename>setup.py<gh_stars>10-100
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="docker-push-ssh",
version="0.1.8",
author="<NAME>",
author_email="",
description="Push local docker images to your remote servers via ssh without the hassle.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/brthor/docker-push-ssh",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Topic :: Software Development :: Build Tools",
"Topic :: Utilities"
],
scripts=[
"bin/docker-push-ssh"
]
)
| 1.726563
| 2
|
predict/module/tvm_kernel/lite/python/at_ops/config_tool.py
|
unseenme/mindspore
| 7
|
12776065
|
<reponame>unseenme/mindspore<filename>predict/module/tvm_kernel/lite/python/at_ops/config_tool.py<gh_stars>1-10
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
This module is define some data struct for tvm kernel.
"""
import tvm
import topi
format_map = {"NCHW": 0, "NHWC": 1}
pool_map = {"max_pool": 0, "avg_pool": 1, "global_pool": 2}
activation_map = {
"no_activation": 0,
"relu": 1,
"sigmoid": 2,
"relu6": 3,
"elu": 4,
"leaky_relu": 5,
"abs": 6,
"relu1": 7,
"softsign": 8,
"softplus": 9,
"tanh ": 10,
}
activation_enum_map = {
"NO_ACTIVATION": 0,
"RELU": 1,
"SIGMOID": 2,
"RELU6": 3,
"elu": 4,
"leaky_relu": 5,
"abs": 6,
"relu1": 7,
"softsign": 8,
"softplus": 9,
"tanh ": 10,
}
padmode_map = {"NOTSET": 0, "SAME": 1, "VALID": 2}
mslite_datatype_map = {
"float16": 1,
"float32": 0,
"double": 11,
"int8": 2,
"int16": 6,
"int32": 3,
"int64": 9,
"uint8": 4,
"uint16": 7,
"uint32": 8,
"uint64": 10,
}
def get_key_by_value(dicts, value):
for k, v in dicts.items():
if v == value:
return k
return None
def relu6(x):
return tvm.compute(
x.shape,
lambda *i: tvm.min(
tvm.max(x(*i), tvm.const(0, x.dtype)), tvm.const(6, x.dtype)
),
)
activation_topi_funs = {"NO_ACTIVATION": None, "RELU": topi.nn.relu, "RELU6": relu6}
name_funcs = {
"Concat": (
lambda opname, x: (
opname + "_%d_%d" + "_%d" + "_%d" * x["ndim"] + "_%d" * len(x["shapeAxis"])
)
% (
format_map[x["format"]],
x["ndim"],
x["axis"],
*x["shapeOut"],
*x["shapeAxis"],
)
),
"Softmax": (
lambda opname, x: (opname + "_%d_%d" + "_%d" * x["ndim"] + "_%d")
% (format_map[x["format"]], x["ndim"], *x["shape"], x["axis"])
),
"Activation": (
lambda opname, x: (opname + "_%d_%d" + "_%d" + "_%d" * x["ndim"])
% (format_map[x["format"]], x["ndim"], activation_map[x["type"]], *x["shape"])
),
"Add": (
lambda opname, x: (opname + "_%d_%d" + "_%d" * x["ndim"])
% (format_map[x["format"]], x["ndim"], *x["shape"])
),
"Convolution": (
lambda opname, x: (
opname + "_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
)
% (
format_map[x["format"]],
x["ndim"],
x["batch"],
x["in_channel"],
*x["in_size"],
x["num_filter"],
*x["filter_size"],
*x["pad"],
*x["stride"],
x["dilation"],
x["hasbias"],
activation_map[x["activation_type"]],
)
),
"Identity": (
lambda opname, x: (opname + "_%d_%d" + "_%d" * x["ndim"])
% (format_map[x["format"]], x["ndim"], *x["shape"])
),
"BatchNorm": (
lambda opname, x: (opname + "_%d_%d" + "_%d" * x["ndim"] + "_%d")
% (format_map[x["format"]], x["ndim"], *x["shape"], x["epsilon"])
),
"Squeeze": (
lambda opname, x: (
opname + "_%d_%d" + "_%d" * x["ndim"] + "_%d" * len(x["axis"])
)
% (format_map[x["format"]], x["ndim"], *x["shape"], *x["axis"])
),
"BiasAdd": (
lambda opname, x: (opname + "_%d_%d" + "_%d" * x["ndim"] + "_%d")
% (format_map[x["format"]], x["ndim"], *x["shape"], x["axis"])
),
"Pooling": (
lambda opname, x: (opname + "_%d_%d_%d" + "_%d" * x["ndim"] + "_%d_%d_%d")
% (
format_map[x["format"]],
x["ndim"],
pool_map[x["type"]],
*x["shape"],
x["kernel"],
x["stride"],
x["pad"],
)
),
"ConvolutionDepthwise": (
lambda opname, x: (
opname + "_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
)
% (
format_map[x["format"]],
x["ndim"],
x["batch"],
x["in_channel"],
*x["in_size"],
x["in_channel"] * x["channel_multiplier"],
*x["filter_size"],
*x["pad"],
*x["stride"],
x["dilation"],
x["hasbias"],
activation_map[x["activation_type"]],
)
),
"Reshape": (
lambda opname, x: (
opname + "_%d_%d" + "_%d" * x["ndimA"] + "_%d" * len(x["shapeB"])
)
% (format_map[x["format"]], x["ndimA"], *x["shapeA"], *x["shapeB"])
),
"Shape": (
lambda opname, x: (opname + "_%d_%d" + "_%d" * x["ndim"])
% (format_map[x["format"]], x["ndim"], *x["shape"])
),
"RealDiv": (
lambda opname, x: (
opname + "_%d_%d" + "_%d" * x["ndim"] + "_%d" * len(x["shapeB"])
)
% (format_map[x["format"]], x["ndim"], *x["shapeA"], *x["shapeB"])
),
"ResizeBilinear": (lambda opname, x: "ResizeBilinear"),
"TFLite_Detection_PostProcess": (lambda opname, x: "TFLite_Detection_PostProcess"),
}
config_dict = {op_type: [] for op_type in name_funcs}
def config_dict_append(op_type, config, opname=None):
if opname is None:
config["opname"] = name_funcs[op_type](op_type, config)
else:
config["opname"] = opname
duplicate = [True for x in config_dict[op_type] if config == x]
if not duplicate:  # only register configs that are not already present
config_dict[op_type].append(config)
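A hypothetical example of registering an op config; the exact fields a real caller supplies are an assumption read off the "Add" lambda in name_funcs above:
add_config = {"format": "NHWC", "ndim": 4, "shape": [1, 224, 224, 3]}
config_dict_append("Add", add_config)
print(add_config["opname"])     # "Add_1_4_1_224_224_3"
print(len(config_dict["Add"]))  # 1, appended the first time this config is seen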
| 1.726563
| 2
|
accelbyte_py_sdk/api/platform/models/fulfillment_history_info.py
|
AccelByte/accelbyte-python-sdk
| 0
|
12776066
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-platform-service (4.10.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ....core import StrEnum
from ..models.credit_summary import CreditSummary
from ..models.entitlement_summary import EntitlementSummary
from ..models.fulfillment_error import FulfillmentError
from ..models.fulfillment_item import FulfillmentItem
class StatusEnum(StrEnum):
SUCCESS = "SUCCESS"
FAIL = "FAIL"
class FulfillmentHistoryInfo(Model):
"""Fulfillment history info (FulfillmentHistoryInfo)
Properties:
created_at: (createdAt) REQUIRED str
id_: (id) REQUIRED str
namespace: (namespace) REQUIRED str
status: (status) REQUIRED Union[str, StatusEnum]
updated_at: (updatedAt) REQUIRED str
user_id: (userId) REQUIRED str
code: (code) OPTIONAL str
credit_summaries: (creditSummaries) OPTIONAL List[CreditSummary]
entitlement_summaries: (entitlementSummaries) OPTIONAL List[EntitlementSummary]
fulfill_items: (fulfillItems) OPTIONAL List[FulfillmentItem]
fulfillment_error: (fulfillmentError) OPTIONAL FulfillmentError
granted_item_ids: (grantedItemIds) OPTIONAL List[str]
order_no: (orderNo) OPTIONAL str
"""
# region fields
created_at: str # REQUIRED
id_: str # REQUIRED
namespace: str # REQUIRED
status: Union[str, StatusEnum] # REQUIRED
updated_at: str # REQUIRED
user_id: str # REQUIRED
code: str # OPTIONAL
credit_summaries: List[CreditSummary] # OPTIONAL
entitlement_summaries: List[EntitlementSummary] # OPTIONAL
fulfill_items: List[FulfillmentItem] # OPTIONAL
fulfillment_error: FulfillmentError # OPTIONAL
granted_item_ids: List[str] # OPTIONAL
order_no: str # OPTIONAL
# endregion fields
# region with_x methods
def with_created_at(self, value: str) -> FulfillmentHistoryInfo:
self.created_at = value
return self
def with_id(self, value: str) -> FulfillmentHistoryInfo:
self.id_ = value
return self
def with_namespace(self, value: str) -> FulfillmentHistoryInfo:
self.namespace = value
return self
def with_status(self, value: Union[str, StatusEnum]) -> FulfillmentHistoryInfo:
self.status = value
return self
def with_updated_at(self, value: str) -> FulfillmentHistoryInfo:
self.updated_at = value
return self
def with_user_id(self, value: str) -> FulfillmentHistoryInfo:
self.user_id = value
return self
def with_code(self, value: str) -> FulfillmentHistoryInfo:
self.code = value
return self
def with_credit_summaries(self, value: List[CreditSummary]) -> FulfillmentHistoryInfo:
self.credit_summaries = value
return self
def with_entitlement_summaries(self, value: List[EntitlementSummary]) -> FulfillmentHistoryInfo:
self.entitlement_summaries = value
return self
def with_fulfill_items(self, value: List[FulfillmentItem]) -> FulfillmentHistoryInfo:
self.fulfill_items = value
return self
def with_fulfillment_error(self, value: FulfillmentError) -> FulfillmentHistoryInfo:
self.fulfillment_error = value
return self
def with_granted_item_ids(self, value: List[str]) -> FulfillmentHistoryInfo:
self.granted_item_ids = value
return self
def with_order_no(self, value: str) -> FulfillmentHistoryInfo:
self.order_no = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "created_at"):
result["createdAt"] = str(self.created_at)
elif include_empty:
result["createdAt"] = ""
if hasattr(self, "id_"):
result["id"] = str(self.id_)
elif include_empty:
result["id"] = ""
if hasattr(self, "namespace"):
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "status"):
result["status"] = str(self.status)
elif include_empty:
result["status"] = Union[str, StatusEnum]()
if hasattr(self, "updated_at"):
result["updatedAt"] = str(self.updated_at)
elif include_empty:
result["updatedAt"] = ""
if hasattr(self, "user_id"):
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
if hasattr(self, "code"):
result["code"] = str(self.code)
elif include_empty:
result["code"] = ""
if hasattr(self, "credit_summaries"):
result["creditSummaries"] = [i0.to_dict(include_empty=include_empty) for i0 in self.credit_summaries]
elif include_empty:
result["creditSummaries"] = []
if hasattr(self, "entitlement_summaries"):
result["entitlementSummaries"] = [i0.to_dict(include_empty=include_empty) for i0 in self.entitlement_summaries]
elif include_empty:
result["entitlementSummaries"] = []
if hasattr(self, "fulfill_items"):
result["fulfillItems"] = [i0.to_dict(include_empty=include_empty) for i0 in self.fulfill_items]
elif include_empty:
result["fulfillItems"] = []
if hasattr(self, "fulfillment_error"):
result["fulfillmentError"] = self.fulfillment_error.to_dict(include_empty=include_empty)
elif include_empty:
result["fulfillmentError"] = FulfillmentError()
if hasattr(self, "granted_item_ids"):
result["grantedItemIds"] = [str(i0) for i0 in self.granted_item_ids]
elif include_empty:
result["grantedItemIds"] = []
if hasattr(self, "order_no"):
result["orderNo"] = str(self.order_no)
elif include_empty:
result["orderNo"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
created_at: str,
id_: str,
namespace: str,
status: Union[str, StatusEnum],
updated_at: str,
user_id: str,
code: Optional[str] = None,
credit_summaries: Optional[List[CreditSummary]] = None,
entitlement_summaries: Optional[List[EntitlementSummary]] = None,
fulfill_items: Optional[List[FulfillmentItem]] = None,
fulfillment_error: Optional[FulfillmentError] = None,
granted_item_ids: Optional[List[str]] = None,
order_no: Optional[str] = None,
) -> FulfillmentHistoryInfo:
instance = cls()
instance.created_at = created_at
instance.id_ = id_
instance.namespace = namespace
instance.status = status
instance.updated_at = updated_at
instance.user_id = user_id
if code is not None:
instance.code = code
if credit_summaries is not None:
instance.credit_summaries = credit_summaries
if entitlement_summaries is not None:
instance.entitlement_summaries = entitlement_summaries
if fulfill_items is not None:
instance.fulfill_items = fulfill_items
if fulfillment_error is not None:
instance.fulfillment_error = fulfillment_error
if granted_item_ids is not None:
instance.granted_item_ids = granted_item_ids
if order_no is not None:
instance.order_no = order_no
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> FulfillmentHistoryInfo:
instance = cls()
if not dict_:
return instance
if "createdAt" in dict_ and dict_["createdAt"] is not None:
instance.created_at = str(dict_["createdAt"])
elif include_empty:
instance.created_at = ""
if "id" in dict_ and dict_["id"] is not None:
instance.id_ = str(dict_["id"])
elif include_empty:
instance.id_ = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "status" in dict_ and dict_["status"] is not None:
instance.status = str(dict_["status"])
elif include_empty:
instance.status = ""
if "updatedAt" in dict_ and dict_["updatedAt"] is not None:
instance.updated_at = str(dict_["updatedAt"])
elif include_empty:
instance.updated_at = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
if "code" in dict_ and dict_["code"] is not None:
instance.code = str(dict_["code"])
elif include_empty:
instance.code = ""
if "creditSummaries" in dict_ and dict_["creditSummaries"] is not None:
instance.credit_summaries = [CreditSummary.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["creditSummaries"]]
elif include_empty:
instance.credit_summaries = []
if "entitlementSummaries" in dict_ and dict_["entitlementSummaries"] is not None:
instance.entitlement_summaries = [EntitlementSummary.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["entitlementSummaries"]]
elif include_empty:
instance.entitlement_summaries = []
if "fulfillItems" in dict_ and dict_["fulfillItems"] is not None:
instance.fulfill_items = [FulfillmentItem.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["fulfillItems"]]
elif include_empty:
instance.fulfill_items = []
if "fulfillmentError" in dict_ and dict_["fulfillmentError"] is not None:
instance.fulfillment_error = FulfillmentError.create_from_dict(dict_["fulfillmentError"], include_empty=include_empty)
elif include_empty:
instance.fulfillment_error = FulfillmentError()
if "grantedItemIds" in dict_ and dict_["grantedItemIds"] is not None:
instance.granted_item_ids = [str(i0) for i0 in dict_["grantedItemIds"]]
elif include_empty:
instance.granted_item_ids = []
if "orderNo" in dict_ and dict_["orderNo"] is not None:
instance.order_no = str(dict_["orderNo"])
elif include_empty:
instance.order_no = ""
return instance
@classmethod
def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, FulfillmentHistoryInfo]:
return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}
@classmethod
def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[FulfillmentHistoryInfo]:
return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []
@classmethod
def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[FulfillmentHistoryInfo, List[FulfillmentHistoryInfo], Dict[Any, FulfillmentHistoryInfo]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"createdAt": "created_at",
"id": "id_",
"namespace": "namespace",
"status": "status",
"updatedAt": "updated_at",
"userId": "user_id",
"code": "code",
"creditSummaries": "credit_summaries",
"entitlementSummaries": "entitlement_summaries",
"fulfillItems": "fulfill_items",
"fulfillmentError": "fulfillment_error",
"grantedItemIds": "granted_item_ids",
"orderNo": "order_no",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"createdAt": True,
"id": True,
"namespace": True,
"status": True,
"updatedAt": True,
"userId": True,
"code": False,
"creditSummaries": False,
"entitlementSummaries": False,
"fulfillItems": False,
"fulfillmentError": False,
"grantedItemIds": False,
"orderNo": False,
}
@staticmethod
def get_enum_map() -> Dict[str, List[Any]]:
return {
"status": ["SUCCESS", "FAIL"],
}
# endregion static methods
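A hedged round-trip example for the model above; all field values are made up for illustration:
info = FulfillmentHistoryInfo.create(
    created_at="2021-01-01T00:00:00Z",
    id_="abc123",
    namespace="game",
    status="SUCCESS",
    updated_at="2021-01-01T00:00:00Z",
    user_id="user-1",
    order_no="O-0001",
)
payload = info.to_dict()  # camelCase keys, ready for the wire format
restored = FulfillmentHistoryInfo.create_from_dict(payload)
assert restored.order_no == "O-0001"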
| 1.835938
| 2
|
djgumroad/products/views.py
|
hiroto0222/gumroad-clone
| 0
|
12776067
|
from django.http.response import JsonResponse
from django.shortcuts import redirect, render
from django.views import generic
from .models import Product
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from .forms import ProductModelForm
import stripe
from django.conf import settings
stripe.api_key = settings.STRIPE_SECRET_KEY
# Create your views here.
class ProductListView(generic.ListView):
template_name = "discover.html"
queryset = Product.objects.filter(active=True)
class ProductDetailView(generic.DetailView):
template_name = "products/product.html"
queryset = Product.objects.all()
context_object_name = "product"
def get_context_data(self, **kwargs):
context = super(ProductDetailView, self).get_context_data(**kwargs)
context.update({
"STRIPE_PUBLIC_KEY": settings.STRIPE_PUBLIC_KEY
})
return context
class UserProductListView(LoginRequiredMixin, generic.ListView):
# shows the users created products
template_name = "products.html"
def get_queryset(self):
return Product.objects.filter(user=self.request.user)
class ProductCreateView(LoginRequiredMixin, generic.CreateView):
template_name = "products/product_create.html"
form_class = ProductModelForm
def get_success_url(self) -> str:
return reverse("products:product-detail", kwargs={
"slug": self.product.slug
})
def form_valid(self, form):
instance = form.save(commit=False)
instance.user = self.request.user
instance.save()
self.product = instance
return super(ProductCreateView, self).form_valid(form)
class ProductUpdateView(LoginRequiredMixin, generic.UpdateView):
template_name = "products/product_update.html"
form_class = ProductModelForm
def get_queryset(self):
return Product.objects.filter(user=self.request.user)
def get_success_url(self) -> str:
return reverse("products:product-detail", kwargs={
"slug": self.get_object().slug
})
class ProductDeleteView(LoginRequiredMixin, generic.DeleteView):
template_name = "products/product_delete.html"
def get_queryset(self):
return Product.objects.filter(user=self.request.user)
def get_success_url(self) -> str:
return reverse("user-products")
class CreateCheckoutSessionView(generic.View):
def post(self, request, *args, **kwargs):
product = Product.objects.get(slug=self.kwargs["slug"])
print(product)
domain = "http://domain.com"
if settings.DEBUG:
domain = "http://127.0.0.1:8000"
session = stripe.checkout.Session.create(
line_items=[
{
'price_data': {
'currency': 'usd',
'product_data': {
'name': product.name,
},
'unit_amount': product.price,
},
'quantity': 1,
}],
mode='payment',
success_url=domain + reverse("success"),
cancel_url=domain + reverse("discover"),)
return JsonResponse({
"id": session.id
})
class SuccessView(generic.TemplateView):
template_name = "success.html"
| 2.046875
| 2
|
app/core/console/database/migration.py
|
tomkiel/falafel
| 1
|
12776068
|
<filename>app/core/console/database/migration.py
from app.core.console.http import route, controller
from textwrap import dedent
from os import listdir, remove
from fnmatch import fnmatch
from sqlalchemy import create_engine, \
MetaData, \
orm, \
exc, \
inspect, \
Table, \
insert, \
Column, \
Integer, \
TIMESTAMP, \
Text
from dynaconf import settings
from datetime import datetime
import inflect
import toml
from importlib import import_module
from . import model
""" Comandos de interação com base de dados """
engine = create_engine(settings.get('SQLALCHEMY_DATABASE_URI'))
metadata = MetaData(bind=engine)
create_session = orm.sessionmaker(bind=engine)
session = create_session()
def create_migration(name):
"""
Create a migration file with the given class
:param name: string
:return: void
"""
name = name.lower()
p = inflect.engine()
for file in listdir(settings.get('FALAFEL_DIR') + settings.get('MIGRATIONS_DIR')):
if (fnmatch(file, "*.toml")) and fnmatch(file, name + "*"):
print("#######")
print("-> Error!")
print("-> Migration " + name + " existe!")
print("-> Verifique o arquivo em " + settings.get('FALAFEL_DIR')
+ settings.get('MIGRATIONS_DIR') + '/')
print("#######")
raise SystemExit()
if "create" in name:
with open(settings.get('FALAFEL_DIR') + settings.get('MIGRATIONS_DIR')
+ '/' + datetime.now().strftime("%Y%m%d%H%M%S") + '_' + name
+ '.toml', 'w') as migration:
table_name = name.replace('create_', '')
content = dedent("""\
table_name = '""" + p.plural(table_name) + """'
action = 'create'
[create]
[create.id]
primary_key = "True"
type = "Integer"
[create.created_at]
default = "datetime.utcnow"
nullable = "False"
type = "TIMESTAMP"
[create.updated_at]
nullable = "False"
onupdate = "datetime.utcnow"
type = "TIMESTAMP"
""")
migration.write(content)
print("#######")
print("-> Migration " + name + " criada com sucesso!")
print("-> Verifique o arquivo em " + settings.get('FALAFEL_DIR') +
settings.get('MIGRATIONS_DIR') + '/' + datetime.now().strftime("%Y%m%d%H%M%S") + '_' + name + '.toml')
print("#######")
elif "update" in name:
with open(settings.get('FALAFEL_DIR') + settings.get('MIGRATIONS_DIR')
+ '/' + datetime.now().strftime("%Y%m%d%H%M%S") + '_' + name
+ '.toml', 'w') as migration:
table_name = name.replace('update_', '')
content = dedent("""\
table_name = '""" + p.plural(table_name) + """'
action = 'update'
[update]
[update.example]
item = "String"
""")
migration.write(content)
print("#######")
print("-> Migration " + name + " criada com sucesso!")
print("-> Verifique o arquivo em " + settings.get('FALAFEL_DIR') +
settings.get('MIGRATIONS_DIR') + '/' + datetime.now().strftime("%Y%m%d%H%M%S") + '_' + name + '.toml')
print("#######")
else:
print("-> Não é possível criar migration, verifique a documentação!")
print("-> Ex:")
print("-> create: python3 fava.py -mkmigration create_name_table_singular")
print("-> update: python3 fava.py -mkmigration update_name_table_singular")
def run_migrate(name):
"""
Run all migration files
:param name: string
:return: void
"""
"""
Before running any migration, we check whether the migrations table exists.
"""
if check_exists('migrations') is False:
create_migrations_table()
tables = []
if name == 'all' or name == '':
print('############################')
print('-> Running migrations...')
print('############################\n')
migration_files = sorted(listdir(settings.get('FALAFEL_DIR') + settings.get('MIGRATIONS_DIR')))
for file in migration_files:
if (fnmatch(file, "*.toml")) and (file != "__init__.py"):
migration = toml.load(settings.get('FALAFEL_DIR') + settings.get('MIGRATIONS_DIR') + '/' + file)
if check_exists(migration.get('table_name')) is False:
tables.append(file.replace('.toml', ''))
if migration.get('create'):
run_create_migration(migration)
elif migration.get('update'):
pass
elif migration.get('delete'):
pass
else:
print("-> Columns doesn't exists in " + migration.get('table_name'))
print("-> Check documentation in http://falafel.docs")
if tables:
save_migration_sate(str(tables))
print('############################')
print('All migrations processed')
else:
pass
pass
def run_create_migration(migration):
"""
Create Model files for new migrations
"""
class_name = model.create_model_for_migration(migration)
if class_name:
route.create_route_cmd(class_name)
controller.create_controller_cmd(class_name)
model_path = settings.get('MODELS_DIR').replace('/', '.') + "." + class_name
db_module = getattr(import_module(model_path), class_name)
try:
db_module.metadata.create_all(engine)
print('Success when running the Model Import for ' + class_name)
except Exception as err:
remove(model_path)
raise SystemExit(err)
def check_exists(table_name):
"""
:param table_name: string
:return: boolean
"""
if table_name in inspect(engine).get_table_names():
return True
else:
return False
def create_migrations_table():
"""
Criação de tabela com informações de migrações
:return: void
"""
try:
migration_table = Table('migrations', metadata,
Column('id', Integer, primary_key=True),
Column('table', Text, nullable=False),
Column('created_at', TIMESTAMP, nullable=False, default=datetime.utcnow))
metadata.create_all(engine)
except exc.SQLAlchemyError as err:
print('#########################')
print('-> Error in migration task!')
print('#########################\n')
raise SystemExit(err)
def save_migration_sate(name):
"""
:param name:
:return:
"""
try:
migration_table = Table('migrations', metadata, autoload=True)
insert_migration = insert(migration_table)
insert_migration = insert_migration.values(
{"table": name, "created_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
)
session.execute(insert_migration)
session.commit()
except Exception as err:
session.rollback()
print(err)
| 2.546875
| 3
|
plotting/plot_distance_benchmark.py
|
MartinTschechne/ASL-hdbscan
| 1
|
12776069
|
<filename>plotting/plot_distance_benchmark.py
import argparse
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set()
def main():
"""Plotting script for distance function benchmarking."""
parser = argparse.ArgumentParser(
add_help=True,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--csv_path', type=str, required=True, help='Path to csv file.')
parser.add_argument(
'--save_path', type=str, help='Path to output directory.')
cfg = parser.parse_args()
benchmark_name = cfg.csv_path.split('/')[-1].removesuffix(".csv")  # rstrip would strip characters, not the extension (Python 3.9+)
measurements_df = pd.read_csv(cfg.csv_path)
opt_names = measurements_df.NAME.unique().tolist()
opt_names.remove("Baseline")
base_df = measurements_df[measurements_df.NAME=="Baseline"]
base_df = base_df.pivot("NUM_POINTS","DIM","CYC")
fig, axes = plt.subplots(len(opt_names),2,figsize=(10,5*len(opt_names)))
if len(opt_names) == 1: # make subplots array 2d, even if we only have a single optimization
axes = [axes]
fig.suptitle(f"{benchmark_name.split('_')[-1]}")
for i, on in enumerate(opt_names):
opt_df = measurements_df[measurements_df.NAME==on]
opt_df = opt_df.pivot("NUM_POINTS","DIM","CYC")
sns.heatmap(opt_df,ax=axes[i][0],annot=True,fmt=".1f",linewidth=.5)
axes[i][0].set_title(f"{on} Cycles per point")
speedup = base_df/opt_df
sns.heatmap(speedup,ax=axes[i][1],annot=True,fmt=".2f",linewidth=.5)
axes[i][1].set_title(f"{on} Speed Up")
plt.tight_layout()
if cfg.save_path:
plt.savefig(f"{cfg.save_path}/{benchmark_name}.png",dpi=100)
else:
plt.show()
return 0
if __name__ == '__main__':
main()
| 3.09375
| 3
|
upython_esp32/device/boot.py
|
elecun/TimboBlockEditor
| 0
|
12776070
|
<gh_stars>0
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
#import webrepl
#webrepl.start()
try:
import usocket as socket
except:
import socket
import time
import esp
esp.osdebug(None)
import gc
gc.collect()
#connect to network via wifi
ssid = 'U+Net76AB'
password = '<PASSWORD>'
import wlan
wlan.start_wlan(ssid, password, 32)
#exec(open('./net/broker.py').read())
| 2.03125
| 2
|
corrector_module/opmon_corrector/document_manager.py
|
nordic-institute/X-Road-Metrics
| 2
|
12776071
|
""" Document Manager - Corrector Module
"""
# The MIT License
# Copyright (c) 2021- Nordic Institute for Interoperability Solutions (NIIS)
# Copyright (c) 2017-2020 Estonian Information System Authority (RIA)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import collections
import hashlib
from .logger_manager import LoggerManager
from . import __version__
class DocumentManager:
def __init__(self, settings):
self.calc = settings['corrector']['calc']
self.logger_m = LoggerManager(settings['logger'], settings['xroad']['instance'], __version__)
self.TIME_WINDOW = settings['corrector']['time-window']
self.COMPARISON_LIST = settings['corrector']['comparison-list']
self.orphan_comparison_list = settings['corrector']['comparison_list_orphan']
self.must_fields = (
'monitoringDataTs',
'securityServerInternalIp',
'securityServerType',
'requestInTs',
'requestOutTs',
'responseInTs',
'responseOutTs',
'clientXRoadInstance',
'clientMemberClass',
'clientMemberCode',
'clientSubsystemCode',
'serviceXRoadInstance',
'serviceMemberClass',
'serviceMemberCode',
'serviceSubsystemCode',
'serviceCode',
'serviceVersion',
'representedPartyClass',
'representedPartyCode',
'messageId',
'messageUserId',
'messageIssue',
'messageProtocolVersion',
'clientSecurityServerAddress',
'serviceSecurityServerAddress',
'requestSize',
'requestMimeSize',
'requestAttachmentCount',
'responseSize',
'responseMimeSize',
'responseAttachmentCount',
'succeeded',
'faultCode',
'faultString',
'serviceType'
)
@staticmethod
def subtract_or_none(a, b):
return None if None in [a, b] else a - b
def _client_calculations(self, in_doc):
"""
Calculates client specific parameters.
:param in_doc: The input document.
:return: Returns the document with applied calculations.
"""
client = in_doc.get('client') or {}
request_in = client.get('requestInTs')
request_out = client.get('requestOutTs')
response_in = client.get('responseInTs')
response_out = client.get('responseOutTs')
if self.calc['total-duration']:
in_doc['totalDuration'] = self.subtract_or_none(response_out, request_in)
if self.calc['client-request-duration']:
in_doc['clientSsRequestDuration'] = self.subtract_or_none(request_out, request_in)
if self.calc['client-response-duration']:
in_doc['clientSsResponseDuration'] = self.subtract_or_none(response_out, response_in)
if self.calc['producer-duration-client-view']:
in_doc['producerDurationClientView'] = self.subtract_or_none(response_in, request_out)
return in_doc
def _producer_calculations(self, in_doc):
"""
Calculates producer specific parameters.
:param in_doc: The input document.
:return: Returns the document with applied calculations.
"""
producer = in_doc.get('producer') or {}
request_in = producer.get('requestInTs')
request_out = producer.get('requestOutTs')
response_in = producer.get('responseInTs')
response_out = producer.get('responseOutTs')
if self.calc['producer-duration-producer-view']:
in_doc['producerDurationProducerView'] = self.subtract_or_none(response_out, request_in)
if self.calc['producer-request-duration']:
in_doc['producerSsRequestDuration'] = self.subtract_or_none(request_out, request_in)
if self.calc['producer-response-duration']:
in_doc['producerSsResponseDuration'] = self.subtract_or_none(response_out, response_in)
if self.calc['producer-is-duration']:
in_doc['producerIsDuration'] = self.subtract_or_none(response_in, request_out)
return in_doc
def _pair_calculations(self, in_doc):
"""
Calculates pair specific parameters.
:param in_doc: The input document.
:return: Returns the document with applied calculations.
"""
client = in_doc.get('client') or {}
producer = in_doc.get('producer') or {}
producer_request_in = producer.get('requestInTs')
producer_response_out = producer.get('responseOutTs')
client_response_in = client.get('responseInTs')
client_request_out = client.get('requestOutTs')
if self.calc['request-nw-duration']:
in_doc['requestNwDuration'] = self.subtract_or_none(producer_request_in, client_request_out)
if self.calc['response-nw-duration']:
in_doc['responseNwDuration'] = self.subtract_or_none(client_response_in, producer_response_out)
if self.calc['request-size']:
in_doc['clientRequestSize'] = self.calculate_transaction_size(client, 'request')
in_doc['producerRequestSize'] = self.calculate_transaction_size(producer, 'request')
if self.calc['response-size']:
in_doc['clientResponseSize'] = self.calculate_transaction_size(client, 'response')
in_doc['producerResponseSize'] = self.calculate_transaction_size(producer, 'response')
return in_doc
@staticmethod
def calculate_transaction_size(document_member: dict, transaction_type: str):
if transaction_type not in ['request', 'response']:
return None
size = None
try:
if document_member[f'{transaction_type}AttachmentCount'] in [0, None]:
size = document_member[f'{transaction_type}Size']
elif document_member[f'{transaction_type}AttachmentCount'] > 0:
size = document_member[f'{transaction_type}MimeSize']
except (TypeError, ValueError, KeyError):
pass
return size
@staticmethod
def get_boundary_value(value):
"""
Fixes the minimum value at -2 ** 31 + 1 and maximum value at 2 ** 31 - 1.
:param value: The value to be checked.
:return: Returns either the input value or the min_value or the max_value based on the input value.
"""
lo = -2 ** 31 + 1
hi = 2 ** 31 - 1
return None if value is None else max(min(value, hi), lo)
def _limit_calculation_values(self, document):
"""
Limits all the calculated values to either -2 ** 31 + 1 (min) or 2 ** 31 - 1 (max).
:param document: The input document.
:return: Returns the document with fixed values.
"""
keys_to_limit = [
'clientSsResponseDuration',
'producerSsResponseDuration',
'requestNwDuration',
'totalDuration',
'producerDurationProducerView',
'responseNwDuration',
'producerResponseSize',
'producerDurationClientView',
'clientResponseSize',
'producerSsRequestDuration',
'clientRequestSize',
'clientSsRequestDuration',
'producerRequestSize',
'producerIsDuration'
]
return {
key: (self.get_boundary_value(value) if key in keys_to_limit else value)
for (key, value) in document.items()
}
def apply_calculations(self, in_doc):
"""
Calls out all the functions to perform the calculations.
:param in_doc: The input document.
:return: Returns the document with the applied calculations.
"""
in_doc = self._client_calculations(in_doc)
in_doc = self._producer_calculations(in_doc)
in_doc = self._pair_calculations(in_doc)
in_doc = self._limit_calculation_values(in_doc)
return in_doc
def match_documents(self, document_a, document_b, orphan=False):
"""
Tries to match 2 regular documents.
:param document_a: The input document A.
:param document_b: The input document B.
:param orphan: Set to True to match orphan documents.
:return: Returns True if the given documents match.
"""
if None in [document_a, document_b]:
return False
# Divide document into client and producer
security_type = document_a.get('securityServerType', None)
if security_type == 'Client':
client = document_a
producer = document_b.get('producer')
elif security_type == 'Producer':
producer = document_a
client = document_b.get('client')
else:
            # If here, something is wrong
self.logger_m.log_error('document_manager',
'Unknown matching type between {0} and {1}'.format(document_a, document_b))
return False
# Check if client/producer object exist
if client is None or producer is None:
return False
# Check time exists
if client.get('requestInTs') is None or producer.get('requestInTs') is None:
return False
# Check time difference
if abs(client['requestInTs'] - producer['requestInTs']) > self.TIME_WINDOW:
return False
# Check attribute list
attributes = self.orphan_comparison_list if orphan else self.COMPARISON_LIST
for attribute in attributes:
if client.get(attribute, None) != producer.get(attribute, None):
return False
# If here, all matching conditions are OK
return True
def find_match(self, document_a, documents_list, orphan=False):
"""
Performs the regular match for given document in the given document_list.
:param document_a: The document to be matched.
        :param documents_list: The list of documents to search for a match.
:param orphan: Set to True to match orphan documents
:return: Returns the matching document. If no match found, returns None.
"""
for cur_document in documents_list:
if self.match_documents(document_a, cur_document, orphan):
return cur_document
return None
@staticmethod
def create_json(client_document, producer_document, client_hash, producer_hash, message_id):
"""
Creates the basic JSON document that includes both client and producer
:param client_document: The client document.
:param producer_document: The producer document.
:param client_hash: Client hash.
:param producer_hash: Producer hash.
:param message_id: Message_id.
:return: Returns the document that includes all the fields.
"""
return {
'client': client_document,
'producer': producer_document,
'clientHash': client_hash,
'producerHash': producer_hash,
'messageId': message_id
}
def correct_structure(self, doc):
"""
Check that documents have all required fields.
        Try to fill in missing fields by heuristics or set them to None as a last resort.
:param doc: The input document.
:return: Returns the corrected document.
"""
if 'requestSize' not in doc:
doc['requestSize'] = doc.get('requestSoapSize') or doc.get('requestRestSize')
if 'responseSize' not in doc:
doc['responseSize'] = doc.get('responseSoapSize') or doc.get('responseRestSize')
if 'serviceType' not in doc and doc.get('responseSoapSize') is not None:
doc['serviceType'] = 'WSDL'
if 'serviceType' not in doc and doc.get('responseRestSize') is not None:
doc['serviceType'] = 'REST'
for f in self.must_fields:
if f not in doc:
doc[f] = None
return doc
@staticmethod
def calculate_hash(_document):
"""
Hash the given document with MD5 and remove _id & insertTime parameters.
        :param _document: The input document.
:return: Returns the monitoringDataTs_document_hash string.
"""
document = _document.copy()
doc_hash = None
if document is not None:
od = collections.OrderedDict(sorted(document.items()))
od.pop('_id', None)
od.pop('insertTime', None)
od.pop('corrected', None)
json_str = str(od)
doc_hash = hashlib.md5(json_str.encode('utf-8')).hexdigest()
return "{0}_{1}".format(document['monitoringDataTs'], doc_hash)
| 1.710938
| 2
|
peerdrive/client/extract-public.image.py
|
DavidAlphaFox/chef-server
| 0
|
12776072
|
<reponame>DavidAlphaFox/chef-server
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#
# PeerDrive
# Copyright (C) 2011 <NAME> <<EMAIL> DOT k<EMAIL> AT freenet DOT de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, json
from PyQt4 import QtGui
# create a QApplication; QImage needs it
app = QtGui.QApplication(sys.argv)
image = QtGui.QImage()
image.load(sys.argv[1].decode('utf8'))
data = {
"public.image" : {
"width" : image.width(),
"height" : image.height()
}
}
print json.dumps(data)
| 2
| 2
|
newjunk.py
|
projecthexa/hexa
| 7
|
12776073
|
<reponame>projecthexa/hexa
__author__ = 'guru'
import binascii
dataString = '0005010203041122334400b9'
def checkSum(dataString,write):
totalSum = 0
for i in range(0,22):
if( i%2 == 0):
totalSum += int(dataString[i:i+2],16)
checkSum = dataString[22:24]
tempString = hex(totalSum)[::-1] #[::-1] reverses the string
if((tempString[0:2][::-1] == checkSum) or (write == 1)):
chkString = tempString[0:2][::-1]
return (1,chkString)
else:
chkString = '0'
return (0,chkString)
a = checkSum(dataString, 0)
print('yo >>>',a)
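# Worked example for the sample string above: the byte sum of the first eleven
# bytes is 0x00+0x05+0x01+0x02+0x03+0x04+0x11+0x22+0x33+0x44+0x00 = 0xb9, which
# equals the trailing checksum byte, so checkSum(dataString, 0) returns (1, 'b9').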
channel = dataString[0:2]
command = dataString[2:4]
param11 = dataString[4:6]
param12 = dataString[6:8]
param21 = dataString[8:10]
param22 = dataString[10:12]
dataSize11 = dataString[12:14]
dataSize12 = dataString[14:16]
dataSize21 = dataString[16:18]
dataSize22 = dataString[18:20]
errCode = dataString[20:22]
def dataCompiler(channel,command,param11,param12,param21,param22,dataSize11,dataSize12,dataSize21,dataSize22,errCode):
All = (channel+command+param11+param12+param21+param22+dataSize11+dataSize12+dataSize21+dataSize22+errCode+'00')
temp = checkSum(All,1)
if( temp[0] == 1):
All = All[0:22]
All+=temp[1]
new = temp[0]
return All
else:
return 0
#final = dataCompiler(channel,command,param11,param12,param21,param22,dataSize11,dataSize12,dataSize21,dataSize22,errCode)
#print('wow',final)
def fpsTransmitter(data):
print('data:',data)
print('data :',binascii.unhexlify(data))
def registrationDataGenerator():
#CMD_FP_REGISTER_START(0x51)_
channel = '00'
command = ['50','51','19']
param11= ['ff','00']
param12= ['ff','00']
param21= '00'
param22 = '00'
dataSize11= ['05','00']
dataSize12= '00'
dataSize21= '00'
dataSize22= '00'
errCode = '00'
data = dataCompiler(channel,command[0],param11[0],param12[0],param21,param22,dataSize11[0],dataSize12,dataSize21,dataSize22,errCode)
fpsTransmitter(data)
data = dataCompiler(channel,command[1],param11[1],param12[1],param21,param22,dataSize11[1],dataSize12,dataSize21,dataSize22,errCode)
fpsTransmitter(data)
data = dataCompiler(channel,command[2],param11[1],param12[1],param21,param22,dataSize11[1],dataSize12,dataSize21,dataSize22,errCode)
fpsTransmitter(data)
data = dataCompiler(channel,command[0],param11[0],param12[0],param21,param22,dataSize11[0],dataSize12,dataSize21,dataSize22,errCode)
fpsTransmitter(data)
data = dataCompiler(channel,command[1],param11[1],param12[1],param21,param22,dataSize11[1],dataSize12,dataSize21,dataSize22,errCode)
fpsTransmitter(data)
def initiateRegistration(mobileNumber):
data = dataCompiler('00','50','ff','ff','00','00','05','00','00','00','00')
fpsTransmitter(data+mobileNumber)
    response = fpsReceiver()  # fpsReceiver() is assumed to be defined elsewhere in the project
    chk = checkSum(response, 0)
    if chk[0] == 1:
        if response[2:4] == '50' and response[4:6] == 'ff' and response[6:8] == 'ff' and response[20:22] == '00':
            return (1, '00')
        else:
            return (0, '00')
    else:
        return (0, response[20:22])
initiateRegistration("7790844870")
| 2.84375
| 3
|
cleverhans/experimental/certification/optimization.py
|
anant90/cleverhans
| 0
|
12776074
|
"""Code for setting up the optimization problem for certification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
from scipy.sparse.linalg import eigs, LinearOperator
import tensorflow as tf
from tensorflow.contrib import autograph
from cleverhans.experimental.certification import utils
# Bound on lowest value of certificate to check for numerical errors
LOWER_CERT_BOUND = -10.0
UPDATE_PARAM_CONSTANT = -0.1
class Optimization(object):
"""Class that sets up and runs the optimization of dual_formulation"""
def __init__(self, dual_formulation_object, sess, optimization_params):
"""Initialize the class variables.
Args:
dual_formulation_object: Instance of DualFormulation that contains the
dual variables and objective
sess: tf session to be used to run
optimization_params: Dictionary with the following
eig_num_iter - Number of iterations to run for computing minimum eigen
value
eig_learning_rate - Learning rate for minimum eigen value iterations
init_smooth - Starting value of the smoothness parameter (typically
around 0.001)
smooth_decay - The factor by which to decay after every outer loop epoch
optimizer - one of gd, adam, momentum or adagrad
eig_type - The method to compute eigenvalues (TF or SCIPY)
"""
self.sess = sess
self.dual_object = dual_formulation_object
self.params = optimization_params
self.penalty_placeholder = tf.placeholder(tf.float32, shape=[])
# The dimensionality of matrix M is the sum of sizes of all layers + 1
# The + 1 comes due to a row and column of M representing the linear terms
self.eig_init_vec_placeholder = tf.placeholder(
tf.float32, shape=[1 + self.dual_object.dual_index[-1], 1])
self.smooth_placeholder = tf.placeholder(tf.float32, shape=[])
self.eig_num_iter_placeholder = tf.placeholder(tf.int32, shape=[])
self.current_eig_val_estimate = None
# Create graph for optimization
self.prepare_for_optimization()
def tf_min_eig_vec(self):
"""Function for min eigen vector using tf's full eigen decomposition."""
# Full eigen decomposition requires the explicit psd matrix M
_, matrix_m = self.dual_object.get_full_psd_matrix()
[eig_vals, eig_vectors] = tf.self_adjoint_eig(matrix_m)
index = tf.argmin(eig_vals)
return tf.reshape(
eig_vectors[:, index], shape=[eig_vectors.shape[0].value, 1])
def tf_smooth_eig_vec(self):
"""Function that returns smoothed version of min eigen vector."""
_, matrix_m = self.dual_object.get_full_psd_matrix()
# Easier to think in terms of max so negating the matrix
[eig_vals, eig_vectors] = tf.self_adjoint_eig(-matrix_m)
exp_eig_vals = tf.exp(tf.divide(eig_vals, self.smooth_placeholder))
scaling_factor = tf.reduce_sum(exp_eig_vals)
# Multiplying each eig vector by exponential of corresponding eig value
# Scaling factor normalizes the vector to be unit norm
eig_vec_smooth = tf.divide(
tf.matmul(eig_vectors, tf.diag(tf.sqrt(exp_eig_vals))),
tf.sqrt(scaling_factor))
return tf.reshape(
tf.reduce_sum(eig_vec_smooth, axis=1),
shape=[eig_vec_smooth.shape[0].value, 1])
def get_min_eig_vec_proxy(self, use_tf_eig=False):
"""Computes the min eigen value and corresponding vector of matrix M.
Args:
use_tf_eig: Whether to use tf's default full eigen decomposition
    Returns:
      eig_vec: the estimated eigenvector corresponding to the minimum
        eigenvalue of matrix M
"""
if use_tf_eig:
# If smoothness parameter is too small, essentially no smoothing
# Just output the eigen vector corresponding to min
return tf.cond(self.smooth_placeholder < 1E-8,
self.tf_min_eig_vec,
self.tf_smooth_eig_vec)
# Using autograph to automatically handle
# the control flow of minimum_eigen_vector
min_eigen_tf = autograph.to_graph(utils.minimum_eigen_vector)
def _vector_prod_fn(x):
return self.dual_object.get_psd_product(x)
estimated_eigen_vector = min_eigen_tf(
x=self.eig_init_vec_placeholder,
num_steps=self.eig_num_iter_placeholder,
learning_rate=self.params['eig_learning_rate'],
vector_prod_fn=_vector_prod_fn)
return estimated_eigen_vector
def get_scipy_eig_vec(self):
"""Computes scipy estimate of min eigenvalue for matrix M.
    Returns:
      eig_vec: the eigenvector corresponding to the minimum eigenvalue
      eig_val: the estimated minimum eigenvalue of matrix M
"""
if not self.params['has_conv']:
matrix_m = self.sess.run(self.dual_object.matrix_m)
min_eig_vec_val, estimated_eigen_vector = eigs(matrix_m, k=1, which='SR',
tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
else:
dim = self.dual_object.matrix_m_dimension
input_vector = tf.placeholder(tf.float32, shape=(dim, 1))
output_vector = self.dual_object.get_psd_product(input_vector)
def np_vector_prod_fn(np_vector):
np_vector = np.reshape(np_vector, [-1, 1])
output_np_vector = self.sess.run(output_vector, feed_dict={input_vector:np_vector})
return output_np_vector
linear_operator = LinearOperator((dim, dim), matvec=np_vector_prod_fn)
# Performing shift invert scipy operation when eig val estimate is available
min_eig_vec_val, estimated_eigen_vector = eigs(linear_operator,
k=1, which='SR', tol=1E-4)
min_eig_vec_val = np.reshape(np.real(min_eig_vec_val), [1, 1])
return np.reshape(estimated_eigen_vector, [-1, 1]), min_eig_vec_val
def prepare_for_optimization(self):
"""Create tensorflow op for running one step of descent."""
if self.params['eig_type'] == 'TF':
self.eig_vec_estimate = self.get_min_eig_vec_proxy()
else:
self.eig_vec_estimate = tf.placeholder(tf.float32, shape=(self.dual_object.matrix_m_dimension, 1))
self.stopped_eig_vec_estimate = tf.stop_gradient(self.eig_vec_estimate)
# Eig value is v^\top M v, where v is eigen vector
self.eig_val_estimate = tf.matmul(
tf.transpose(self.stopped_eig_vec_estimate),
self.dual_object.get_psd_product(self.stopped_eig_vec_estimate))
# Penalizing negative of min eigen value because we want min eig value
# to be positive
self.total_objective = (
self.dual_object.unconstrained_objective
+ 0.5 * tf.square(
tf.maximum(-self.penalty_placeholder * self.eig_val_estimate, 0)))
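    # In other words, the augmented objective is
    #   unconstrained_objective + 0.5 * max(-penalty * lambda_min_estimate, 0) ** 2,
    # so the penalty term vanishes once the estimated minimum eigenvalue is
    # non-negative.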
global_step = tf.Variable(0, trainable=False)
# Set up learning rate as a placeholder
self.learning_rate = tf.placeholder(tf.float32, shape=[])
# Set up the optimizer
if self.params['optimizer'] == 'adam':
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
elif self.params['optimizer'] == 'adagrad':
self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
elif self.params['optimizer'] == 'momentum':
self.optimizer = tf.train.MomentumOptimizer(
learning_rate=self.learning_rate,
momentum=self.params['momentum_parameter'],
use_nesterov=True)
else:
self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate)
# Write out the projection step
self.train_step = self.optimizer.minimize(
self.total_objective, global_step=global_step)
self.sess.run(tf.global_variables_initializer())
# Projecting the dual variables
proj_ops = []
for i in range(self.dual_object.nn_params.num_hidden_layers + 1):
# Lambda_pos is non negative for switch indices,
# Unconstrained for positive indices
# Zero for negative indices
proj_ops.append(self.dual_object.lambda_pos[i].assign(
tf.multiply(self.dual_object.positive_indices[i],
self.dual_object.lambda_pos[i])+
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_pos[i]))))
proj_ops.append(self.dual_object.lambda_neg[i].assign(
tf.multiply(self.dual_object.negative_indices[i],
self.dual_object.lambda_neg[i])+
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_neg[i]))))
# Lambda_quad is only non zero and positive for switch
proj_ops.append(self.dual_object.lambda_quad[i].assign(
tf.multiply(self.dual_object.switch_indices[i],
tf.nn.relu(self.dual_object.lambda_quad[i]))))
# Lambda_lu is always non negative
proj_ops.append(self.dual_object.lambda_lu[i].assign(
tf.nn.relu(self.dual_object.lambda_lu[i])))
self.proj_step = tf.group(proj_ops)
# Create folder for saving stats if the folder is not None
if (self.params.get('stats_folder') and
not tf.gfile.IsDirectory(self.params['stats_folder'])):
tf.gfile.MkDir(self.params['stats_folder'])
self.current_scipy_eig_val = None
def run_one_step(self, eig_init_vec_val, eig_num_iter_val, smooth_val,
penalty_val, learning_rate_val):
"""Run one step of gradient descent for optimization.
Args:
eig_init_vec_val: Start value for eigen value computations
eig_num_iter_val: Number of iterations to run for eigen computations
smooth_val: Value of smoothness parameter
penalty_val: Value of penalty for the current step
learning_rate_val: Value of learning rate
Returns:
      found_cert: True if a negative certificate is found, False otherwise
"""
# Project onto feasible set of dual variables
if self.current_step != 0 and self.current_step % self.params['projection_steps'] == 0:
current_certificate = self.dual_object.compute_certificate()
tf.logging.info('Inner step: %d, current value of certificate: %f',
self.current_step, current_certificate)
# Sometimes due to either overflow or instability in inverses,
# the returned certificate is large and negative -- keeping a check
if LOWER_CERT_BOUND < current_certificate < 0:
tf.logging.info('Found certificate of robustness!')
return True
# Running step
step_feed_dict = {self.eig_init_vec_placeholder: eig_init_vec_val,
self.eig_num_iter_placeholder: eig_num_iter_val,
self.smooth_placeholder: smooth_val,
self.penalty_placeholder: penalty_val,
self.learning_rate: learning_rate_val}
if self.params['eig_type'] == 'SCIPY':
current_eig_vector, self.current_eig_val_estimate = self.get_scipy_eig_vec()
step_feed_dict.update({
self.eig_vec_estimate: current_eig_vector
})
self.sess.run(self.train_step, feed_dict=step_feed_dict)
[
_, self.current_eig_vec_val, self.current_eig_val_estimate
] = self.sess.run([
self.proj_step,
self.eig_vec_estimate,
self.eig_val_estimate
], feed_dict=step_feed_dict)
if self.current_step % self.params['print_stats_steps'] == 0:
[self.current_total_objective, self.current_unconstrained_objective,
self.current_eig_vec_val,
self.current_eig_val_estimate,
self.current_nu] = self.sess.run(
[self.total_objective,
self.dual_object.unconstrained_objective,
self.eig_vec_estimate,
self.eig_val_estimate,
self.dual_object.nu], feed_dict=step_feed_dict)
stats = {
'total_objective':
float(self.current_total_objective),
'unconstrained_objective':
float(self.current_unconstrained_objective),
'min_eig_val_estimate':
float(self.current_eig_val_estimate)
}
tf.logging.debug('Current inner step: %d, optimization stats: %s',
self.current_step, stats)
if self.params['stats_folder'] is not None:
stats = json.dumps(stats)
filename = os.path.join(self.params['stats_folder'],
str(self.current_step) + '.json')
        with tf.gfile.Open(filename, 'w') as file_f:
file_f.write(stats)
return False
def run_optimization(self):
"""Run the optimization, call run_one_step with suitable placeholders.
Returns:
True if certificate is found
False otherwise
"""
penalty_val = self.params['init_penalty']
# Don't use smoothing initially - very inaccurate for large dimension
self.smooth_on = False
smooth_val = 0
learning_rate_val = self.params['init_learning_rate']
self.current_outer_step = 1
while self.current_outer_step <= self.params['outer_num_steps']:
tf.logging.info('Running outer step %d with penalty %f',
self.current_outer_step, penalty_val)
# Running inner loop of optimization with current_smooth_val,
# current_penalty as smoothness parameters and penalty respectively
self.current_step = 0
# Run first step with random eig initialization and large number of steps
found_cert = self.run_one_step(
np.random.random(size=(1 + self.dual_object.dual_index[-1], 1)),
self.params['large_eig_num_steps'], smooth_val, penalty_val, learning_rate_val)
if found_cert:
return True
while self.current_step < self.params['inner_num_steps']:
self.current_step = self.current_step + 1
found_cert = self.run_one_step(self.current_eig_vec_val,
self.params['small_eig_num_steps'],
smooth_val, penalty_val,
learning_rate_val)
if found_cert:
          return True
      # Update penalty only if it looks like the current objective is being optimized
if self.current_total_objective < UPDATE_PARAM_CONSTANT:
penalty_val = penalty_val * self.params['beta']
learning_rate_val = learning_rate_val*self.params['learning_rate_decay']
else:
# To get more accurate gradient estimate
self.params['small_eig_num_steps'] = (
1.5 * self.params['small_eig_num_steps'])
# If eigen values seem small enough, turn on smoothing
# useful only when performing full eigen decomposition
if np.abs(self.current_eig_val_estimate) < 0.01:
smooth_val = self.params['smoothness_parameter']
self.current_outer_step = self.current_outer_step + 1
return False
| 2.75
| 3
|
Python/Uri 1011 - Esfera.py
|
Gui25Reis/URI
| 0
|
12776075
|
raio = int(input('Raio: '))
pi = 3.14159
volume = (4/3)*pi*raio**3
print("Volume = ", format(volume,'.3f'))
#Para o URI:
R = float(input())
pi = 3.14159
v = (4/3)*pi*R**3
print("VOLUME = "+format(v,'.3f'))
| 3.9375
| 4
|
oops_fhir/r4/code_system/explanation_of_benefit_status.py
|
Mikuana/oops_fhir
| 0
|
12776076
|
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["ExplanationOfBenefitStatus"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class ExplanationOfBenefitStatus:
"""
ExplanationOfBenefitStatus
A code specifying the state of the resource instance.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/explanationofbenefit-status
"""
active = CodeSystemConcept(
{
"code": "active",
"definition": "The resource instance is currently in-force.",
"display": "Active",
}
)
"""
Active
The resource instance is currently in-force.
"""
cancelled = CodeSystemConcept(
{
"code": "cancelled",
"definition": "The resource instance is withdrawn, rescinded or reversed.",
"display": "Cancelled",
}
)
"""
Cancelled
The resource instance is withdrawn, rescinded or reversed.
"""
draft = CodeSystemConcept(
{
"code": "draft",
"definition": "A new resource instance the contents of which is not complete.",
"display": "Draft",
}
)
"""
Draft
A new resource instance the contents of which is not complete.
"""
entered_in_error = CodeSystemConcept(
{
"code": "entered-in-error",
"definition": "The resource instance was entered in error.",
"display": "Entered In Error",
}
)
"""
Entered In Error
The resource instance was entered in error.
"""
class Meta:
resource = _resource
| 2.515625
| 3
|
loader.py
|
Flaiers/Youtube
| 2
|
12776077
|
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram import Bot, Dispatcher
from config import TOKEN
import logging
# logging level
logging.basicConfig(level=logging.INFO)
# FSM data storage
storage = MemoryStorage()
# bot initialization
bot = Bot(token=TOKEN, parse_mode='html')
dp = Dispatcher(bot, storage=storage)
| 2
| 2
|
flask_boilerplate/admin/__init__.py
|
wellls/flask_boilerplate
| 0
|
12776078
|
# -*- coding:utf-8 -*-
# __author__ = '<NAME>'
# Admin backend entry file
from flask import Blueprint
admin = Blueprint('admin', __name__, template_folder='./templates', static_folder='./static')
theme = 'default'
from . import views
| 1.34375
| 1
|
tools/binfiles/hex2bin.py
|
poojabitripple/TvRQ
| 0
|
12776079
|
<reponame>poojabitripple/TvRQ<filename>tools/binfiles/hex2bin.py
#!/usr/bin/env python
from __future__ import print_function
import sys
orig = sys.stdin.read()
v = 0
isfirst = True
for x in orig:
if x.isspace():
continue
offs = "0123456789abcdef".find(x.lower())
if offs == -1:
sys.stderr.write("Error: Invalid character `%s'." % x)
sys.exit(1)
# Accumulate two chars.
if isfirst:
v = offs
isfirst = False
else:
bout = (16*v + offs).to_bytes(1, byteorder='big')
sys.stdout.buffer.write(bout)
v = 0
isfirst = True
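# Example usage (illustrative): feed hex text on stdin and redirect the binary
# output, e.g.  printf '48656c6c6f' | ./hex2bin.py > hello.bin  writes b'Hello'.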
| 2.453125
| 2
|
utils.py
|
sweemeng/devkami_dev_survey_streamlit
| 1
|
12776080
|
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def get_count_except(counter, exception):
keys = []
results = []
for key in counter:
if key == exception:
continue
keys.append(key)
results.append(counter[key])
return keys, results
def count_cols(series):
counter = Counter()
for item in series:
counter.update(item.split(', '))
return counter
def generate_hbar(x, y, xlabel, ylabel, title):
plt.figure(figsize=(20,10))
x_pos = [i for i, _ in enumerate(x)]
plt.barh(x_pos, y)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.yticks(x_pos, x)
plt.title(title)
plt.show()
def generate_hbar_subplot(x, y, xlabel, ylabel, title, ax):
x_pos = [i for i, _ in enumerate(x)]
ax.barh(x_pos, y)
ax.set(xlabel=xlabel)
ax.set(ylabel=ylabel)
ax.set_yticks(x_pos)
ax.set_yticklabels(x)
ax.set_title(title)
def get_nested_values(series):
results = set()
for item in series:
results.update(item.split(', '))
return list(results)
def filter_by_language(df, value):
result = []
for col in df['language_used']:
if value in col.split(', '):
result.append(True)
else:
result.append(False)
return result
def filter_by_ide(df, value):
result = []
for col in df['ide_used']:
if value in col.split(', '):
result.append(True)
else:
result.append(False)
return result
def filter_by_field(field, df, value):
result = []
for col in df[field]:
if value in col.split(', '):
result.append(True)
else:
result.append(False)
return result
def load_data(file_name):
    df = pd.read_csv(file_name, sep="\t")
remap_columns = {
'What language do you use': 'language_used',
'What kind of application do you build?': 'application_built',
'What OS you deployed to': 'os_deployed',
'What OS you write your code on': 'os_coded',
'What IDE do you use': 'ide_used',
'What Version control do you use': 'vcs_used',
'How do you test your application?': 'app_test',
'Tell us more about your development setup. Tell us things like the plugin you use on your IDE, whether you use docker or kubernetes, do you code using remote development tools etc.': 'dev_setup',
'Tell us about your computer. Tell us about the spec, which model etc': 'computer_model',
'How do you deploy your application? Tell us whether you build an docker image or use a script etc.': 'deploy_method',
'What issue tracker you use in your team?': 'tracker_used',
'Do you do standup in your work place': 'standup',
'Do your team do sprint planning': 'sprint_planning',
'Tell us more about your development process. What else your team do other than standup and sprint planning': 'dev_process',
}
df.rename(columns=remap_columns, inplace=True)
df.replace(np.nan,'n/a',inplace=True)
return df
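# Illustrative usage sketch (assumes a devkami_survey.tsv file with the survey
# columns listed in remap_columns above):
#   df = load_data("devkami_survey.tsv")
#   counter = count_cols(df['language_used'])
#   keys, counts = get_count_except(counter, 'n/a')
#   generate_hbar(keys, counts, 'count', 'language', 'Languages used by respondents')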
| 2.9375
| 3
|
src/login/urls.py
|
gucunskidj/exam-registration
| 0
|
12776081
|
<filename>src/login/urls.py<gh_stars>0
from django.urls import path
from . import views
#from django.contrib.auth import views as auth_views IND
urlpatterns = [
path('', views.loginPage, name='login'),
path('prijavi_ispite/', views.prijavi_ispite, name='prijavi_ispite'),
path('prijavljeni_ispiti/', views.prijavljeni_ispiti, name='prijavljeni_ispiti'),
path('logout/', views.logoutUser, name='logout'),
]
| 1.851563
| 2
|
examples/basic-example.py
|
deephyper/metalgpy
| 0
|
12776082
|
<reponame>deephyper/metalgpy
from numpy.random.mtrand import sample
import metalgpy as mpy
# the @mpy.meta decorator transform an original python code
# into a meta-program. f is now symbolizing the original python code
@mpy.meta
def f(x):
return x**2
# program is a symbol representing the call to f (original python code)
# where the input is a symbol representing a variable List (categorical decision variable)
program = f(mpy.List([0,1,2,3,4]))
print("Program: ", program, end="\n\n")
# the choice method returns the variable symbols of the symbolized program
choices = program.choices()
print("Variable Space: ", choices)
# mpy.sample(program, size=n) generates clones of the symbolized program
for _, sample_program in mpy.sample(program, size=5):
print("\n ** new random program **")
print(f"{sample_program} = {sample_program.evaluate()}")
| 3.46875
| 3
|
backend/puzzle/serializers/comment.py
|
mductran/puzzle
| 0
|
12776083
|
<gh_stars>0
from rest_framework import serializers
from puzzle.models import Comment
from puzzle.models import Account
class CommentSerializer(serializers.ModelSerializer):
author_name = serializers.CharField(source="author.user.username", read_only=True)
class Meta:
model = Comment
fields = ["id", "content", "created", "updated", "author_id", "post_id", "author_name"]
def create(self, validated_data):
print('\nVALIDATED DATA: ', validated_data)
comment_instance = Comment.objects.create(**validated_data)
        return comment_instance
| 2.15625
| 2
|
src/adresses/api.py
|
marquesds/zipnator
| 0
|
12776084
|
<reponame>marquesds/zipnator
import logging
from django.conf import settings
from django.http import Http404
from adresses import InvalidZipcodeException
from adresses.models import Address
from adresses.helpers import validate_zipcode
from restless.dj import DjangoResource
from restless.preparers import FieldsPreparer
import requests
logger = logging.getLogger('zipnator')
class AddressResource(DjangoResource):
preparer = FieldsPreparer(fields={
'id': 'id',
'street': 'street',
'district': 'district',
'city': 'city',
'state': 'state',
'zipcode': 'zipcode',
})
def is_authenticated(self):
return True
def create(self):
validate_zipcode(self.data['zipcode'])
zipcode_url = settings.ZIPCODE_PROVIDER + self.data['zipcode']
resp = requests.get(zipcode_url)
if resp.status_code == 404:
msg = 'Zipcode not found'
logger.error(msg)
raise Http404(msg)
else:
results = resp.json()
data = {
'street': results.get('logradouro'),
'district': results.get('bairro'),
'city': results.get('cidade'),
'state': results.get('estado'),
'zipcode': results.get('cep')
}
return Address.objects.create(**data)
def delete(self, pk):
validate_zipcode(pk)
pk = pk.replace('-', '')
Address.objects.get(zipcode=pk).delete()
def list(self):
limit = self.request.GET.get('limit', None)
limit = int(limit) if limit else limit
return Address.objects.all()[:limit]
def detail(self, pk):
validate_zipcode(pk)
pk = pk.replace('-', '')
return Address.objects.get(zipcode=pk)
| 2.109375
| 2
|
env/lib/python3.6/site-packages/pytests/test_filters.py
|
rogerscristo/BotFWD
| 0
|
12776085
|
<gh_stars>0
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime
import pytest
from telegram import Message, User, Chat, MessageEntity
from telegram.ext import Filters, BaseFilter
@pytest.fixture()
def message():
return Message(0, User(0, "Testuser"), datetime.datetime.now(), Chat(0, 'private'))
@pytest.fixture(scope="function",
params=MessageEntity.ALL_TYPES)
def message_entity(request):
return MessageEntity(type=request.param, offset=0, length=0, url="", user="")
class TestFilters:
def test_filters_all(self, message):
assert Filters.all(message)
def test_filters_text(self, message):
message.text = 'test'
assert Filters.text(message)
message.text = '/test'
assert not Filters.text(message)
def test_filters_command(self, message):
message.text = 'test'
assert not Filters.command(message)
message.text = '/test'
assert Filters.command(message)
def test_filters_reply(self, message):
another_message = Message(1, User(1, "TestOther"), datetime.datetime.now(),
Chat(0, 'private'))
message.text = 'test'
assert not Filters.reply(message)
message.reply_to_message = another_message
assert Filters.reply(message)
def test_filters_audio(self, message):
assert not Filters.audio(message)
message.audio = 'test'
assert Filters.audio(message)
def test_filters_document(self, message):
assert not Filters.document(message)
message.document = 'test'
assert Filters.document(message)
def test_filters_photo(self, message):
assert not Filters.photo(message)
message.photo = 'test'
assert Filters.photo(message)
def test_filters_sticker(self, message):
assert not Filters.sticker(message)
message.sticker = 'test'
assert Filters.sticker(message)
def test_filters_video(self, message):
assert not Filters.video(message)
message.video = 'test'
assert Filters.video(message)
def test_filters_voice(self, message):
assert not Filters.voice(message)
message.voice = 'test'
assert Filters.voice(message)
def test_filters_contact(self, message):
assert not Filters.contact(message)
message.contact = 'test'
assert Filters.contact(message)
def test_filters_location(self, message):
assert not Filters.location(message)
message.location = 'test'
assert Filters.location(message)
def test_filters_venue(self, message):
assert not Filters.venue(message)
message.venue = 'test'
assert Filters.venue(message)
def test_filters_status_update(self, message):
assert not Filters.status_update(message)
message.new_chat_members = ['test']
assert Filters.status_update(message)
assert Filters.status_update.new_chat_members(message)
message.new_chat_members = None
message.left_chat_member = 'test'
assert Filters.status_update(message)
assert Filters.status_update.left_chat_member(message)
message.left_chat_member = None
message.new_chat_title = 'test'
assert Filters.status_update(message)
assert Filters.status_update.new_chat_title(message)
message.new_chat_title = ''
message.new_chat_photo = 'test'
assert Filters.status_update(message)
assert Filters.status_update.new_chat_photo(message)
message.new_chat_photo = None
message.delete_chat_photo = True
assert Filters.status_update(message)
assert Filters.status_update.delete_chat_photo(message)
message.delete_chat_photo = False
message.group_chat_created = True
assert Filters.status_update(message)
assert Filters.status_update.chat_created(message)
message.group_chat_created = False
message.supergroup_chat_created = True
assert Filters.status_update(message)
assert Filters.status_update.chat_created(message)
message.supergroup_chat_created = False
message.channel_chat_created = True
assert Filters.status_update(message)
assert Filters.status_update.chat_created(message)
message.channel_chat_created = False
message.migrate_to_chat_id = 100
assert Filters.status_update(message)
assert Filters.status_update.migrate(message)
message.migrate_to_chat_id = 0
message.migrate_from_chat_id = 100
assert Filters.status_update(message)
assert Filters.status_update.migrate(message)
message.migrate_from_chat_id = 0
message.pinned_message = 'test'
assert Filters.status_update(message)
assert Filters.status_update.pinned_message(message)
message.pinned_message = None
def test_filters_forwarded(self, message):
assert not Filters.forwarded(message)
message.forward_date = 'test'
assert Filters.forwarded(message)
def test_filters_game(self, message):
assert not Filters.game(message)
message.game = 'test'
assert Filters.game(message)
def test_entities_filter(self, message, message_entity):
message.entities = [message_entity]
assert Filters.entity(message_entity.type)(message)
message.entities = []
assert not Filters.entity(MessageEntity.MENTION)(message)
second = message_entity.to_dict()
second['type'] = 'bold'
second = MessageEntity.de_json(second, None)
message.entities = [message_entity, second]
assert Filters.entity(message_entity.type)(message)
def test_private_filter(self, message):
assert Filters.private(message)
message.chat.type = "group"
assert not Filters.private(message)
def test_group_filter(self, message):
assert not Filters.group(message)
message.chat.type = "group"
assert Filters.group(message)
message.chat.type = "supergroup"
assert Filters.group(message)
def test_filters_user(self):
with pytest.raises(ValueError, match='user_id or username'):
Filters.user(user_id=1, username='user')
with pytest.raises(ValueError, match='user_id or username'):
Filters.user()
def test_filters_user_id(self, message):
assert not Filters.user(user_id=1)(message)
message.from_user.id = 1
assert Filters.user(user_id=1)(message)
message.from_user.id = 2
assert Filters.user(user_id=[1, 2])(message)
assert not Filters.user(user_id=[3, 4])(message)
def test_filters_username(self, message):
assert not Filters.user(username='user')(message)
assert not Filters.user(username='Testuser')(message)
message.from_user.username = 'user'
assert Filters.user(username='@user')(message)
assert Filters.user(username='user')(message)
assert Filters.user(username=['user1', 'user', 'user2'])(message)
assert not Filters.user(username=['@username', '@user_2'])(message)
def test_filters_chat(self):
with pytest.raises(ValueError, match='chat_id or username'):
Filters.chat(chat_id=-1, username='chat')
with pytest.raises(ValueError, match='chat_id or username'):
Filters.chat()
def test_filters_chat_id(self, message):
assert not Filters.chat(chat_id=-1)(message)
message.chat.id = -1
assert Filters.chat(chat_id=-1)(message)
message.chat.id = -2
assert Filters.chat(chat_id=[-1, -2])(message)
assert not Filters.chat(chat_id=[-3, -4])(message)
def test_filters_chat_username(self, message):
assert not Filters.chat(username='chat')(message)
message.chat.username = 'chat'
assert Filters.chat(username='@chat')(message)
assert Filters.chat(username='chat')(message)
assert Filters.chat(username=['chat1', 'chat', 'chat2'])(message)
assert not Filters.chat(username=['@chat1', 'chat_2'])(message)
def test_filters_invoice(self, message):
assert not Filters.invoice(message)
message.invoice = 'test'
assert Filters.invoice(message)
def test_filters_successful_payment(self, message):
assert not Filters.successful_payment(message)
message.successful_payment = 'test'
assert Filters.successful_payment(message)
def test_language_filter_single(self, message):
message.from_user.language_code = 'en_US'
assert (Filters.language('en_US'))(message)
assert (Filters.language('en'))(message)
assert not (Filters.language('en_GB'))(message)
assert not (Filters.language('da'))(message)
message.from_user.language_code = 'da'
assert not (Filters.language('en_US'))(message)
assert not (Filters.language('en'))(message)
assert not (Filters.language('en_GB'))(message)
assert (Filters.language('da'))(message)
def test_language_filter_multiple(self, message):
f = Filters.language(['en_US', 'da'])
message.from_user.language_code = 'en_US'
assert f(message)
message.from_user.language_code = 'en_GB'
assert not f(message)
message.from_user.language_code = 'da'
assert f(message)
def test_and_filters(self, message):
message.text = 'test'
message.forward_date = True
assert (Filters.text & Filters.forwarded)(message)
message.text = '/test'
assert not (Filters.text & Filters.forwarded)(message)
message.text = 'test'
message.forward_date = None
assert not (Filters.text & Filters.forwarded)(message)
message.text = 'test'
message.forward_date = True
assert (Filters.text & Filters.forwarded & Filters.private)(message)
def test_or_filters(self, message):
message.text = 'test'
assert (Filters.text | Filters.status_update)(message)
message.group_chat_created = True
assert (Filters.text | Filters.status_update)(message)
message.text = None
assert (Filters.text | Filters.status_update)(message)
message.group_chat_created = False
assert not (Filters.text | Filters.status_update)(message)
def test_and_or_filters(self, message):
message.text = 'test'
message.forward_date = True
assert (Filters.text & (Filters.forwarded | Filters.status_update))(message)
message.forward_date = False
assert not (Filters.text & (Filters.forwarded | Filters.status_update))(message)
message.pinned_message = True
        assert (Filters.text & (Filters.forwarded | Filters.status_update))(message)
assert str((Filters.text & (Filters.forwarded | Filters.entity(
MessageEntity.MENTION)))) == '<Filters.text and <Filters.forwarded or ' \
'Filters.entity(mention)>>'
def test_inverted_filters(self, message):
message.text = '/test'
assert Filters.command(message)
assert not (~Filters.command)(message)
message.text = 'test'
assert not Filters.command(message)
assert (~Filters.command)(message)
def test_inverted_and_filters(self, message):
message.text = '/test'
message.forward_date = 1
assert (Filters.forwarded & Filters.command)(message)
assert not (~Filters.forwarded & Filters.command)(message)
assert not (Filters.forwarded & ~Filters.command)(message)
assert not (~(Filters.forwarded & Filters.command))(message)
message.forward_date = None
assert not (Filters.forwarded & Filters.command)(message)
assert (~Filters.forwarded & Filters.command)(message)
assert not (Filters.forwarded & ~Filters.command)(message)
assert (~(Filters.forwarded & Filters.command))(message)
message.text = 'test'
assert not (Filters.forwarded & Filters.command)(message)
assert not (~Filters.forwarded & Filters.command)(message)
assert not (Filters.forwarded & ~Filters.command)(message)
assert (~(Filters.forwarded & Filters.command))(message)
def test_faulty_custom_filter(self, message):
class _CustomFilter(BaseFilter):
pass
custom = _CustomFilter()
with pytest.raises(NotImplementedError):
(custom & Filters.text)(message)
def test_custom_unnamed_filter(self, message):
class Unnamed(BaseFilter):
def filter(self, mes):
return True
unnamed = Unnamed()
assert str(unnamed) == Unnamed.__name__
| 2.3125
| 2
|
ooobuild/lo/chart/chart_document.py
|
Amourspirit/ooo_uno_tmpl
| 0
|
12776086
|
<filename>ooobuild/lo/chart/chart_document.py
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is an auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.chart
from abc import abstractproperty
from ..beans.x_property_set import XPropertySet as XPropertySet_bc180bfa
from .x_chart_document import XChartDocument as XChartDocument_d3630ca3
from ..drawing.x_draw_page_supplier import XDrawPageSupplier as XDrawPageSupplier_1a030eab
from ..xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier_9fbe1222
class ChartDocument(UserDefinedAttributesSupplier_9fbe1222, XPropertySet_bc180bfa, XChartDocument_d3630ca3, XDrawPageSupplier_1a030eab):
"""
Service Class
is the service for a chart document.
A chart document consists of a reference to the data source, the diagram and some additional elements like a main title, a sub-title or a legend.
**since**
OOo 1.1.2
See Also:
`API ChartDocument <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1chart_1_1ChartDocument.html>`_
"""
__ooo_ns__: str = 'com.sun.star.chart'
__ooo_full_ns__: str = 'com.sun.star.chart.ChartDocument'
__ooo_type_name__: str = 'service'
@abstractproperty
def HasLegend(self) -> bool:
"""
determines if the legend is shown or hidden.
"""
@abstractproperty
def HasMainTitle(self) -> bool:
"""
determines if the main title is shown or hidden.
"""
@abstractproperty
def HasSubTitle(self) -> bool:
"""
determines if the subtitle is shown or hidden.
"""
__all__ = ['ChartDocument']
| 1.382813
| 1
|
openstack_health/tests/test_status.py
|
MountakBernotas/https-github.com-openstack-openstack-health
| 0
|
12776087
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_health.test_run_aggregator import Status
from openstack_health.tests import base
class TestStatus(base.TestCase):
def test_that_success_string_translates_to_success(self):
status = Status('success')
self.assertEqual(True, status.is_success)
self.assertEqual(False, status.is_failure)
self.assertEqual(False, status.is_skip)
def test_that_xfail_string_translates_to_success(self):
status = Status('xfail')
self.assertEqual(True, status.is_success)
self.assertEqual(False, status.is_failure)
self.assertEqual(False, status.is_skip)
def test_that_fail_string_translates_to_failure(self):
status = Status('fail')
self.assertEqual(False, status.is_success)
self.assertEqual(True, status.is_failure)
self.assertEqual(False, status.is_skip)
def test_that_unxsuccess_string_translates_to_failure(self):
status = Status('unxsuccess')
self.assertEqual(False, status.is_success)
self.assertEqual(True, status.is_failure)
self.assertEqual(False, status.is_skip)
def test_that_null_translates_to_skip(self):
status = Status(None)
self.assertEqual(False, status.is_success)
self.assertEqual(False, status.is_failure)
self.assertEqual(True, status.is_skip)
def test_that_an_empty_string_translates_to_skip(self):
status = Status('')
self.assertEqual(False, status.is_success)
self.assertEqual(False, status.is_failure)
self.assertEqual(True, status.is_skip)
def test_that_a_random_string_translates_to_skip(self):
status = Status('$random1234')
self.assertEqual(False, status.is_success)
self.assertEqual(False, status.is_failure)
self.assertEqual(True, status.is_skip)
| 2.03125
| 2
|
torsionfit/qmscan/generate_dihedral.py
|
ChayaSt/torsionfit
| 14
|
12776088
|
<reponame>ChayaSt/torsionfit<filename>torsionfit/qmscan/generate_dihedral.py
__author__ = '<NAME>'
from pymol import stored, cmd
import os
import errno
def torsion_drive(atom1, atom2, atom3, atom4, interval, selection, path, mol_name,):
"""
This function generates input pdbs of dihedral angles selected of intervals specified with interval
:param atom1: name of atom 1 of dihedral
:param atom2: name of atom 2 of dihedral
:param atom3: name of atom 3 of dihedral
:param atom4: name of atom 4 of dihedral
:param interval: int or float (in degrees) of intervals to generate torsion scan for
:param selection: name of selection for molecule
:param path: path to where pdb files should be saved
    :param mol_name: name of the molecule to append to the filename
"""
atom1 = selection + " and name " + atom1
atom2 = selection + " and name " + atom2
atom3 = selection + " and name " + atom3
atom4 = selection + " and name " + atom4
for angle in range(0, 360 + int(interval), int(interval)):
try:
os.makedirs('%s/%i' % (path, angle))
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
cmd.set_dihedral(atom1, atom2, atom3, atom4, angle)
filename = '%s/%i/%s_%i.pdb' % (path, angle, mol_name, angle)
cmd.save(filename, selection, 1)
cmd.extend("torsion_drive", torsion_drive)
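# Example call from the PyMOL command line (names here are hypothetical):
#   torsion_drive('C1', 'C2', 'C3', 'C4', 30, 'my_mol', './scan_output', 'my_mol')
# generates a pdb for every 30-degree increment of the selected dihedral.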
| 2.640625
| 3
|
GidData/GidData.py
|
Aerobautics/google-images-download
| 0
|
12776089
|
<reponame>Aerobautics/google-images-download
#!/usr/bin/env python3
# Written by <NAME> (Aerobautics) November 2019
"""XML data processing for GID.
"""
from tkinter import *
from xml.dom.minidom import parse
from xml.dom.minidom import getDOMImplementation
import sys
import os
import errno
import xml.dom.minidom
sys.path.insert(1, '../')
from google_images_download import google_images_download
from GidResult import GidResult
from GidSearch import GidSearch
from GidSession import GidSession
from GidSettings import GidSettings
# GID stands for 'Google Image Downloader'
class GidData:
'This class contains the XML parsing functions.'
def __init__(self):
self.gidSession = []
self.sessionList = []
self._currentSession = None
self.currentSearch = []
self.searchList = []
self.sessionFile = []
def get_currentSession(self):
return self.currentSession
def set_currentSession(self, value):
self._currentSession = value
def populateSearch(self, search):
        setting = GidSettings()
        # Each optional <search> child element maps onto the GidSettings
        # attribute of the same name; read its text content when present.
        field_names = [
            "config_file", "keywords", "keywords_from_file", "prefix_keywords",
            "suffix_keywords", "limit", "related_images", "format", "color",
            "color_type", "usage_rights", "size", "exact_size", "aspect_ratio",
            "type", "time", "delay", "url", "single_image", "output_directory",
            "image_directory", "no_directory", "proxy", "similar_images",
            "specific_site", "print_urls", "print_size", "print_paths",
            "metadata", "extract_metadata", "socket_timeout", "thumbnail",
            "thumbnail_only", "language", "prefix", "chromedriver",
            "safe_search", "no_numbering", "offset", "save_source",
            "no_download", "silent_mode", "ignore_urls", "help",
        ]
        for name in field_names:
            elements = search.getElementsByTagName(name)
            if elements:
                value = elements[0].childNodes[0].data
                setattr(setting, name, value)
                print(value)
        output = GidSearch(setting)
        if search.hasAttribute("identity"):
            output.identity = search.getAttribute("identity")
        return output
def readSession(self):
# Open XML document using the minidom parser
#filenames = []
session = None
session_location = os.path.join(os.path.realpath('.'), 'temp')
session_location = os.path.join(session_location, 'session.gid')
session_location = os.path.abspath(session_location)
#print("[GidData.readSession()] session_location = {}".format(session_location))
if os.path.exists(session_location):
DOMTree = xml.dom.minidom.parse(session_location)
self._currentSession = DOMTree
collection = DOMTree.documentElement
temporary_child = collection.getElementsByTagName("session")[0]
session = self.transcribeSession(temporary_child)
else:
print("../temp/session.gid does not exist.")
return session
def readSearchList(self):
if self._currentSession is not None:
DOMTree = self._currentSession
collection = DOMTree.documentElement
searches = collection.getElementsByTagName("search")
for search in searches:
self.searchList.append(self.populateSearch(search))
#return self.searchList
# Remove output_items and thumbnail_folder_path parameters
#def storeSearch(self, search, output_items, thumbnail_folder_path):
# self.currentSearch = search
# session_location = os.path.join(os.path.realpath('.'), 'temp')
# session_location = os.path.join(session_location, 'session.gid')
# session_location = os.path.abspath(session_location)
# #print("[GidData.storeSearch()] session_location = {}".format(session_location))
# self.sessionFile = open(session_location, "w")
# xmlString = self.createXmlString(output_items, thumbnail_folder_path)
# self.sessionFile.write(xmlString)
# self.sessionFile.close()
def storeSearch(self, inputSearch, searchLocation):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
temporary_child = self.translateSearch(inputSearch)
DOMTree.documentElement.appendChild(temporary_child)
self.writeSearch(DOMTree, searchLocation)
def storeSession(self, inputSession):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
temporary_child = self.translateSession(inputSession)
DOMTree.documentElement.appendChild(temporary_child)
self.writeSession(DOMTree)
def transcribePicture(self, inputChild):
output = None
location = None
provenance = None
thumbnail = None
alternate = None
if inputChild.hasAttribute("thumbnail"):
            thumbnail = inputChild.getAttribute("thumbnail")
if inputChild.hasAttribute("alternate"):
            alternate = inputChild.getAttribute("alternate")
sub_child = inputChild.getElementsByTagName("location")[0]
location = sub_child.childNodes[0].data
sub_child = inputChild.getElementsByTagName("provenance")[0]
provenance = sub_child.childNodes[0].data
output = GidPicture()
if thumbnail is not None:
            thumbnail = thumbnail.upper()
if thumbnail == "FALSE":
thumbnail = False
elif thumbnail == "NO":
thumbnail = False
elif thumbnail == "TRUE":
thumbnail = True
elif thumbnail == "YES":
thumbnail = True
else:
print("WARNING GidData.transcribePicture(DOMTree): invalid thumbnail setting")
thumbnail = None
output.thumbnail = thumbnail
output.location = location
output.provenance = provenance
        output.alternate = alternate
return output
def translatePicture(self, inputPicture):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
temporary_child = DOMTree.createElement('picture')
if inputPicture.thumbnail:
temporary_child.setAttribute('thumbnail', 'true')
else:
temporary_child.setAttribute('thumbnail', 'false')
sub_child = DOMTree.createElement('location')
if inputPicture.location is not None:
text_node = DOMTree.createTextNode(inputPicture.location)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
sub_child = DOMTree.createElement('provenance')
if inputPicture.provenance is not None:
text_node = DOMTree.createTextNode(inputPicture.provenance)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputPicture.note:
sub_child = DOMTree.createElement('note')
text_node = DOMTree.createTextNode(inputPicture.note)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputPicture.alternate:
sub_child = DOMTree.createElement('alternate')
text_node = DOMTree.createTextNode(inputPicture.alternate)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputPicture.provenance_size:
sub_child = DOMTree.createElement('provenance_size')
text_node = DOMTree.createTextNode(inputPicture.provenance_size)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputPicture.provenance_type:
sub_child = DOMTree.createElement('provenance_type')
text_node = DOMTree.createTextNode(inputPicture.provenance_type)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
return temporary_child
def transcribeResult(self, DOMTree):
image_filename = None
image_format = None
image_height = None
image_width = None
image_link = None
image_description = None
image_host = None
image_source = None
image_thumbnail_url = None
pictures = [] # Eliminate?
output = GidResult.GidResult()
temporary_child = DOMTree.getElementsByTagName("picture")
for child in temporary_child:
picture = self.transcribePicture(child)
pictures.append(picture)
if picture.thumbnail == True:
output.thumbnail = picture
elif picture.thumbnail == False:
output.picture = picture
else:
print("WARNING GidData.transcribeResult(DOMTree): picture.thumbnail invalid")
temporary_child = DOMTree.getElementsByTagName("image_filename")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_filename = sub_child[0].data
if image_filename:
output.image_filename = image_filename
temporary_child = DOMTree.getElementsByTagName("image_format")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_format = sub_child[0].data
if image_format:
output.image_format = image_format
temporary_child = DOMTree.getElementsByTagName("image_height")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_height = sub_child[0].data
if image_height:
output.image_height = image_height
temporary_child = DOMTree.getElementsByTagName("image_width")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_width = sub_child[0].data
if image_width:
output.image_width = image_width
temporary_child = DOMTree.getElementsByTagName("image_link")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_link = sub_child[0].data
if image_link:
output.image_link = image_link
temporary_child = DOMTree.getElementsByTagName("image_description")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_description = sub_child[0].data
if image_description:
output.image_description = image_description
temporary_child = DOMTree.getElementsByTagName("image_host")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_host = sub_child[0].data
if image_host:
output.image_host = image_host
temporary_child = DOMTree.getElementsByTagName("image_source")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_source = sub_child[0].data
if image_source:
output.image_source = image_source
temporary_child = DOMTree.getElementsByTagName("image_thumbnail_url")
if temporary_child:
temporary_child = temporary_child[0]
sub_child = temporary_child.childNodes
if sub_child:
image_thumbnail_url = sub_child[0].data
if image_thumbnail_url:
output.image_thumbnail_url = image_thumbnail_url
return output
def translateResult(self, inputResult):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
temporary_child = DOMTree.createElement('result')
if inputResult.image_filename is not None:
sub_child = DOMTree.createElement('image_filename')
text_node = DOMTree.createTextNode(inputResult.image_filename)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_format is not None:
sub_child = DOMTree.createElement('image_format')
text_node = DOMTree.createTextNode(inputResult.image_format)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_height is not None:
sub_child = DOMTree.createElement('image_height')
text_node = DOMTree.createTextNode(str(inputResult.image_height))
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_width is not None:
sub_child = DOMTree.createElement('image_width')
text_node = DOMTree.createTextNode(str(inputResult.image_width))
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_link is not None:
sub_child = DOMTree.createElement('image_link')
text_node = DOMTree.createTextNode(inputResult.image_link)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_description is not None:
sub_child = DOMTree.createElement('image_description')
text_node = DOMTree.createTextNode(inputResult.image_description)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_host is not None:
sub_child = DOMTree.createElement('image_host')
text_node = DOMTree.createTextNode(inputResult.image_host)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_source is not None:
sub_child = DOMTree.createElement('image_source')
text_node = DOMTree.createTextNode(inputResult.image_source)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.image_thumbnail_url is not None:
sub_child = DOMTree.createElement('image_thumbnail_url')
text_node = DOMTree.createTextNode(inputResult.image_thumbnail_url)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputResult.thumbnail is not None:
sub_child = self.translatePicture(inputResult.thumbnail)
temporary_child.appendChild(sub_child)
if inputResult.picture is not None:
sub_child = self.translatePicture(inputResult.picture)
temporary_child.appendChild(sub_child)
return temporary_child
def transcribeSearch(self, DOMTree):
results = []
identity = None
output = None
if DOMTree.hasAttribute("identity"):
identity = DOMTree.getAttribute("identity")
else:
print("WARNING GidData.transcribe(DOMTree): search has no identity")
temporary_child = DOMTree.getElementsByTagName("setting")
if temporary_child:
temporary_child = temporary_child[0]
setting = self.transcribeSetting(temporary_child)
output = GidSearch.GidSearch(input_settings = setting)
else:
output = GidSearch.GidSearch()
if identity is not None:
output.identity = identity
temporary_child = DOMTree.getElementsByTagName("result")
for child in temporary_child:
result = self.transcribeResult(child)
results.append(result)
output.results = results
return output
def translateSearch(self, inputSearch):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
temporary_child = DOMTree.createElement('search')
temporary_child.setAttribute("identity", inputSearch.identity)
sub_child = self.translateSetting(inputSearch.settings)
temporary_child.appendChild(sub_child)
for result in inputSearch.results:
sub_child = self.translateResult(result)
temporary_child.appendChild(sub_child)
return temporary_child
def transcribeSession(self, DOMTree):
searches = []
output = GidSession.GidSession()
temporary_child = DOMTree.getElementsByTagName("search")
for child in temporary_child:
search = self.transcribeSearch(child)
searches.append(search)
output.searches = searches
return output
def translateSession(self, inputSession):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
temporary_child = DOMTree.createElement('session')
for search in inputSession.searches:
sub_child = self.translateSearch(search)
temporary_child.appendChild(sub_child)
return temporary_child
def transcribeSetting(self, DOMTree):
config_file = None
keywords = None
keywords_from_file = None
prefix_keywords = None
suffix_keywords = None
limit = None
related_images = None
INPUT_format = None
color = None
color_type = None
usage_rights = None
size = None
exact_size = None
aspect_ratio = None
INPUT_type = None
time = None
time_range = None
delay = None
url = None
single_image = None
output_directory = None
image_directory = None
no_directory = None
proxy = None
similar_images = None
specific_site = None
print_urls = None
print_size = None
print_paths = None
metadata = None
extract_metadata = None
socket_timeout = None
thumbnail = None
thumbnail_only = None
language = None
prefix = None
chromedriver = None
safe_search = None
no_numbering = None
offset = None
save_source = None
no_download = None
silent_mode = None
ignore_urls = None
help = None
output = GidSettings()
temporary_child = DOMTree.getElementsByTagName("config_file")[0]
config_file = temporary_child.childNodes[0].data
if config_file:
output.config_file = config_file
temporary_child = DOMTree.getElementsByTagName("keywords")[0]
keywords = temporary_child.childNodes[0].data
if keywords:
output.keywords = keywords
temporary_child = DOMTree.getElementsByTagName("keywords_from_file")[0]
keywords_from_file = temporary_child.childNodes[0].data
if keywords_from_file:
output.keywords_from_file = keywords_from_file
temporary_child = DOMTree.getElementsByTagName("prefix_keywords")[0]
prefix_keywords = temporary_child.childNodes[0].data
if prefix_keywords:
output.prefix_keywords = prefix_keywords
temporary_child = DOMTree.getElementsByTagName("suffix_keywords")[0]
suffix_keywords = temporary_child.childNodes[0].data
if suffix_keywords:
output.suffix_keywords = suffix_keywords
temporary_child = DOMTree.getElementsByTagName("limit")[0]
limit = temporary_child.childNodes[0].data
if limit:
output.limit = limit
temporary_child = DOMTree.getElementsByTagName("related_images")[0]
related_images = temporary_child.childNodes[0].data
if related_images:
output.related_images = related_images
temporary_child = DOMTree.getElementsByTagName("format")[0]
INPUT_format = temporary_child.childNodes[0].data
if INPUT_format:
output.format = INPUT_format
temporary_child = DOMTree.getElementsByTagName("color")[0]
color = temporary_child.childNodes[0].data
if color:
output.color = color
temporary_child = DOMTree.getElementsByTagName("color_type")[0]
color_type = temporary_child.childNodes[0].data
if color_type:
output.color_type = color_type
temporary_child = DOMTree.getElementsByTagName("usage_rights")[0]
usage_rights = temporary_child.childNodes[0].data
if usage_rights:
output.usage_rights = usage_rights
temporary_child = DOMTree.getElementsByTagName("size")[0]
_size = temporary_child.childNodes[0].data
if _size:
output.size = _size
temporary_child = DOMTree.getElementsByTagName("exact_size")[0]
exact_size = temporary_child.childNodes[0].data
if exact_size:
output.exact_size = exact_size
temporary_child = DOMTree.getElementsByTagName("aspect_ratio")[0]
aspect_ratio = temporary_child.childNodes[0].data
if aspect_ratio:
output.aspect_ratio = aspect_ratio
temporary_child = DOMTree.getElementsByTagName("type")[0]
_type = temporary_child.childNodes[0].data
if _type:
output.type = _type
temporary_child = DOMTree.getElementsByTagName("time_range")[0]
time_range = temporary_child.childNodes[0].data
if time_range:
output.time_range = time_range
temporary_child = DOMTree.getElementsByTagName("delay")[0]
delay = temporary_child.childNodes[0].data
if delay:
output.delay = delay
temporary_child = DOMTree.getElementsByTagName("url")[0]
url = temporary_child.childNodes[0].data
if url:
output.url = url
temporary_child = DOMTree.getElementsByTagName("single_image")[0]
single_image = temporary_child.childNodes[0].data
if single_image:
output.single_image = single_image
temporary_child = DOMTree.getElementsByTagName("output_directory")[0]
output_directory = temporary_child.childNodes[0].data
if output_directory:
output.output_directory = output_directory
temporary_child = DOMTree.getElementsByTagName("image_directory")[0]
image_directory = temporary_child.childNodes[0].data
if image_directory:
output.image_directory = image_directory
temporary_child = DOMTree.getElementsByTagName("no_directory")[0]
no_directory = temporary_child.childNodes[0].data
if no_directory:
output.no_directory = no_directory
temporary_child = DOMTree.getElementsByTagName("proxy")[0]
proxy = temporary_child.childNodes[0].data
if proxy:
output.proxy = proxy
temporary_child = DOMTree.getElementsByTagName("similar_images")[0]
similar_images = temporary_child.childNodes[0].data
if similar_images:
output.similar_images = similar_images
temporary_child = DOMTree.getElementsByTagName("specific_site")[0]
specific_site = temporary_child.childNodes[0].data
if specific_site:
output.specific_site = specific_site
temporary_child = DOMTree.getElementsByTagName("print_urls")[0]
print_urls = temporary_child.childNodes[0].data
if print_urls:
output.print_urls = print_urls
temporary_child = DOMTree.getElementsByTagName("print_size")[0]
print_size = temporary_child.childNodes[0].data
if print_size:
output.print_size = print_size
temporary_child = DOMTree.getElementsByTagName("print_paths")[0]
print_paths = temporary_child.childNodes[0].data
if print_paths:
output.print_paths = print_paths
temporary_child = DOMTree.getElementsByTagName("metadata")[0]
metadata = temporary_child.childNodes[0].data
if metadata:
output.metadata = metadata
temporary_child = DOMTree.getElementsByTagName("extract_metadata")[0]
extract_metadata = temporary_child.childNodes[0].data
if extract_metadata:
output.extract_metadata = extract_metadata
temporary_child = DOMTree.getElementsByTagName("socket_timeout")[0]
socket_timeout = temporary_child.childNodes[0].data
if socket_timeout:
output.socket_timeout = socket_timeout
temporary_child = DOMTree.getElementsByTagName("thumbnail")[0]
thumbnail = temporary_child.childNodes[0].data
if thumbnail:
output.thumbnail = thumbnail
temporary_child = DOMTree.getElementsByTagName("thumbnail_only")[0]
thumbnail_only = temporary_child.childNodes[0].data
if thumbnail_only:
output.thumbnail_only = thumbnail_only
temporary_child = DOMTree.getElementsByTagName("language")[0]
language = temporary_child.childNodes[0].data
if language:
output.language = language
temporary_child = DOMTree.getElementsByTagName("prefix")[0]
prefix = temporary_child.childNodes[0].data
if prefix:
output.prefix = prefix
temporary_child = DOMTree.getElementsByTagName("chromedriver")[0]
chromedriver = temporary_child.childNodes[0].data
if chromedriver:
output.chromedriver = chromedriver
temporary_child = DOMTree.getElementsByTagName("safe_search")[0]
safe_search = temporary_child.childNodes[0].data
if safe_search:
output.safe_search = safe_search
temporary_child = DOMTree.getElementsByTagName("no_numbering")[0]
no_numbering = temporary_child.childNodes[0].data
if no_numbering:
output.no_numbering = no_numbering
temporary_child = DOMTree.getElementsByTagName("offset")[0]
offset = temporary_child.childNodes[0].data
if offset:
output.offset = offset
temporary_child = DOMTree.getElementsByTagName("save_source")[0]
save_source = temporary_child.childNodes[0].data
if save_source:
output.save_source = save_source
temporary_child = DOMTree.getElementsByTagName("no_download")[0]
no_download = temporary_child.childNodes[0].data
if no_download:
output.no_download = no_download
temporary_child = DOMTree.getElementsByTagName("silent_mode")[0]
silent_mode = temporary_child.childNodes[0].data
if silent_mode:
output.silent_mode = silent_mode
temporary_child = DOMTree.getElementsByTagName("ignore_urls")[0]
ignore_urls = temporary_child.childNodes[0].data
if ignore_urls:
output.ignore_urls = ignore_urls
temporary_child = DOMTree.getElementsByTagName("help")[0]
help = temporary_child.childNodes[0].data
if help:
output.help = help
return output
def translateSetting(self, inputSetting):
implementation = getDOMImplementation()
document = implementation.createDocument(None, "gid", None)
DOMTree = document
        temporary_child = DOMTree.createElement('setting')
if inputSetting.config_file is not None:
sub_child = DOMTree.createElement('config_file')
text_node = DOMTree.createTextNode(inputSetting.config_file)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.keywords is not None:
sub_child = DOMTree.createElement('keywords')
text_node = DOMTree.createTextNode(inputSetting.keywords)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.keywords_from_file is not None:
sub_child = DOMTree.createElement('keywords_from_file')
text_node = DOMTree.createTextNode(inputSetting.keywords_from_file)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.prefix_keywords is not None:
sub_child = DOMTree.createElement('prefix_keywords')
text_node = DOMTree.createTextNode(inputSetting.prefix_keywords)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.suffix_keywords is not None:
sub_child = DOMTree.createElement('suffix_keywords')
text_node = DOMTree.createTextNode(inputSetting.suffix_keywords)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.limit is not None:
sub_child = DOMTree.createElement('limit')
text_node = DOMTree.createTextNode(str(inputSetting.limit))
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.related_images is not None:
sub_child = DOMTree.createElement('related_images')
text_node = DOMTree.createTextNode(inputSetting.related_images)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.format is not None:
sub_child = DOMTree.createElement('format')
text_node = DOMTree.createTextNode(inputSetting.format)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
if inputSetting.color is not None:
sub_child = DOMTree.createElement('color')
text_node = DOMTree.createTextNode(inputSetting.color)
sub_child.appendChild(text_node)
temporary_child.appendChild(sub_child)
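        # NOTE: only the fields above (config_file through color) are serialized here;
        # the remaining GidSettings attributes that transcribeSetting reads back are
        # not yet written out by this method.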
return temporary_child
def writeSearch(self, DOMTree, searchLocation):
output_file = open(searchLocation, "w")
        DOMTree.writexml(output_file, indent="\t", addindent="\t", newl='\n')
        output_file.close()
def writeSession(self, DOMTree):
session_location = os.path.join(os.path.realpath('.'), 'temp')
session_location = os.path.join(session_location, 'session.gid')
session_location = os.path.abspath(session_location)
output_file = open(session_location, "w")
        DOMTree.writexml(output_file, indent="\t", addindent="\t", newl='\n')
        output_file.close()
#def createXmlString(self, input_items, input_directory):
# xmlString = '<?xml version="1.0" encoding="UTF-8"?>\n'
# xmlString = xmlString + '<session>\n'
# xmlString = xmlString + '\t<search identity=\"'
# xmlString = xmlString + self.currentSearch.identity + '\">\n'
# xmlString = xmlString + '\t\t<setting>\n'
# xmlString = xmlString + '\t\t\t<keyword>' + self.currentSearch.settings.keywords
# xmlString = xmlString + '</keyword>\n'
# xmlString = xmlString + '\t\t</setting>\n'
# for item in input_items:
# xmlString = xmlString + '\t\t<result>\n'
# xmlString = xmlString + '\t\t\t<picture thumbnail="true">\n'
# xmlString = xmlString + '\t\t\t\t<location>'
# xmlString = xmlString + os.path.join(input_directory, item['image_filename']).replace('&', '&')
# xmlString = xmlString + '</location>\n'
# xmlString = xmlString + '\t\t\t\t<provenance>'
# xmlString = xmlString + '</provenance>\n'
# xmlString = xmlString + '\t\t\t</picture>\n'
# xmlString = xmlString + '\t\t</result>\n'
# xmlString = xmlString + '\t</search>\n'
# xmlString = xmlString + '</session>\n'
# return xmlString
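# Hedged usage sketch (names taken from the methods above; this block is not part
# of the original module): a typical round trip through GidData might look like
#   data = GidData()
#   session = data.readSession()      # parses ./temp/session.gid when present
#   data.readSearchList()             # fills data.searchList from that session
#   data.storeSession(session)        # writes the session back to ./temp/session.gid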
| 2.65625
| 3
|
Pygame/JonathanGame/analogclock.py
|
youaresherlock/PythonPractice
| 0
|
12776090
|
<gh_stars>0
import sys, random, math, pygame
from pygame import locals
# main program begins
pygame.init()
screen = pygame.display.set_mode((600, 500))
pygame.display.set_caption("Circle Demo")
screen.fill((0, 0, 100))
pos_x = 300
pos_y = 250
radius = 200
angle = 360
color = 240, 240, 240
# repeating loop
while True:
for event in pygame.event.get():
if event.type == locals.QUIT:
sys.exit()
keys = pygame.key.get_pressed()
if keys[locals.K_ESCAPE]:
sys.exit()
# increment angle
angle += 1
if angle >= 360:
angle = 0
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
color = r, g, b
# calculate coordinates
x = math.cos(math.radians(angle)) * radius
y = math.sin(math.radians(angle)) * radius
# draw one step around the circle
pos = int(pos_x + x), int(pos_y + y)
print(pos)
pygame.draw.circle(screen, color, pos, 10, 0)
pygame.display.update()
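# Note (suggestion, not in the original demo): the loop above redraws as fast as
# the CPU allows; capping it with a pygame clock is the usual fix, e.g.
#   clock = pygame.time.Clock()   # create once before the loop
#   clock.tick(60)                # call once per iteration to limit to ~60 FPS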
| 3.765625
| 4
|
tests/test_c_nms_onnx_ex.py
|
hozmi/box_utils
| 0
|
12776091
|
"""Test NMS.
Run the examples described in `ONNX docs`_.
.. _ONNX docs: https://github.com/onnx/onnx/blob/main/docs/Operators.md#NonMaxSuppression
"""
# import pytest
import numpy as np
import box_utils._c.box_nms as box_nms
def test_nms_suppress_by_iou():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array(
[[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_IOU_and_scores():
"""Test NMS - suppress by IoU and scores."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array(
[[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.4]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_single_box():
"""Test NMS - single box."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[[0.9]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_identical_boxes():
"""Test NMS - identical boxes."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[[
0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9
]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_limit_output_size():
"""Test NMS - limit output size."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_batches():
"""Test NMS - two batches."""
# --
boxes = np.array([[[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]],
[[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],
[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [1, 0, 3], [1, 0, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_classes():
"""Test NMS - two classes."""
# --
boxes = np.array([[
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]]).astype(np.float32)
scores = np.array([[
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [0, 1, 3], [0, 1, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format():
"""Test NMS - center-point box format."""
# --
boxes = np.array([[
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]]).astype(np.float32)
scores = np.array([[
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_flipped_coordinates():
"""Test NMS - flipped coordinates."""
# --
boxes = np.array([[
[1.0, 1.0, 0.0, 0.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, 0.9, 1.0, -0.1],
[0.0, 10.0, 1.0, 11.0],
[1.0, 10.1, 0.0, 11.1],
[1.0, 101.0, 0.0, 100.0]
]]).astype(np.float32)
scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([[0, 0, 3], [0, 0, 0], [0, 0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
# ---------------------------------------------------------
# box_nms can be called in some other way.
# ---------------------------------------------------------
def test_nms_suppress_by_iou_nobatch():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([[
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array(
[[0, 3], [0, 0], [0, 5]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_iou_noclass():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_suppress_by_iou_notopk():
"""Test NMS - suppress by IoU."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([-1]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_two_classes_nobatch():
"""Test NMS - two classes."""
# --
boxes = np.array([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.1, 1.0, 1.1],
[0.0, -0.1, 1.0, 0.9],
[0.0, 10.0, 1.0, 11.0],
[0.0, 10.1, 1.0, 11.1],
[0.0, 100.0, 1.0, 101.0]
]).astype(np.float32)
scores = np.array([
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3],
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([2]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 3], [0, 0], [1, 3], [1, 0]]).astype(np.int64)
# --
result = box_nms.ltrb_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format_nobatch():
"""Test NMS - center-point box format."""
# --
boxes = np.array([
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]).astype(np.float32)
scores = np.array([
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([
[0, 3], [0, 0], [0, 5]]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
def test_nms_center_point_box_format_noclass():
"""Test NMS - center-point box format."""
# --
boxes = np.array([
[0.5, 0.5, 1.0, 1.0],
[0.5, 0.6, 1.0, 1.0],
[0.5, 0.4, 1.0, 1.0],
[0.5, 10.5, 1.0, 1.0],
[0.5, 10.6, 1.0, 1.0],
[0.5, 100.5, 1.0, 1.0]
]).astype(np.float32)
scores = np.array(
[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]).astype(np.float32)
max_output_boxes_per_class = np.array([3]).astype(np.int64)
iou_threshold = np.array([0.5]).astype(np.float32)
score_threshold = np.array([0.0]).astype(np.float32)
selected_indices = np.array([3, 0, 5]).astype(np.int64)
# --
result = box_nms.xywh_nms(
boxes, scores,
score_threshold[0], iou_threshold[0], max_output_boxes_per_class[0])
np.testing.assert_array_equal(result, selected_indices)
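# Observations inferred from the expected outputs above (not a statement of the
# box_utils API contract): with batched 3-D inputs the result rows follow the
# ONNX convention [batch_index, class_index, box_index]; the "nobatch" and
# "noclass" variants drop the corresponding leading column(s), and a negative
# max_output_boxes_per_class appears to mean "no top-k limit".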
| 2.453125
| 2
|
Codes/gracekoo/interview_8.py
|
ghoslation/algorithm
| 256
|
12776092
|
# -*- coding: utf-8 -*-
# @Time: 2020/7/16 11:38
# @Author: GraceKoo
# @File: interview_8.py
# @Desc: https://www.nowcoder.com/practice/8c82a5b80378478f9484d87d1c5f12a4?tpId=13&rp=1&ru=%2Fta%2Fcoding-interviews&qr
# u=%2Fta%2Fcoding-interviews%2Fquestion-ranking
class Solution:
def climbStairs(self, n: int) -> int:
if 0 <= n <= 2:
return n
dp = [i for i in range(n)]
dp[0] = 1
dp[1] = 2
for i in range(2, n):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[-1]
so = Solution()
print(so.climbStairs(3))
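# The dp table mirrors the Fibonacci recurrence: ways(i) = ways(i-1) + ways(i-2),
# so climbStairs(3) prints 3 (the orderings 1+1+1, 1+2 and 2+1).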
| 3.59375
| 4
|
src/compiler/gen_comb.py
|
luinaudt/deparser
| 7
|
12776093
|
from collections import OrderedDict
def genListCombinaison(P4Graph):
""" Generate a set of tuples from graph.
Each tuple represents possible active headers at the same time
"""
combinaison = []
return combinaison
def genListHeaders(P4Graph):
""" Generate the dictionnary of headers.
P4Graph : JSon imported file
Name, size
"""
headers = OrderedDict()
return headers
def genBitPos(combinaison, Entete, bus_width=64, muxNum=0, Payload=False):
""" Gen list of unique tuple (name, pos, [Etat]) of the muxNum.
Each tuple correspond to the bit of a protocol that have to be connected
to the mux
"""
def GetPosTuple(nom, pos, liste):
        for e, v in enumerate(liste):
if v[0] == nom and v[1] == pos:
return e
return 0
if bus_width <= muxNum:
raise ValueError("bus width {} smaller than mux number :{}".format(
bus_width, muxNum))
listeEntree = []
EtatAssocie = []
for combeNum, comb in enumerate(combinaison):
pos = muxNum
for j in comb:
while pos < Entete[j]:
if (j, pos) not in listeEntree:
listeEntree.append((j, pos))
EtatAssocie.append([])
EtatAssocie[-1].append(combeNum)
else:
e = GetPosTuple(j, pos, listeEntree)
if combeNum not in EtatAssocie[e]:
EtatAssocie[e].append(combeNum)
else:
print("{}, {}".format(j, pos))
pos += bus_width
pos -= Entete[j]
if Payload:
if ("Payload", pos) not in listeEntree:
listeEntree.append(("Payload", pos))
EtatAssocie.append([])
EtatAssocie[-1].append(combeNum)
else:
e = GetPosTuple("Payload", pos, listeEntree)
if combeNum not in EtatAssocie[e]:
EtatAssocie[e].append(combeNum)
else:
print("{}, {}".format(j, pos))
for i, tup in enumerate(listeEntree):
newTup = (tup[0], tup[1], EtatAssocie[i])
listeEntree[i] = newTup
return listeEntree
def sortListBitTuple(liste, headers):
output = []
def takeSecond(elem):
return elem[1]
for entete in headers:
tmp = []
for nom, pos, etat in liste:
if nom == entete:
tmp.append((nom, pos, etat))
tmp.sort(key=takeSecond)
output.extend(tmp)
return output
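# Hedged usage sketch (inputs invented for illustration; not part of the original
# file): with a 16-bit "eth" header and an 8-bit "ipv4" header over an 8-bit bus,
# genBitPos lists which header bits can reach mux 0 and in which combinations,
# and sortListBitTuple groups them per header.
if __name__ == "__main__":
    example_headers = OrderedDict([("eth", 16), ("ipv4", 8)])
    example_combs = [("eth",), ("eth", "ipv4")]
    bits = genBitPos(example_combs, example_headers, bus_width=8, muxNum=0)
    print(sortListBitTuple(bits, example_headers))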
| 2.921875
| 3
|
tests/test_rut_chile.py
|
merfrei/rut-chile
| 0
|
12776094
|
import pytest
from rut_chile import rut_chile
class TestIsValidRutTests:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("1", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError),
("1.111K", ValueError),
(".1", ValueError),
("123.K", ValueError),
("123.12-K", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.is_valid_rut(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("9868503-1", False),
("21518268-2", False),
("17175325-3", False),
("20930576-4", False),
("13402128-5", False),
("20737522-6", False),
("6842256-7", False),
("14983005-8", False),
("20247667-9", False),
("17832479-k", False),
("12667869-0", False)
])
def test_invalid_rut(self, test_input, expected_value):
assert rut_chile.is_valid_rut(test_input) == expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("00", True),
("0-0", True),
("1-9", True),
("98685030", True),
("9868503-0", True),
("9.868.503-0", True),
("21518268-1", True),
("17175325-2", True),
("20930576-3", True),
("13402128-4", True),
("20737522-5", True),
("6842256-6", True),
("14983005-7", True),
("20247667-8", True),
("17832479-9", True),
("12667869-k", True),
("12667869-K", True),
("12.667.869-K", True),
("12.667.869-k", True)
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.is_valid_rut(test_input) == expected_value
class TestGetVerificationDigit:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("1k", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("12312-K", ValueError),
("12.312-K", ValueError),
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.get_verification_digit(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("0", "0"),
("1", "9"),
("9868503", "0"),
("21518268", "1"),
("17175325", "2"),
("20930576", "3"),
("13402128", "4"),
("20737522", "5"),
("6842256", "6"),
("14983005", "7"),
("20247667", "8"),
("17832479", "9"),
("12667869", "k")
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.get_verification_digit(test_input) == expected_value
class TestGetCapitalizedVerificationDigit:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("1k", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("12312-K", ValueError),
("12.312-K", ValueError),
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.get_capitalized_verification_digit(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("0", "0"),
("1", "9"),
("9868503", "0"),
("21518268", "1"),
("17175325", "2"),
("20930576", "3"),
("13402128", "4"),
("20737522", "5"),
("6842256", "6"),
("14983005", "7"),
("20247667", "8"),
("17832479", "9"),
("12667869", "K")
])
def test_valid_rut(self, test_input, expected_value):
digit = rut_chile.get_capitalized_verification_digit(test_input)
assert digit == expected_value
class TestFormatRutWithDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_rut_with_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1.234-5"),
("123456", "12.345-6"),
("1234567", "123.456-7"),
("12345678", "1.234.567-8"),
("123456789", "12.345.678-9"),
("123456789k", "123.456.789-k"),
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.format_rut_with_dots(test_input) == expected_value
class TestFormatCapitalizedRutWithDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_capitalized_rut_with_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1.234-5"),
("123456", "12.345-6"),
("1234567", "123.456-7"),
("12345678", "1.234.567-8"),
("123456789", "12.345.678-9"),
("123456789k", "123.456.789-K"),
])
def test_valid_rut(self, test_input, expected_value):
rut = rut_chile.format_capitalized_rut_with_dots(test_input)
assert rut == expected_value
class TestFormatRutWithoutDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_rut_without_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1234-5"),
("123456", "12345-6"),
("1234567", "123456-7"),
("12345678", "1234567-8"),
("123456789", "12345678-9"),
("123456789k", "123456789-k"),
])
def test_valid_rut(self, test_input, expected_value):
assert rut_chile.format_rut_without_dots(test_input) == expected_value
class TestFormatCapitalizedRutWithoutDots:
@pytest.mark.parametrize("test_input, expected_value", [
(None, ValueError),
("", ValueError),
(" ", ValueError),
("k", ValueError),
("ab", ValueError),
("*", ValueError),
("1-", ValueError),
(".-", ValueError),
("1.", ValueError),
("1.11", ValueError)
])
def test_invalid_argument(self, test_input, expected_value):
with pytest.raises(ValueError) as error:
rut_chile.format_capitalized_rut_without_dots(test_input)
assert type(error.value) is expected_value
@pytest.mark.parametrize("test_input, expected_value", [
("12", "1-2"),
("123", "12-3"),
("1234", "123-4"),
("12345", "1234-5"),
("123456", "12345-6"),
("1234567", "123456-7"),
("12345678", "1234567-8"),
("123456789", "12345678-9"),
("123456789k", "123456789-K"),
])
def test_valid_rut(self, test_input, expected_value):
rut = rut_chile.format_capitalized_rut_without_dots(test_input)
assert rut == expected_value
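# Quick reference derived from the expectations exercised above:
#   rut_chile.is_valid_rut("12.667.869-k")        -> True
#   rut_chile.get_verification_digit("12667869")  -> "k"
#   rut_chile.format_rut_with_dots("123456789k")  -> "123.456.789-k"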
| 2.578125
| 3
|
tests/components/slack/__init__.py
|
tbarbette/core
| 22,481
|
12776095
|
<reponame>tbarbette/core
"""Slack notification tests."""
| 0.871094
| 1
|
datasets.py
|
skyoung/ROAM
| 37
|
12776096
|
# ------------------------------------------------------------------
# PyTorch implementation of
# "ROAM: Recurrently Optimizing Tracking Model", CVPR, 2020
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------
import config
import numpy as np
import os
import torch
import torchvision.transforms as trans
import json
from torch.utils.data import Dataset
from utils import get_search_patch, get_search_size, gaussian_shaped_labels, default_loader
import cv2
class BaseDataset(Dataset):
def __init__(self, train_dir, val_dir=None, is_train=True):
if is_train:
self._seq_dir = train_dir
with open(os.path.join(train_dir, 'train.json'), 'r') as f:
self._seqs = json.load(f)
else:
np.random.seed(123)
self._seq_dir = val_dir
with open(os.path.join(val_dir, 'val.json'), 'r') as f:
self._seqs = json.load(f)
self.n_seq = len(self._seqs)
print(self.n_seq)
self.is_train = is_train
self.seq_id = -1
self.preprocessor = trans.Compose(
[trans.ToTensor(),
trans.Normalize(mean=config.mean, std=config.std)])
self.pad_value = np.array(config.mean)
base_target_sz = np.array([config.base_target_sz, config.base_target_sz])
self.base_window_sz = get_search_size(base_target_sz, config.search_scale)
window_cell_sz = np.ceil(self.base_window_sz / config.cell_sz)
self.window_cell_sz = window_cell_sz - (window_cell_sz % 2) + 1
def __len__(self):
return len(self._seqs)
def __getitem__(self, seq_id):
# uniformly choosing video frames
seq_name = self._seqs[seq_id]['seq_name']
start_frame = self._seqs[seq_id]['start_frame']
end_frame = self._seqs[seq_id]['end_frame']
bboxes = self._seqs[seq_id]['gt_bboxes']
visible = self._seqs[seq_id]['visible']
visible_frame_idxes = np.arange(0, end_frame - start_frame + 1)[visible]
seq_len = len(visible_frame_idxes)
clip_len = config.look_ahead * config.time_step + 1
assert seq_len >= clip_len
start_idx = np.random.randint(seq_len - clip_len + 1) if self.is_train else 0
selected_idxes = [visible_frame_idxes[idx] for idx in range(start_idx, start_idx + clip_len)]
# build training examples for initial frame
patches, label_maps, gt_bboxes = [], [], []
img_path = self._get_img_path(seq_name, start_frame, selected_idxes[0])
init_image = default_loader(img_path)
init_bbox = np.array(bboxes[selected_idxes[0]])
for ratio in config.aug_init_ratios:
for scale in config.aug_init_scales:
# aspect ratio augmentation
height, width = init_image.shape[0: 2]
sw, sh = int(width * ratio), int(height / ratio)
image_resized = cv2.resize(init_image, (sw, sh))
                bbox_resized = init_bbox * np.array([ratio, 1 / ratio, ratio, 1 / ratio])
                # scale changes augmentation
                search_scale = config.search_scale / scale
                # generate training examples
                patch, label_map, bbox_on_patch = self._generate_training_examples(image_resized, bbox_resized, search_scale)
patches.append(patch)
label_maps.append(label_map)
gt_bboxes.append(bbox_on_patch)
# build training examples for subsequent frames.
for i, idx in enumerate(selected_idxes[1:]):
img_path = self._get_img_path(seq_name, start_frame, idx)
image = default_loader(img_path)
bbox = np.array(bboxes[idx])
# aspect ratio augmentation
height, width = image.shape[0: 2]
ratio = np.random.uniform(config.aug_ratios_range[0], config.aug_ratios_range[1])
sw, sh = int(width * ratio), int(height / ratio)
image = cv2.resize(image, (sw, sh))
bbox = bbox * np.array([ratio, 1 / ratio, ratio, 1 / ratio])
# scale changes augmentation
obj_scale = np.random.uniform(config.aug_scales_range[0], config.aug_scales_range[1])
search_scale = config.search_scale/obj_scale
# generate training examples
patch, label_map, bbox_on_patch = self._generate_training_examples(image, bbox, search_scale)
patches.append(patch)
label_maps.append(label_map)
gt_bboxes.append(bbox_on_patch)
patches = torch.stack(patches, 0)
label_maps = torch.stack(label_maps, 0)
gt_bboxes = torch.stack(gt_bboxes, 0)
return patches, label_maps, gt_bboxes
def _get_img_path(self, seq_name, start_frame, index):
raise NotImplementedError
def _generate_training_examples(self, image, bbox, search_scale):
# get roi patches
window_sz = get_search_size(bbox[2:], search_scale)
scale = self.base_window_sz[0] / window_sz[0]
offset = np.random.randint(-config.offset_range, config.offset_range, 2)
center = bbox[:2] + np.floor(bbox[2:] / 2) - offset/scale
patch = get_search_patch(image, center, self.pad_value, window_sz, self.base_window_sz)
patch = self.preprocessor(patch)
# get label maps
scaled_bbox = np.round(bbox * scale)
target_cell_sz = np.ceil(scaled_bbox[2:] / config.cell_sz)
output_sigma = target_cell_sz * config.output_sigma_factor
map_center = np.floor(self.window_cell_sz / 2) + offset //config.cell_sz
label_map = gaussian_shaped_labels(output_sigma, self.window_cell_sz, map_center)
label_map = torch.from_numpy(label_map[None, :]).float()
# get gt bboxes
bbox_center = np.zeros(2, np.float32) + offset
bbox_on_patch = np.concatenate([bbox_center, scaled_bbox[2:]])
bbox_on_patch = torch.from_numpy(bbox_on_patch).float()
return patch, label_map, bbox_on_patch
class VIDataset(BaseDataset):
def __init__(self, root_dir, is_train):
train_dir = os.path.join(root_dir, 'Data/ILSVRC/Data/VID/train')
val_dir = os.path.join(root_dir, 'Data/ILSVRC/Data/VID/val')
super(VIDataset, self).__init__(train_dir, val_dir, is_train)
def _get_img_path(self, seq_name, start_frame, index):
img_path = self._seq_dir + ('/%s/%06d.JPEG' % (seq_name, (start_frame - 1) + index))
return img_path
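# Hedged usage sketch (paths are placeholders; exact tensor shapes depend on the
# config module, which is not shown here):
#   dataset = VIDataset('/path/to/ILSVRC2015', is_train=True)
#   patches, label_maps, gt_bboxes = dataset[0]
#   # patches/label_maps/gt_bboxes are stacked torch tensors for one training clip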
| 2.296875
| 2
|
athenet/tests/stress_test.py
|
heurezjusz/Athena
| 2
|
12776097
|
"""Stress testing athenet.algorithm.derest.derivative functions.
"""
import numpy as np
import theano
import theano.tensor as T
import unittest
from math import e
from athenet.algorithm.numlike import TheanoInterval
from athenet.algorithm.derest.derivative import *
from athenet.algorithm.derest.activation import *
from numpy.random import rand
import timeit
theano.config.exception_verbosity = 'high'
theano.config.optimizer = 'fast_compile'
A = np.array
def theano_var(x):
return theano.shared(rand(*x).astype(theano.config.floatX))
def theano_interval(x):
v = theano_var(x)
return TheanoInterval(v, v)
class ActivationStressTest(unittest.TestCase):
def check_time(self, name, start_time, constr_time, ex_time):
print ''
print name + ':'
print 'constr_time:', constr_time - start_time
print 'ex_time:', ex_time - constr_time
def test_fully_connected(self):
iinp = theano_interval((1024,))
b = theano_interval((1000,))
w = theano_var((1024, 1000))
start_time = timeit.default_timer()
iout = fully_connected(iinp, w, b)
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('fully_connected', start_time, constr_time, ex_time)
def test_convolutional(self):
shp = (3, 224, 224)
iinp = theano_interval(shp)
w = theano_var((64, 3, 7, 7))
b = theano_var((64,))
start_time = timeit.default_timer()
iout = conv(iinp, shp, w, (64, 7, 7), b, stride=(2, 2),
padding=(3, 3))
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('convolutional', start_time, constr_time, ex_time)
def test_avg_pool(self):
shp = (24, 16, 16)
# TODO: test this (real case) (memory / time issues)
# shp = (4, 192, 28, 28)
iinp = theano_interval(shp)
start_time = timeit.default_timer()
iout = pool(iinp, shp, poolsize=(3, 3), stride=(1, 1), mode='avg')
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('avg_pool', start_time, constr_time, ex_time)
def test_max_pool(self):
shp = (24, 16, 16)
# TODO: test this (real case) (memory / time issues)
# shp = (4, 192, 28, 28)
iinp = theano_interval(shp)
start_time = timeit.default_timer()
iout = pool(iinp, shp, poolsize=(3, 3), stride=(1, 1), mode='max')
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('max_pool', start_time, constr_time, ex_time)
def test_softmax(self):
# TODO: test this (real case) (memory / time issues)
# shp = (1000,)
# TODO: I think that softmax doesn't have to be calculated for Derest
shp = (20,)
iinp = theano_interval(shp)
start_time = timeit.default_timer()
iout = softmax(iinp, *shp)
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('softmax', start_time, constr_time, ex_time)
def test_norm(self):
alpha = 0.00002
beta = 0.75
k = 1.0
n = 5
shp = (64, 56, 56)
iinp = theano_interval(shp)
start_time = timeit.default_timer()
iout = norm(iinp, shp)
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('norm', start_time, constr_time, ex_time)
def test_dropout(self):
iinp = theano_interval((50, 1024, 1, 1))
start_time = timeit.default_timer()
iout = d_dropout(iinp, 0.8)
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('dropout', start_time, constr_time, ex_time)
def test_relu(self):
iinp = theano_interval((50, 1024, 1, 1))
start_time = timeit.default_timer()
iout = relu(iinp)
constr_time = timeit.default_timer()
l, u = iout.eval()
ex_time = timeit.default_timer()
self.check_time('relu', start_time, constr_time, ex_time)
class DerivativeStressTest(unittest.TestCase):
def check_time(self, name, start_time, constr_time, ex_time):
print ''
print name + ':'
print 'constr_time:', constr_time - start_time
print 'ex_time:', ex_time - constr_time
def test_fully_connected(self):
idout = theano_interval((1, 1000))
w = rand(1024, 1000)
shp = (1, 1024)
start_time = timeit.default_timer()
din = d_fully_connected(idout, w, shp)
constr_time = timeit.default_timer()
l, u = din.eval()
ex_time = timeit.default_timer()
self.check_time('d_fully_connected', start_time, constr_time, ex_time)
def test_convolutional(self):
dout = theano_interval((1, 2, 14, 14))
w = theano_var((2, 3, 7, 7))
start_time = timeit.default_timer()
din = d_conv(dout, (1, 3, 28, 28), (2, 7, 7), w, stride=(2, 2),
padding=(3, 3))
# TODO: test this (real case) (memory / time issues)
# dout = theano_interval((1, 64, 112, 112))
# w = theano_var((64, 3, 7, 7))
# start_time = timeit.default_timer()
# din = d_conv(dout, (1, 3, 244, 244), (64, 7, 7), w, stride=(2, 2),
# padding=(3, 3))
constr_time = timeit.default_timer()
l, u = din.eval()
ex_time = timeit.default_timer()
self.check_time('d_convolutional', start_time, constr_time, ex_time)
def test_avg_pool(self):
shp = (4, 24, 14, 14)
# TODO: test this (real case) (memory / time issues)
# shp = (4, 192, 28, 28)
iinp = theano_interval(shp)
idout = theano_interval(shp)
start_time = timeit.default_timer()
din = d_pool(idout, iinp, shp, poolsize=(3, 3), padding=(1, 1),
stride=(1, 1), mode='avg')
constr_time = timeit.default_timer()
l, u = din.eval()
ex_time = timeit.default_timer()
self.check_time('d_avg_pool', start_time, constr_time, ex_time)
def test_max_pool(self):
shp = (2, 12, 3, 3)
# TODO: test this (real case) (memory / time issues)
# shp = (4, 192, 28, 28)
iinp = theano_interval(shp)
idout = theano_interval(shp)
start_time = timeit.default_timer()
din = d_pool(idout, iinp, shp, poolsize=(3, 3), padding=(1, 1),
stride=(1, 1), mode='max')
constr_time = timeit.default_timer()
l, u = din.eval()
ex_time = timeit.default_timer()
self.check_time('d_max_pool', start_time, constr_time, ex_time)
def test_softmax(self):
dout = TheanoInterval.derest_output(1000)
start_time = timeit.default_timer()
din = d_softmax(dout)
constr_time = timeit.default_timer()
l, u = din.eval()
ex_time = timeit.default_timer()
self.check_time('d_softmax', start_time, constr_time, ex_time)
def test_norm(self):
alpha = 0.00002
beta = 0.75
k = 1.0
n = 5
# TODO: Check higher batch size (memory issues)
# iinp = theano_interval((50, 64, 56, 56))
# idout = theano_interval((50, 64, 56, 56))
iinp = theano_interval((10, 64, 56, 56))
idout = theano_interval((10, 64, 56, 56))
shp = (10, 64, 56, 56)
start_time = timeit.default_timer()
din = d_norm(idout, iinp, shp, n, k, alpha, beta)
constr_time = timeit.default_timer()
l, u = din.eval()
ex_time = timeit.default_timer()
self.check_time('d_norm', start_time, constr_time, ex_time)
def test_dropout(self):
idout = theano_interval((50, 1024, 1, 1))
start_time = timeit.default_timer()
idin = d_dropout(idout, 0.8)
constr_time = timeit.default_timer()
l, u = idin.eval()
ex_time = timeit.default_timer()
self.check_time('d_dropout', start_time, constr_time, ex_time)
def test_relu(self):
idout = theano_interval((50, 1024, 1, 1))
iinp = theano_interval((50, 1024, 1, 1))
start_time = timeit.default_timer()
idin = d_relu(idout, iinp)
constr_time = timeit.default_timer()
l, u = idin.eval()
ex_time = timeit.default_timer()
self.check_time('d_relu', start_time, constr_time, ex_time)
if __name__ == '__main__':
unittest.main(verbosity=2, catchbreak=True)
| 2.390625
| 2
|
openrave/docs/source/tutorials/openravepy_examples/create_customsensor.py
|
jdsika/TUM_HOly
| 2
|
12776098
|
"""Creates a custom kinematics body with two links and one joint
"""
from openravepy import *
from numpy import eye, array, zeros
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
with env:
robot=RaveCreateRobot(env,'')
robot.SetName('camera')
linkinfo=KinBody.LinkInfo()
linkinfo._name='camerabase'
ginfo=KinBody.GeometryInfo()
ginfo._type=GeometryType.Box
ginfo._vGeomData=[0.1,0.1,0.1] # box extents
ginfo._vDiffuseColor=[0,0,1]
ginfo._t = eye(4)
linkinfo._vgeometryinfos = [ginfo]
camera1info=Robot.AttachedSensorInfo()
camera1info._linkname='camerabase'
camera1info._name = 'ensenson10'
camera1info._sensorname = 'base_pinhole_camera'
camera1info._trelative = eye(4)
camera1info._trelative[0:3,3] = [0,0,0.1]
camera1info._sensorgeometry = CameraGeomData()
camera1info._sensorgeometry.width = 640
camera1info._sensorgeometry.height = 480
camera1info._sensorgeometry.intrinsics.K = array([[640.0,0,320],[0,640,240],[0,0,1]])
camera1info._sensorgeometry.intrinsics.distortion_coeffs = zeros(5)
camera1info._sensorgeometry.intrinsics.distortion_model = 'opencv'
camera1info._sensorgeometry.intrinsics.focal_length = 0.05
robot.Init([linkinfo],[],[],[])
env.Add(robot)
robot.AddAttachedSensor(camera1info,True)
| 2.765625
| 3
|
codes/scripts/audio/generate_quantized_mels.py
|
neonbjb/DL-Art-School
| 12
|
12776099
|
<reponame>neonbjb/DL-Art-School
import os
import os.path as osp
import logging
import random
import argparse
import torchvision
import utils
import utils.options as option
import utils.util as util
from models.waveglow.denoiser import Denoiser
from trainer.ExtensibleTrainer import ExtensibleTrainer
from data import create_dataset, create_dataloader
from tqdm import tqdm
import torch
import numpy as np
from scipy.io import wavfile
if __name__ == "__main__":
# Set seeds
torch.manual_seed(5555)
random.seed(5555)
np.random.seed(5555)
#### options
torch.backends.cudnn.benchmark = True
want_metrics = False
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to options YAML file.', default='../options/generate_quantized_mels.yml')
opt = option.parse(parser.parse_args().opt, is_train=False)
opt = option.dict_to_nonedict(opt)
utils.util.loaded_options = opt
util.mkdirs(
(path for key, path in opt['path'].items()
if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))
util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
test_loaders = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
test_set, collate_fn = create_dataset(dataset_opt, return_collate=True)
test_loader = create_dataloader(test_set, dataset_opt, collate_fn=collate_fn)
logger.info('Number of test texts in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
test_loaders.append(test_loader)
model = ExtensibleTrainer(opt)
outpath = opt['path']['results_root']
os.makedirs(os.path.join(outpath, 'quantized_mels'), exist_ok=True)
for test_loader in test_loaders:
dataset_dir = opt['path']['results_root']
util.mkdir(dataset_dir)
tq = tqdm(test_loader)
for data in tq:
with torch.no_grad():
model.feed_data(data, 0)
model.test()
wavfiles = data['filenames']
quantized = model.eval_state[opt['eval']['quantized_mels']][0]
for i, filename in enumerate(wavfiles):
qmelfile = filename.replace('wavs/', 'quantized_mels/') + '.pth'
torch.save(quantized[i], os.path.join(outpath, qmelfile))
| 1.867188
| 2
|
src/petronia/defimpl/platform/windows/input/keymap.py
|
groboclown/petronia
| 19
|
12776100
|
<reponame>groboclown/petronia<filename>src/petronia/defimpl/platform/windows/input/keymap.py<gh_stars>10-100
"""
Handlers for Windows VirtualKey (VK) codes and key names.
"""
from typing import Sequence, List, Dict, Set
def vk_to_names(vk: int) -> Sequence[str]:
maps: List[str] = []
for vk_str, code in STR_VK_MAP.items():
# There are multiple mappings; return them all.
if code == vk:
maps.append(vk_str)
if not maps:
maps.append("#{0}".format(hex(vk)))
return maps
def is_vk_modifier(vk: int) -> bool:
return vk in _MODIFIER_KEYS
def is_specially_handled_vk_key(vk: int) -> bool:
return vk in SPECIAL_MODIFIER_CHECK_VKEY_CODES
def contains_specially_handled_vk_key(vks: Sequence[int]) -> bool:
for vk in vks:
if vk in SPECIAL_MODIFIER_CHECK_VKEY_CODES:
return True
return False
def get_modifier_vk_keys(include_special: bool) -> Set[int]:
ret = set(_MODIFIER_KEYS)
if not include_special:
for vk in SPECIAL_MODIFIER_CHECK_VKEY_CODES:
if vk in ret:
ret.remove(vk)
return ret
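# Illustrative usage (not part of the original module): vk_to_names(0x0D)
# walks STR_VK_MAP and returns every alias bound to that code --
# ["return", "enter", "cr", "lf"] -- while an unmapped code such as 0x0A
# falls back to a single placeholder name like "#0xa".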
# Built-in alias VK keys that represent several keys.
# Specific for modifiers.
VK_ALIASES: Dict[str, Sequence[str]] = {
"win": ("lsuper", "rsuper",),
"super": ("lsuper", "rsuper",),
"shift": ("lshift", "rshift",),
"control": ("lcontrol", "rcontrol",),
"ctrl": ("lcontrol", "rcontrol",),
"alt": ("lalt", "ralt",),
"menu": ("lmenu", "rmenu",),
}
# Set of all recognized modifiers
MODIFIERS: Set[str] = {
# "shift",
"lshift",
"rshift",
# "control",
# "ctrl",
"lcontrol",
"lctrl",
"rcontrol",
"rctrl",
# "alt",
"lalt",
"ralt",
"lsuper",
"lwin",
"rsuper",
"rwin",
"lmenu",
"rmenu",
"apps",
"caps-lock",
}
# https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx
STR_VK_MAP: Dict[str, int] = {
"lmb": 0x01, # VK_LBUTTON Left mouse button
"rmb": 0x02, # VK_RBUTTON Right mouse button
"break": 0x03, # VK_CANCEL Control-break processing
"mmb": 0x04, # VK_MBUTTON Middle mouse button (three-button mouse)
"x1mb": 0x05, # VK_XBUTTON1 X1 mouse button
"x2mb": 0x06, # VK_XBUTTON2 X2 mouse button
"x3mb": 0x07, # - Undefined
"back": 0x08, # VK_BACK BACKSPACE key
"backspace": 0x08, # VK_BACK BACKSPACE key
"tab": 0x09, # VK_TAB TAB key
# - 0x0A-0B Reserved
"clear": 0x0C, # VK_CLEAR CLEAR key
"return": 0x0D, # VK_RETURN ENTER key
"enter": 0x0D, # VK_RETURN ENTER key
"cr": 0x0D, # VK_RETURN ENTER key
"lf": 0x0D, # VK_RETURN ENTER key
# - 0x0E-0F Undefined
# These VK keys don't seem to get generated by the global key handler;
# instead, the more low-level (lcontrol, rcontrol, etc) ones are.
# "shift": 0x10, # VK_SHIFT SHIFT key
# "sft": 0x10, # VK_SHIFT SHIFT key
# "control": 0x11, # VK_CONTROL CTRL key
# "ctrl": 0x11, # VK_CONTROL CTRL key
# "menu": 0x12, # VK_MENU ALT key
# "alt": 0x12, # VK_MENU ALT key
"pause": 0x13, # VK_PAUSE PAUSE key
"caps-lock": 0x14, # VK_CAPITAL CAPS LOCK key
"kana": 0x15, # VK_KANA IME Kana mode
"hanguel": 0x15, # VK_HANGUEL IME Hanguel mode (maintained for compatibility; use VK_HANGUL)
"hangul": 0x15, # VK_HANGUL IME Hangul mode
# - 0x16 Undefined
"junja": 0x17, # VK_JUNJA IME Junja mode
"final": 0x18, # VK_FINAL IME final mode
"hanja": 0x19, # VK_HANJA IME Hanja mode
"kanji": 0x19, # VK_KANJI IME Kanji mode
# 0x1A - Undefined
"escape": 0x1B, # VK_ESCAPE ESC key
"esc": 0x1B, # VK_ESCAPE ESC key
"convert": 0x1C, # VK_CONVERT IME convert
"nonconvert": 0x1D, # VK_NONCONVERT IME nonconvert
"accept": 0x1E, # VK_ACCEPT IME accept
"modechange": 0x1F, # VK_MODECHANGE IME mode change request
"space": 0x20, # VK_SPACE SPACEBAR
"prior": 0x21, # VK_PRIOR PAGE UP key
"pgup": 0x21, # VK_PRIOR PAGE UP key
"pageup": 0x21, # VK_PRIOR PAGE UP key
"next": 0x22, # VK_NEXT PAGE DOWN key
"pgdn": 0x22, # VK_NEXT PAGE DOWN key
"pagedown": 0x22, # VK_NEXT PAGE DOWN key
"end": 0x23, # VK_END END key
"home": 0x24, # VK_HOME HOME key
"left": 0x25, # VK_LEFT LEFT ARROW key
"up": 0x26, # VK_UP UP ARROW key
"right": 0x27, # VK_RIGHT RIGHT ARROW key
"down": 0x28, # VK_DOWN DOWN ARROW key
"select": 0x29, # VK_SELECT SELECT key
"print": 0x2A, # VK_PRINT PRINT key
"execute": 0x2B, # VK_EXECUTE EXECUTE key
"snapshot": 0x2C, # VK_SNAPSHOT PRINT SCREEN key
"insert": 0x2D, # VK_INSERT INS key
"delete": 0x2E, # VK_DELETE DEL key
"del": 0x2E, # VK_DELETE DEL key
"help": 0x2F, # VK_HELP HELP key
"lsuper": 0x5B, # VK_LWIN Left Windows key (Natural keyboard)
"lwin": 0x5B, # VK_LWIN Left Windows key (Natural keyboard)
"rsuper": 0x5C, # VK_RWIN Right Windows key (Natural keyboard)
"rwin": 0x5C, # VK_RWIN Right Windows key (Natural keyboard)
"apps": 0x5D, # VK_APPS Applications key (Natural keyboard)
# 0x5E - Reserved
"sleep": 0x5F, # VK_SLEEP Computer Sleep key
"numpad0": 0x60, # VK_NUMPAD0 Numeric keypad 0 key
"numpad1": 0x61, # VK_NUMPAD1 Numeric keypad 1 key
"numpad2": 0x62, # VK_NUMPAD2 Numeric keypad 2 key
"numpad3": 0x63, # VK_NUMPAD3 Numeric keypad 3 key
"numpad4": 0x64, # VK_NUMPAD4 Numeric keypad 4 key
"numpad5": 0x65, # VK_NUMPAD5 Numeric keypad 5 key
"numpad6": 0x66, # VK_NUMPAD6 Numeric keypad 6 key
"numpad7": 0x67, # VK_NUMPAD7 Numeric keypad 7 key
"numpad8": 0x68, # VK_NUMPAD8 Numeric keypad 8 key
"numpad9": 0x69, # VK_NUMPAD9 Numeric keypad 9 key
"multiply": 0x6A, # VK_MULTIPLY Multiply key
"add": 0x6B, # VK_ADD Add key
"separator": 0x6C, # VK_SEPARATOR Separator key
"subtract": 0x6D, # VK_SUBTRACT Subtract key
"decimal": 0x6E, # VK_DECIMAL Decimal key
"divide": 0x6F, # VK_DIVIDE Divide key
"f1": 0x70, # VK_F1 F1 key
"f2": 0x71, # VK_F2 F2 key
"f3": 0x72, # VK_F3 F3 key
"f4": 0x73, # VK_F4 F4 key
"f5": 0x74, # VK_F5 F5 key
"f6": 0x75, # VK_F6 F6 key
"f7": 0x76, # VK_F7 F7 key
"f8": 0x77, # VK_F8 F8 key
"f9": 0x78, # VK_F9 F9 key
"f10": 0x79, # VK_F10 F10 key
"f11": 0x7A, # VK_F11 F11 key
"f12": 0x7B, # VK_F12 F12 key
"f13": 0x7C, # VK_F13 F13 key
"f14": 0x7D, # VK_F14 F14 key
"f15": 0x7E, # VK_F15 F15 key
"f16": 0x7F, # VK_F16 F16 key
"f17": 0x80, # VK_F17 F17 key
"f18": 0x81, # VK_F18 F18 key
"f19": 0x82, # VK_F19 F19 key
"f20": 0x83, # VK_F20 F20 key
"f21": 0x84, # VK_F21 F21 key
"f22": 0x85, # VK_F22 F22 key
"f23": 0x86, # VK_F23 F23 key
"f24": 0x87, # VK_F24 F24 key
# 0x88-8F - Unassigned
"numlock": 0x90, # VK_NUMLOCK NUM LOCK key
"scroll": 0x91, # VK_SCROLL SCROLL LOCK key
# 0x92-96 - OEM specific
# 0x97-9F - Unassigned
"lshift": 0xA0, # VK_LSHIFT Left SHIFT key
"rshift": 0xA1, # VK_RSHIFT Right SHIFT key
"lcontrol": 0xA2, # VK_LCONTROL Left CONTROL key
"lctrl": 0xA2, # VK_LCONTROL Left CONTROL key
"rcontrol": 0xA3, # VK_RCONTROL Right CONTROL key
"rctrl": 0xA3, # VK_RCONTROL Right CONTROL key
"lmenu": 0xA4, # VK_LMENU Left MENU key
"lalt": 0xA4, # VK_LMENU Left MENU key
"rmenu": 0xA5, # VK_RMENU Right MENU key
"ralt": 0xA5, # VK_RMENU Right MENU key
"browser-back": 0xA6, # VK_BROWSER_BACK Browser Back key
"browser-forward": 0xA7, # VK_BROWSER_FORWARD Browser Forward key
"browser-refresh": 0xA8, # VK_BROWSER_REFRESH Browser Refresh key
"browser-stop": 0xA9, # VK_BROWSER_STOP Browser Stop key
"browser-search": 0xAA, # VK_BROWSER_SEARCH Browser Search key
"browser-favorites": 0xAB, # VK_BROWSER_FAVORITES Browser Favorites key
"browser-home": 0xAC, # VK_BROWSER_HOME Browser Start and Home key
"volume-mute": 0xAD, # VK_VOLUME_MUTE Volume Mute key
"volume-down": 0xAE, # VK_VOLUME_DOWN Volume Down key
"volume-up": 0xAF, # VK_VOLUME_UP Volume Up key
"media-next-track": 0xB0, # VK_MEDIA_NEXT_TRACK Next Track key
"media-prev-track": 0xB1, # VK_MEDIA_PREV_TRACK Previous Track key
"media-stop": 0xB2, # VK_MEDIA_STOP Stop Media key
"media-play-pause": 0xB3, # VK_MEDIA_PLAY_PAUSE Play/Pause Media key
"launch-mail": 0xB4, # VK_LAUNCH_MAIL Start Mail key
"launch-media-select": 0xB5, # VK_LAUNCH_MEDIA_SELECT Select Media key
"launch-app1": 0xB6, # VK_LAUNCH_APP1 Start Application 1 key
"launch-app2": 0xB7, # VK_LAUNCH_APP2 Start Application 2 key
# 0xB8-B9 - Reserved
"oem_1": 0xBA, # VK_OEM_1 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the ';:' key
":": 0xBA,
";": 0xBA,
"colon": 0xBA,
"semi-colon": 0xBA,
"oem_plus": 0xBB, # VK_OEM_PLUS For any country/region, the '+=' key
"plus": 0xBB,
"=": 0xBB,
"equal": 0xBB,
"oem_comma": 0xBC, # VK_OEM_COMMA For any country/region, the ',<' key
"comma": 0xBC,
",": 0xBC,
"<": 0xBC,
"less-than": 0xBC,
"oem_minus": 0xBD, # VK_OEM_MINUS For any country/region, the '-_' key
"minus": 0xBD,
"dash": 0xBD,
"under": 0xBD,
"lodash": 0xBD,
"oem_period": 0xBE, # VK_OEM_PERIOD For any country/region, the '.>' key
".": 0xBE,
"period": 0xBE,
">": 0xBE,
"greater-than": 0xBE,
"oem_2": 0xBF, # VK_OEM_2 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '/?' key
"/": 0xBF,
"slash": 0xBF,
"?": 0xBF,
"question": 0xBF,
"question-mark": 0xBF,
"oem2": 0xBF,
"oem_3": 0xC0, # VK_OEM_3 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '`~' key
"oem3": 0xC0,
"~": 0xC0,
"tilde": 0xC0,
"`": 0xC0,
"back-tick": 0xC0,
# 0xC1-D7 - Reserved
# 0xD8-DA - Unassigned
"oem_4": 0xDB, # VK_OEM_4 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '[{' key
"oem4": 0xDB,
"[": 0xDB,
"left-bracket": 0xBD,
"left-brace": 0xBD,
"open-bracket": 0xBD,
"open-brace": 0xBD,
"{": 0xDB,
"left-curly-bracket": 0xDB,
"left-curly-brace": 0xDB,
"open-curly-bracket": 0xDB,
"open-curly-brace": 0xDB,
"oem_5": 0xDC, # VK_OEM_5 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the '\|' key
"oem5": 0xDC,
"|": 0xDC,
"\\": 0xDC,
"pipe": 0xDC,
"backslash": 0xDC,
"oem_6": 0xDD, # VK_OEM_6 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard, the ']}' key
"oem6": 0xDD,
"]": 0xDD,
"}": 0xDD,
"right-bracket": 0xDD,
"right-brace": 0xDD,
"close-bracket": 0xDD,
"close-brace": 0xDD,
"closed-bracket": 0xDD,
"closed-brace": 0xDD,
"right-curly-bracket": 0xDD,
"right-curly-brace": 0xDD,
"close-curly-bracket": 0xDD,
"close-curly-brace": 0xDD,
"closed-curly-bracket": 0xDD,
"closed-curly-brace": 0xDD,
"oem_7": 0xDE, # VK_OEM_7 Used for miscellaneous characters;
# it can vary by keyboard. For the US standard keyboard,
# the 'single-quote/double-quote' key
"oem7": 0xDE,
'"': 0xDE,
"'": 0xDE,
"quote": 0xDE,
"tick": 0xDE,
"double-quote": 0xDE,
"oem_8": 0xDF, # VK_OEM_8 Used for miscellaneous characters; it can vary by keyboard.
"oem8": 0xDF,
# 0xE0 - Reserved
# 0xE1 - OEM specific
"oem_102": 0xE2, # VK_OEM_102 Either the angle bracket key or the backslash key on
# the RT 102-key keyboard
"oem102": 0xE2,
# 0xE3-E4 - OEM specific
"processkey": 0xE5, # VK_PROCESSKEY IME PROCESS key
# 0xE6 - OEM specific
"packet": 0xE7, # VK_PACKET Used to pass Unicode characters as if they were
# keystrokes. The VK_PACKET key is the low word of a 32-bit Virtual
# Key value used for non-keyboard input methods. For more
# information, see Remark in KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP
# 0xE8 - Unassigned
# 0xE9-F5 - OEM specific
"attn": 0xF6, # VK_ATTN Attn key
"crsel": 0xF7, # VK_CRSEL CrSel key
"exsel": 0xF8, # VK_EXSEL ExSel key
"ereof": 0xF9, # VK_EREOF Erase EOF key
"play": 0xFA, # VK_PLAY Play key
"zoom": 0xFB, # VK_ZOOM Zoom key
"noname": 0xFC, # VK_NONAME Reserved
"pa1": 0xFD, # VK_PA1 PA1 key
"oem_clear": 0xFE, # VK_OEM_CLEAR Clear key
# 0x3A-40 - Undefined
"0": 0x30, # 0 key
"1": 0x31, # 1 key
"2": 0x32, # 2 key
"3": 0x33, # 3 key
"4": 0x34, # 4 key
"5": 0x35, # 5 key
"6": 0x36, # 6 key
"7": 0x37, # 7 key
"8": 0x38, # 8 key
"9": 0x39, # 9 key
"a": 0x41, # A key
"b": 0x42, # B key
"c": 0x43, # C key
"d": 0x44, # D key
"e": 0x45, # E key
"f": 0x46, # F key
"g": 0x47, # G key
"h": 0x48, # H key
"i": 0x49, # I key
"j": 0x4A, # J key
"k": 0x4B, # K key
"l": 0x4C, # L key
"m": 0x4D, # M key
"n": 0x4E, # N key
"o": 0x4F, # O key
"p": 0x50, # P key
"q": 0x51, # Q key
"r": 0x52, # R key
"s": 0x53, # S key
"t": 0x54, # T key
"u": 0x55, # U key
"v": 0x56, # V key
"w": 0x57, # W key
"x": 0x58, # X key
"y": 0x59, # Y key
"z": 0x5A, # Z key
}
_MODIFIER_KEYS: Set[int] = set()
for __k in MODIFIERS:
_MODIFIER_KEYS.add(STR_VK_MAP[__k])
SPECIAL_MODIFIER_CHECK_VKEY_CODES: Sequence[int] = (
STR_VK_MAP['lsuper'], STR_VK_MAP['rsuper']
)
| 2.4375
| 2
|
label_2_json_V3.py
|
zmyml/parking_labeler
| 0
|
12776101
|
# Check whether the label contains the last time plus one second; if not, add one more row of data.
# The image list is also added inside the function.
# Generates a single label at a time.
# Given the location (full path) of an old label, generate a new label file.
import os
import numpy as np
import json
from create_v3_012 import create_json_record
# from create_v2 import create_json_record
# path_txt_for_check = r'Y:\dataset\inroad_parking_videos\pics\2019_08_12\DDT2G1907ZMY00008SY_label.txt'
# path_txt_for_check = r'Y:\dataset\inroad_parking_videos\pics\2019_12_28\DDT2G1907ZMY00142SY_1211_label.txt'
# path_txt_for_check = r'W:\dataset\inroad_parking_videos\pics\2019_12_14\DDT2G1907ZMY00057SY_label.txt'
path_txt_for_check = r'W:\dataset\inroad_parking_videos\pics\2020_01_19\DDT2G1907ZMY00082SY_label.txt'
# path_txt_for_check = r'W:\dataset\inroad_parking_videos\pics\2019_12_31\DDT2G1907ZMY00142SY_1211_label.txt'
imgs_dir = os.path.dirname(path_txt_for_check)
folder_name = os.path.basename(path_txt_for_check).split('_')
if len(folder_name) == 2:
folder_name = folder_name[0]
elif len(folder_name) == 3:
folder_name = '_'.join(folder_name[0:2])
elif len(folder_name) == 4:
folder_name = '_'.join(folder_name[0:3])
imgs_folder_path = f'{imgs_dir}\\{folder_name}'
imgs_list = os.listdir(imgs_folder_path)
imgs_list = [i for i in imgs_list if i.endswith('jpg')]
imgs_list.sort()  # sort
imgs_list_only_time = ['_'.join(i.split('_')[:3]) for i in imgs_list]
# # get the time of the last image
imgs_last_time = os.path.splitext(imgs_list[-1])[0]
hh, mm, ss = imgs_last_time.split('_')[:3]
sec_last_plus_one = 3600 * int(hh) + 60 * int(mm) + int(ss) + 1  # last time plus 1 second
imgs_last_time_plus_one = f'{sec_last_plus_one//3600:02d}_{sec_last_plus_one%3600//60:02d}_{sec_last_plus_one%60:02d}'
# path_local = r'C:\Users\tongxin\Desktop\label_test_666'
# path_local = r'C:\Users\tongxin\Desktop\1'
path_local = r'C:\Users\tongxin\Desktop\label_test_2020_01_07'
path_json_converted = path_local + '\\' + \
path_txt_for_check.split('\\')[-2] + '\\' + \
os.path.splitext(os.path.basename(path_txt_for_check))[0] + '_v2.json'
# os.path.splitext(os.path.basename(path_txt_for_check))[0]+'_d05.10.json'
if not os.path.isdir(os.path.dirname(path_json_converted)):
os.makedirs(os.path.dirname(path_json_converted))
# 1. read one txt file
data_raw = []
with open(path_txt_for_check, 'r', encoding='UTF-8') as file_to_read:
    lines = file_to_read.readlines()  # read the data
for line in lines:
if line != '\n':
data_raw.append(line)
parking_space = [i.split(':')[0] for i in data_raw[-1].split(' ')[1:]]  # see how many parking spaces there are
# 2. convert the file
data_raw_np = []
for i in data_raw:
for idx, j in enumerate(i.split(' ')):
        if idx == len(parking_space) and (j[-1] == '\n'):  # last column
            data_raw_np.append(j[:-1])  # strip the trailing "\n"
else:
data_raw_np.append(j)
record_for_json = create_json_record(data_raw_np, parking_space, imgs_last_time_plus_one, imgs_list_only_time)
file = open(path_json_converted, 'w', encoding='utf-8')
json.dump(record_for_json, file, ensure_ascii=False, indent=4)
file.close()
print(f'save new label at:{path_json_converted}')
| 2.703125
| 3
|
refman/__init__.py
|
adriancaruana/refman
| 0
|
12776102
|
<gh_stars>0
from .refman import RefMan
__version__ = "0.0.1"
| 1.09375
| 1
|
others/write_result_keras.py
|
prise-3d/Thesis-SampleAnalysis
| 0
|
12776103
|
# main imports
import numpy as np
import pandas as pd
import json
import os, sys, argparse, subprocess
# model imports
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from joblib import dump, load
# image processing imports
from PIL import Image
import ipfml.iqa.fr as fr
from ipfml import metrics
# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
n_samples_image_name_postfix = "_samples_mean.png"
reference_image_name_postfix = "_1000_samples_mean.png"
def write_result(_scene_name, _data_file, _model_path, _n, _reconstructed_path, _iqa):
# prepare data to get score information
dataset=np.loadtxt(_data_file, delimiter=';')
y = dataset[:,0]
X = dataset[:,1:]
y=np.reshape(y, (-1,1))
scaler = MinMaxScaler()
scaler.fit(X)
scaler.fit(y)
xscale=scaler.transform(X)
yscale=scaler.transform(y)
_, X_test, _, y_test = train_test_split(xscale, yscale)
# prepare image path to compare
n_samples_image_path = os.path.join(cfg.reconstructed_folder, _scene_name + '_' + _n + n_samples_image_name_postfix)
reference_image_path = os.path.join(cfg.reconstructed_folder, _scene_name + reference_image_name_postfix)
if not os.path.exists(n_samples_image_path):
# call sub process to create 'n' samples img
print("Creation of 'n' samples image : ", n_samples_image_path)
subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", _n, "--image_name", n_samples_image_path.split('/')[-1]])
if not os.path.exists(reference_image_path):
# call sub process to create 'reference' img
print("Creation of reference image : ", reference_image_path)
subprocess.run(["python", "reconstruct/reconstruct_scene_mean.py", "--scene", _scene_name, "--n", str(1000), "--image_name", reference_image_path.split('/')[-1]])
# load the trained model
with open(_model_path, 'r') as f:
json_model = json.load(f)
model = model_from_json(json_model)
model.load_weights(_model_path.replace('.json', '.h5'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# get coefficient of determination score on test set
y_predicted = model.predict(X_test)
len_shape, _ = y_predicted.shape
y_predicted = y_predicted.reshape(len_shape)
coeff = metrics.coefficient_of_determination(y_test, y_predicted)
# Get data information
reference_image = Image.open(reference_image_path)
reconstructed_image = Image.open(_reconstructed_path)
n_samples_image = Image.open(n_samples_image_path)
# Load expected IQA comparison
try:
fr_iqa = getattr(fr, _iqa)
except AttributeError:
raise NotImplementedError("FR IQA `{}` not implement `{}`".format(fr.__name__, _iqa))
mse_ref_reconstructed_samples = fr_iqa(reference_image, reconstructed_image)
mse_reconstructed_n_samples = fr_iqa(n_samples_image, reconstructed_image)
model_name = _model_path.split('/')[-1].replace('.json', '')
if not os.path.exists(cfg.results_information_folder):
os.makedirs(cfg.results_information_folder)
# save score into models_comparisons_keras.csv file
with open(cfg.global_result_filepath_keras, "a") as f:
f.write(model_name + ';' + str(len(y)) + ';' + str(coeff[0]) + ';' + str(mse_reconstructed_n_samples) + ';' + str(mse_ref_reconstructed_samples) + '\n')
def main():
parser = argparse.ArgumentParser(description="Train model and saved it")
parser.add_argument('--scene', type=str, help='Scene name to reconstruct', choices=cfg.scenes_list)
parser.add_argument('--data', type=str, help='Filename of dataset')
parser.add_argument('--model_path', type=str, help='Json model file path')
parser.add_argument('--n', type=str, help='Number of pixel values approximated to keep')
parser.add_argument('--image_path', type=str, help="The image reconstructed to compare with")
parser.add_argument('--iqa', type=str, help='Image to compare', choices=['ssim', 'mse', 'rmse', 'mae', 'psnr'])
args = parser.parse_args()
param_scene_name = args.scene
param_data_file = args.data
param_n = args.n
param_model_path = args.model_path
param_image_path = args.image_path
param_iqa = args.iqa
write_result(param_scene_name, param_data_file, param_model_path, param_n, param_image_path, param_iqa)
if __name__== "__main__":
main()
| 2.296875
| 2
|
nlp/chatbot/test.py
|
zhangyong2/tensorflow_nlp
| 1
|
12776104
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from nlp.chatbot.dataset import data_utils
from nltk.translate.bleu_score import sentence_bleu
from tqdm import tqdm
import os,sys
import numpy as np
from nlp.chatbot import model as s2s_model
def test_bleu(count, args):
    print('Preparing data')
bucket_dbs = data_utils.read_bucket_dbs(args.buckets_dir)
buckets = data_utils.buckets
bucket_sizes = []
for i in range(len(buckets)):
bucket_size = bucket_dbs[i].size
bucket_sizes.append(bucket_size)
        print('bucket {} contains {} records'.format(i, bucket_size))
total_size = sum(bucket_sizes)
    print('{} records in total'.format(total_size))
if count <= 0:
count = total_size
buckets_scale = [
sum(bucket_sizes[:i + 1]) / total_size
for i in range(len(bucket_sizes))
]
with tf.Session() as sess:
model = s2s_model.create_model(sess, True)
model.batch_size = 1
sess.run(tf.initialize_all_variables())
model.saver.restore(sess, os.path.join(args.model_dir, args.model_name))
total_score = 0.0
for i in tqdm(range(count)):
random_number = np.random.random_sample()
bucket_id = min([
i for i in range(len(buckets_scale))
if buckets_scale[i] > random_number
])
data, _ = model.get_batch_data(
bucket_dbs,
bucket_id
)
encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
bucket_dbs,
bucket_id,
data
)
_, _, output_logits = model.step(
sess,
encoder_inputs,
decoder_inputs,
decoder_weights,
bucket_id,
True
)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
ask, _ = data[0]
all_answers = bucket_dbs[bucket_id].all_answers(ask)
ret = data_utils.indice_sentence(outputs)
if not ret:
continue
references = [list(x) for x in all_answers]
score = sentence_bleu(
references,
list(ret),
weights=(1.0,)
)
total_score += score
        print('BLEU: {:.2f} in {} samples'.format(total_score / count * 10, count))
def test(args):
class TestBucket(object):
def __init__(self, sentence):
self.sentence = sentence
def random(self):
            return self.sentence, ''
buckets = data_utils.buckets
with tf.Session() as sess:
model = s2s_model.create_model(sess, True)
model.batch_size = 1
sess.run(tf.initialize_all_variables())
model.saver.restore(sess, os.path.join(args.model_dir, args.model_name))
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
bucket_id = min([
b for b in range(len(buckets))
if buckets[b][0] > len(sentence)
])
data, _ = model.get_batch_data(
{bucket_id: TestBucket(sentence)},
bucket_id
)
encoder_inputs, decoder_inputs, decoder_weights = model.get_batch(
{bucket_id: TestBucket(sentence)},
bucket_id,
data
)
_, _, output_logits = model.step(
sess,
encoder_inputs,
decoder_inputs,
decoder_weights,
bucket_id,
True
)
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
ret = data_utils.indice_sentence(outputs)
print(ret)
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
| 2.328125
| 2
|
bluebottle/projects/migrations/0037_longer_place_20170914_1129.py
|
terrameijar/bluebottle
| 10
|
12776105
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-14 09:29
from __future__ import unicode_literals
import bluebottle.utils.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0036_merge_20170831_1449'),
]
operations = [
migrations.AlterField(
model_name='project',
name='place',
field=models.CharField(blank=True, help_text='Geographical location', max_length=200, null=True),
),
]
| 1.570313
| 2
|
cartography/record.py
|
tingled/synthetic-cartography
| 0
|
12776106
|
import numpy as np
import pyaudio
from pyaudio import PyAudio
from queue import Queue
import struct
from time import sleep
def get_steinberg_device_idx(pa: PyAudio) -> int:
"""
looks up the steinberg device index
"""
for i in range(pa.get_device_count()):
name = pa.get_device_info_by_index(i)['name']
if 'steinberg' in name.lower():
return i
raise Exception("Couldn't find steinberg audio device")
class Recorder:
def __init__(self, chunk_size=512, channels=1):
# for some reason, when chunk size is 1024 we observe some
# non-random discontonuities in the signal every 1024*3 samples,
# which leads to very noticeable transients in the spectrogram
self.format = pyaudio.paFloat32
self.chunk_size = chunk_size
self.channels = channels
self.pa = PyAudio()
self.frame_queue = Queue()
self.device_idx = get_steinberg_device_idx(self.pa)
self.sr = int(self.pa.get_device_info_by_index(self.device_idx)['defaultSampleRate'])
def _get_callback(self):
def cb(input_data, frame_cnt, time_info, status_flags):
self.frame_queue.put(input_data)
return (None, pyaudio.paContinue)
return cb
def start_record(self):
self.stream = self.pa.open(
input_device_index=self.device_idx,
rate=self.sr,
format=self.format,
channels=self.channels,
input=True,
stream_callback=self._get_callback(),
frames_per_buffer=self.chunk_size)
def stop_record(self):
self.stream.stop_stream()
# unpacker = struct.Struct('f' * self.chunk_size)
# input_data = None # TODO
# output = []
# output += unpacker.unpack(input_data)
def read_queue(self):
s = struct.Struct('f'*self.chunk_size)
y = []
while not self.frame_queue.empty():
y += s.unpack(self.frame_queue.get())
return np.array(y)
if __name__ == '__main__':
r = Recorder()
r.start_record()
sleep(2)
r.stop_record()
print(r.read_queue())
| 2.578125
| 3
|
LeetCode/python3/47.py
|
ZintrulCre/LeetCode_Archiver
| 279
|
12776107
|
<reponame>ZintrulCre/LeetCode_Archiver<gh_stars>100-1000
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
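        # Depth-first backtracking: pick one unused index per level (tracked in
        # `visited`), collect full-length permutations, and de-duplicate them.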
def BackTrack(m, per: list):
if m == n:
if per not in permutation:
permutation.append(per)
return per
for i in range(n):
if not visited[i]:
per.append(nums[i])
visited[i] = True
per = BackTrack(m + 1, per)
per = per[:-1]
visited[i] = False
return per
n = len(nums)
visited = [False for _ in range(n)]
per = []
permutation = []
BackTrack(0, [])
return list(set(tuple(k) for k in permutation))
| 3.125
| 3
|
plugin/src/test/resources/refactoring/extractmethod/ClassContext.before.py
|
consulo/consulo-python
| 0
|
12776108
|
class PyCharm:
<selection>print("Hello Pycharm!")</selection>
| 1.421875
| 1
|
plotter/delegation_demand.py
|
kit-tm/fdeval
| 1
|
12776109
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from datetime import datetime
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
import math
import operator
def putval(x1, x2, demand_per_tick, result):
"""
Helper function to calculate the demand over time (stored in result)
based on (x1=start, x2=end, x3=demand_per_second)
"""
for i in range(math.floor(x1)-1, math.floor(x2)+2):
demand = 0
if i-x1 >= 1 and x2-i >= 0:
demand = demand_per_tick
if i-x1 < 1 and i-x1 > 0:
demand = (i-x1)*demand_per_tick
if i-x2 < 1 and i-x2 > 0:
demand = (1-(i-x2))*demand_per_tick
if not result.get(i): result[i] = 0
result[i] += demand
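# Worked example (illustrative numbers, not part of the original script):
# putval(1.5, 3.2, 10, r) spreads 17 units of demand over the ticks, i.e.
# r[2] += (2 - 1.5) * 10 = 5, r[3] += 10, r[4] += (1 - (4 - 3.2)) * 10 = 2,
# which matches the total (3.2 - 1.5) * 10 = 17.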
def plot(ctx):
"Volume that was delegated"
result = dict()
result2 = dict()
# extract total demand over time
events2 = []
for flow in ctx.flows:
events2.append((flow.start, flow.demand_per_tick))
events2.append((flow.start+flow.duration, -flow.demand_per_tick))
putval(flow.start, flow.start+flow.duration, flow.demand_per_tick, result2)
# extract delegated demand over time
events = []
demand_per_port = {}
demand_delegated = 0
demand_total = 0
per_tick = {}
for flow in ctx.flows:
demand_total += flow.duration * flow.demand_per_tick
if len(flow.delegation.reports) > 1:
for r1, r2 in zip(flow.delegation.reports, flow.delegation.reports[1:]):
# start and end time of delegation are recorded
if r1.action == 1 and r2.action == 0:
demand = (r2.tick-r1.tick)*flow.demand_per_tick
demand_delegated += demand
putval(r1.tick, r2.tick, flow.demand_per_tick, result)
assert(demand >= 0)
events.append((r1.tick, demand))
events.append((r2.tick, -demand))
rlast = flow.delegation.reports[-1]
if rlast.action == 1:
demand = (flow.finished_at-rlast.tick)*flow.demand_per_tick
demand_delegated += demand
assert(demand >= 0)
putval(rlast.tick, flow.finished_at, flow.demand_per_tick, result)
events.append((rlast.tick, demand))
events.append((flow.finished_at, -demand))
if len(flow.delegation.reports) == 1:
r1 = flow.delegation.reports[0]
demand = (flow.finished_at-r1.tick)*flow.demand_per_tick
demand_delegated += demand
assert(demand >= 0)
putval(r1.tick, flow.finished_at, flow.demand_per_tick, result)
events.append((r1.tick, demand))
events.append((flow.finished_at, -demand))
fig, ax = plt.subplots(figsize=(8, 3))
fig.tight_layout(pad=2.7)
xvalues = []
yvalues = []
for t, v in sorted(result.items()):
xvalues.append(int(t))
yvalues.append(v/1000.0)
#fig, ax = plt.subplots(figsize=(16, 8))
ax.plot(xvalues, yvalues, color='black', linewidth=1)
ax.set_xlabel('time (s)')
ax.set_ylabel('delegated (Mbit/s)')
#fill_underutil = [True if x < threshold and x+e > threshold else False for x, e in zip(cnt_active_flows, cnt_active_flows_evicted)]
ax.fill_between(xvalues, yvalues, 0, color='orange', alpha=0.3)
#ax.set_title('%s (%s)' % (names[solver], metrics[objective]), fontsize=10, fontweight='bold')
ax.set_xlim(0,450)
#ax.set_ylim(0,350)
ax.yaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.3)
ax.xaxis.grid(True, color='red', linestyle='--', linewidth=1, alpha=0.5)
plt.show()
| 2.9375
| 3
|
pwa_store_backend/pwas/migrations/0016_auto_20210814_1636.py
|
nathanhfoster/pwa-store-backend
| 0
|
12776110
|
# Generated by Django 3.1.13 on 2021-08-14 16:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pwas', '0015_auto_20210814_1622'),
]
operations = [
migrations.RemoveField(
model_name='pwa',
name='tags',
),
migrations.AddField(
model_name='pwa',
name='tags',
field=models.ManyToManyField(related_name='tags', to='pwas.Tag'),
),
]
| 1.445313
| 1
|
test/integration/streams/test_stream.py
|
RSNirwan/my_utils
| 0
|
12776111
|
<reponame>RSNirwan/my_utils<filename>test/integration/streams/test_stream.py
from typing import Any
from dataclasses import dataclass
import pytest
from my_utils.streams.stream import Stream
@dataclass
class Inp:
start: int # required
end: int # required
value: Any # optional
@pytest.fixture
def inps():
return [
Inp(start=5, end=11, value="sth"),
Inp(start=11, end=12, value="sth"),
Inp(start=14, end=22, value="sth"),
Inp(start=0, end=9, value="sth"),
]
def test_Stream(inps):
elements = [10, 15, 20] # will create 2 trafos (10, 15) and (15, 20)
initial_state = 0
def update_state(state, inp, start, end):
diff = min(inp.end, end) - max(inp.start, start)
return state + diff
a = []
collector = lambda x: a.append(x)
stream = Stream(elements, initial_state, update_state, collector)
for inp in inps:
stream(inp)
stream(None) # will close all transformations, which then send their data to sink
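    # Sketch of the expected arithmetic: the (10, 15) window overlaps
    # Inp(5, 11), Inp(11, 12) and Inp(14, 22) by 1 tick each -> 3; the
    # (15, 20) window overlaps Inp(14, 22) by 5 -> 5.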
assert a == [3, 5] # state of each trafo
| 2.453125
| 2
|
setup.py
|
bliof/csvtotable
| 0
|
12776112
|
#!/usr/bin/env python
from setuptools import setup
_version = "2.1.1"
setup(name="csvtotable",
version=_version,
description="Simple commandline utility to convert CSV files"
"to searchable and sortable HTML table.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/vividvilla/csvtotable",
packages=["csvtotable"],
include_package_data=True,
download_url="https://github.com/vividvilla/csvtotable/archive/{}.tar.gz".format(_version),
license="MIT",
classifiers=[
"Development Status :: 4 - Beta", "Intended Audience :: Developers", "Programming Language :: Python",
"Natural Language :: English", "License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries"
],
install_requires=["click >= 6.7", "jinja2 >= 2.9.6", "unicodecsv >= 0.14.1", "six >= 1.10.0"],
entry_points={"console_scripts": [
"csvtotable = csvtotable.cli:cli",
]})
| 1.898438
| 2
|
lib/nets/utils.py
|
HungWei-Andy/tf-mask-rcnn
| 2
|
12776113
|
import tensorflow as tf
from nets.network import Network
import numpy as np
# !! The default data format used here is NHWC !!
# TODO: scope
def conv_bn(X, inChannel, outChannel, kernel, istrain, stride=1, name=None):
out = tf.layers.conv2d(X, outChannel, kernel, stride, 'same', use_bias=False, name=name)
out = tf.layers.batch_normalization(out, training=istrain,
name=name.replace('res', 'bn').replace('conv1', 'bn_conv1'))
return out
def conv_bn_relu(X, inChannel, outChannel, kernel, istrain, stride=1, use_bias=False, name=None):
out = conv_bn(X, inChannel, outChannel, kernel, istrain, stride=stride, name=name)
out = tf.nn.relu(out)
return out
def residual_conv(X, inChannel, interChannel, outputChannel, transition, istrain=False, name=None):
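    # Bottleneck residual unit: 1x1 reduce -> 3x3 -> 1x1 expand on the main
    # branch, plus an identity or projected (1x1, possibly strided) skip
    # branch; the two are summed and passed through ReLU.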
if transition:
init_stride = 2
else:
init_stride = 1
if inChannel == outputChannel:
skip_out = X
else:
skip_out = conv_bn(X, inChannel, outputChannel, 1, istrain,
stride=init_stride, name=name+'_branch1')
conv_out = conv_bn_relu(X, inChannel, interChannel, 1, istrain, name=name+'_branch2a')
conv_out = conv_bn_relu(conv_out, interChannel, interChannel, 3, istrain,
stride=init_stride, name=name+'_branch2b')
conv_out = conv_bn(conv_out, interChannel, outputChannel, 1, istrain, name=name+'_branch2c')
out = tf.nn.relu(skip_out + conv_out)
return out
def residual_block(X, inChannel, interChannel, outputChannel, numLayers,
transition=True, istrain=False, number_name=True, name=None):
if number_name and numLayers > 3:
names = [name+'a'] + [name+'b'+str(i+1) for i in range(numLayers-1)]
else:
names = [name+chr(ord('a')+i) for i in range(numLayers)]
out = residual_conv(X, inChannel, interChannel, outputChannel,
transition=transition, istrain=istrain, name=names[0])
for i in range(numLayers - 1):
out = residual_conv(out, outputChannel, interChannel, outputChannel,
transition=False, istrain=istrain, name=names[i+1])
return out
class ResNet(Network):
def __init__(self, scope=None, istrain=False, reuse=False):
super(ResNet, self).__init__(scope)
self.reuse = reuse
self.istrain = istrain
def _build_resnet(self, numBlock1, numBlock2, numBlock3, numBlock4):
number_name = (self._scope != 'resnet50')
self.conv1 = conv_bn_relu(self.input, 3, 64, 7, istrain=self.istrain, stride=2, name='conv1')
self.pool1 = tf.layers.max_pooling2d(self.conv1, 3, 2, padding='same')
self.conv2 = residual_block(self.pool1, 64, 64, 256, numBlock1, transition=False,
istrain=self.istrain, number_name=number_name, name='res2')
self.conv3 = residual_block(self.conv2, 256, 128, 512, numBlock2,
istrain=self.istrain, number_name=number_name, name='res3')
self.conv4 = residual_block(self.conv3, 512, 256, 1024, numBlock3,
istrain=self.istrain, number_name=number_name, name='res4')
self.conv5 = residual_block(self.conv4, 1024, 512, 2048, numBlock4,
istrain=self.istrain, number_name=number_name, name='res5')
self.pool5 = tf.layers.average_pooling2d(self.conv5, 7, 1)
self.pool5_flat = tf.layers.flatten(self.pool5)
self.scores = tf.layers.dense(self.pool5_flat, 1000, name='fc1000')
return self.scores
def find_key_name(self, var):
key, name = var.name.split('/')[-2:]
if 'kernel' in name or 'weight' in name:
name = 'weights'
elif 'bias' in name:
name = 'biases'
elif 'mean' in name:
name = 'mean'
elif 'variance' in name:
name = 'variance'
elif 'gamma' in name:
name = 'scale'
elif 'beta' in name:
name = 'offset'
else:
raise Exception('%s is not defined in official resnet deploy.txt'%name)
return key, name
def load(self, sess, pretrained_file):
data = np.load(pretrained_file).item()
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self._scope)
for var in variables:
key, name = self.find_key_name(var)
sess.run(var.assign(data[key][name]))
class ResNet50(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet50, self).__init__('resnet50', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 4, 6, 3)
class ResNet101(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet101, self).__init__('resnet101', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 4, 23, 3)
class ResNet152(ResNet):
def __init__(self, *args, **kwargs):
super(ResNet152, self).__init__('resnet152', *args, **kwargs)
def _build_network(self):
return self._build_resnet(3, 8, 36, 3)
| 2.6875
| 3
|
python/nluflows/mapping/bp/__init__.py
|
jiportilla/ontology
| 0
|
12776114
|
from .service_catalog_mapper import ServiceCatalogMapper
| 1.15625
| 1
|
train_DCNN.py
|
taka4abe/JACC_CV
| 1
|
12776115
|
# -*- coding: utf-8 -*-
# This is part of the code used for the article entitled "A Deep Learning
# Approach for Assessment of Regional Wall Motion Abnormality from
# Echocardiographic Images" for JACC CV imaging.
#
# Before using this code, please prepare image data at "./data_folder" dir.
#
# ./data_folder/train/Norm
# ./data_folder/train/LAD
# ./data_folder/train/LCX
# ./data_folder/train/RCA
#
# ./data_folder/test/Norm
# ./data_folder/test/LAD
# ./data_folder/test/LCX
# ./data_folder/test/RCA
#
# Each dir should have echocardiographic images (.png is recommended and .jpg
# acceptable) that contain the end-diastolic, mid-systolic, and end-systolic
# phases. We put the end-diastolic frame in the Red image channel, the
# mid-systolic frame in the Green channel, and the end-systolic frame in the
# Blue channel, using the Python 3.5 programming language with the PIL and
# numpy libraries.
#
# This code was used with
# OS: Ubuntu 14.04LTS
# Programming language: Python 3.5 Anaconda
# Deep Learning library: tensorflow-gpu 1.4.1, Keras 2.1.5
# CUDA toolkit 8.0, CuDNN v5.1
# Python libraries: numpy 1.14.2, Pillow 5.0.0
#
#
# If NeuralNet == "Xception":
# this code takes about 4 min for training (100 epochs, 320 train/valid)
# with core i7 6850K, RAM 256GB, NVMe SSD w 3.5" HDD, 1080ti.
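#
# A minimal sketch of how such a three-phase composite image could be built
# (illustrative only -- the frame file names below are hypothetical and this
# snippet is not part of the original training script):
#
#   import numpy as np
#   from PIL import Image
#   ed = np.array(Image.open('end_diastole.png').convert('L'))   # -> Red
#   ms = np.array(Image.open('mid_systole.png').convert('L'))    # -> Green
#   es = np.array(Image.open('end_systole.png').convert('L'))    # -> Blue
#   rgb = np.stack([ed, ms, es], axis=-1).astype(np.uint8)
#   Image.fromarray(rgb).save('data_folder/train/Norm/example.png')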
import os, keras
import numpy as np
from datetime import datetime
from PIL import Image
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam
# to select which neuralnetwork to use
#NeuralNet = 'VGG16' # ILSVRC image classification top-1 accuracy of 0.715
#NeuralNet = 'VGG19' # ILSVRC image classification top-1 accuracy of 0.727
NeuralNet = 'ResNet50' # ILSVRC image classification top-1 accuracy of 0.759
#NeuralNet = 'DenseNet201' # ILSVRC image classification top-1 accuracy of 0.770
#NeuralNet = 'InceptionV3' # ILSVRC image classification top-1 accuracy of 0.788
#NeuralNet = 'Xception' # ILSVRC image classification top-1 accuracy of 0.790
#NeuralNet = 'IncResV2' # ILSVRC image classification top-1 accuracy of 0.804
# making training data
image_list = []
label_list = []
for dir_name in os.listdir("data_folder/train"):
dir_train = "data_folder/train/" + dir_name
label = 0
if dir_name == "LAD":
label = 0
elif dir_name == "LCX":
label = 1
elif dir_name == "RCA":
label = 2
elif dir_name == "Norm":
label = 3
for file_name in os.listdir(dir_train):
label_list.append(label)
filepath = dir_train + "/" + file_name
if NeuralNet == 'Xception':
image = np.array(Image.open(filepath).resize((128, 128)))
else:
image = np.array(Image.open(filepath).resize((224, 224)))
image_list.append(image / 255)
image_list = np.array(image_list)
label_list = to_categorical(label_list)
#making neural network
if NeuralNet == 'VGG16':
print('NeuralNetwork: VGG16.\nILSVRC top-1 accuracy of 0.715')
DCNN = keras.applications.vgg16.VGG16(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'VGG19':
    print('NeuralNetwork: VGG19.\nILSVRC top-1 accuracy of 0.727')
DCNN = keras.applications.vgg19.VGG19(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'ResNet50':
print('NeuralNetwork: ResNet50.\nILSVRC top-1 accuracy of 0.759')
DCNN = keras.applications.resnet50.ResNet50(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'DenseNet201':
print('NeuralNetwork: DenseNet201.\nILSVRC top-1 accuracy of 0.770')
DCNN = keras.applications.densenet.DenseNet201(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'InceptionV3':
print('NeuralNetwork: InceptionV3.\nILSVRC top-1 accuracy of 0.788')
DCNN = keras.applications.inception_v3.InceptionV3(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'Xception':
print('NeuralNetwork: Xception.\nILSVRC top-1 accuracy of 0.790')
DCNN = keras.applications.xception.Xception(
include_top=True, input_tensor=None, pooling=None, classes=1000)
elif NeuralNet == 'IncResV2':
print('NeuralNetwork: Inception-ResNet-V2.\nILSVRC top-1 accuracy of 0.804')
DCNN = keras.applications.inception_resnet_v2.InceptionResNetV2(
include_top=True, input_tensor=None, pooling=None, classes=1000)
else:
print('error, no neural network.')
opt = Adam(lr = 0.0001)
model = Sequential()
model.add((DCNN))
model.add(Dropout(0.5))
model.add(Dense(4))
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy",
optimizer=opt, metrics=["accuracy"])
#training
print('training')
model.fit(image_list, label_list,
epochs=100, batch_size=16, validation_split=0.2)
#saving post-trained model
prefix = datetime.now().strftime("%Y"+"_"+"%m%d"+"_"+"%H%M")
save_name = NeuralNet + '_' + prefix + '.h5'
model.save_weights(save_name)
print('saving post-trained model:', save_name)
print('finished training.')
print('finished: train_DCNN.py')
| 2.375
| 2
|
heron/common/tests/python/pex_loader/pex_loader_unittest.py
|
takeratta/heron
| 1
|
12776116
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Unittest for pex_loader'''
import os
import unittest
import re
import sys
import heron.common.src.python.pex_loader as pex_loader
import heron.common.tests.python.pex_loader.constants as constants
# pylint: disable=missing-docstring
class PexLoaderTest(unittest.TestCase):
def test_deps_regex(self):
# Testing egg_regex to find dependencies
pass_test_cases = [".deps/sample_egg.egg/",
".deps/sample_egg_1234.egg/",
".deps/sample_egg.egg.egg/",
".deps/sample_egg.whl/",
".deps/sample.egg.whl/"]
for test in pass_test_cases:
# should match without the trailing slash
self.assertEqual(re.match(pex_loader.egg_regex, test).group(1), test[:-1])
fail_test_cases = [".deps/sample_egg/",
".deps/sample_egg.egg", # no trailing slash
".deps/sample/egg.egg/", # contains slash
".deps/sample_ egg.egg/", # contains space
"deps/sample_egg.egg/", # not starting from .deps
"/.deps/sample_egg.egg/", # starting from slash
".deps/sample_whl/",
".deps/sample.egg.wh/",
".deps/sample.whl.egg"]
for test in fail_test_cases:
self.assertIsNone(re.match(pex_loader.egg_regex, test))
def test_load_pex(self):
# Testing load_pex without including deps (including deps requires an actual zip file)
test_path = ['sample.pex', 'sample_123.pex', '/tmp/path.pex']
for path in test_path:
pex_loader.load_pex(path, include_deps=False)
abs_path = os.path.abspath(path)
self.assertIn(os.path.dirname(abs_path), sys.path)
def test_sample(self):
path = self.get_path_of_sample(constants.SAMPLE_PEX)
print(path)
pex_loader.load_pex(path)
cls = pex_loader.import_and_get_class(path, constants.SAMPLE_PEX_CLASSPATH)
self.assertIsNotNone(cls)
self.assertEqual(cls.name, "sample class")
self.assertEqual(cls.age, 100)
@staticmethod
def get_path_of_sample(sample):
file_dir = "/".join(os.path.realpath(__file__).split('/')[:-1])
testdata_dir = os.path.join(file_dir, constants.TEST_DATA_PATH)
sample_pex_path = os.path.join(testdata_dir, sample)
return sample_pex_path
| 2.21875
| 2
|
pdxpixel/apps/search/views.py
|
nicorellius/pdxpixel
| 1
|
12776117
|
from watson.views import BaseListView
class SearchView(BaseListView):
template_name = 'search/default.html'
| 1.328125
| 1
|
review/models.py
|
hossainchisty/Multi-Vendor-eCommerce
| 16
|
12776118
|
from cloudinary.models import CloudinaryField
from customers.models import Customer
from django.db import models
from django.utils.translation import gettext as _
from model.common_fields import BaseModel
from product.models import Product
class Review(BaseModel):
""" Review model for products """
customer = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True)
product = models.ForeignKey(Product, on_delete=models.SET_NULL, null=True)
class Star(models.IntegerChoices):
VS = 5, _('Very satisfied')
ST = 4, _('Satisfied')
NT = 3, _('Neutral')
US = 2, _('Unsatisfied')
VN = 1, _('Very unsatisfied')
star = models.PositiveSmallIntegerField(
_("stars"), choices=Star.choices, default=5)
reviewImage = CloudinaryField('image', null=True, blank=True)
feedback = models.TextField(
help_text="Please share your feedback about the product was the product as described? What is the quality like?",
)
riderReview = models.TextField(
help_text="How was your overall experience with our rider?",
null=True,
blank=True,
)
def __str__(self):
return f"Customer: {self.customer} - Product: {self.product} Rating: - {self.star}"
class Meta:
ordering = ('-star',)
verbose_name_plural = _("Customer feedback")
| 2.25
| 2
|
pollirio/modules/channel.py
|
dpaleino/pollirio
| 0
|
12776119
|
# -*- coding: utf-8 -*-
from pollirio.modules import expose
from pollirio import choose_dest
from pollirio import conf
@expose('mass')
def mass(bot, ievent, msg=None):
if ievent.channel == conf.nickname:
return
modes = bot.userlist[ievent.channel.lower()][ievent.nick]
if '~' in modes or \
'&' in modes or \
'@' in modes or \
'%' in modes or \
ievent.nick == 'dapal':
users = sorted(bot.userlist[ievent.channel.lower()].keys())
bot.msg(ievent.channel, ' '.join(users))
if msg:
bot.msg(ievent.channel, '\x02%s\x0F' % msg)
@expose('call', 1)
def call(bot, ievent):
""" call <messaggio> """
if ievent.channel == conf.nickname:
return
args = ievent.msg.split(' ', 1)
mass(bot, ievent, msg=args[1])
| 2.171875
| 2
|
test.py
|
deemozyc/zycs_WiFi_location
| 2
|
12776120
|
# coding:utf-8
from sklearn import svm
from sklearn.svm import SVC
import csv
import matplotlib
import matplotlib.pyplot as plt
from base import *
def print_base():  # plot the map of sampling points
tst_coord = get_csv('db/02/tst01crd.csv')
tst_coord = coord_zip(tst_coord)
fig,ax = plt.subplots()
plt.axis([2, 16, 14, 32])
plt.scatter([ i[0] for i in tst_coord], [i[1] for i in tst_coord])
"""
plt.plot([6,6],[14,32],color="red")
plt.plot([11,11],[14,32],color="red")
plt.plot([2,16],[21,21],color="red")
plt.plot([2,16],[25,25],color="red")
"""
    # draw the bookshelves
shelve_coord = get_shelve_coord()
    for i in range(0,len(shelve_coord),4):  # 4 coordinates per bookshelf
x_min = min(shelve_coord[i][0], shelve_coord[i+1][0], shelve_coord[i+2][0], shelve_coord[i+3][0])
x_max = max(shelve_coord[i][0], shelve_coord[i+1][0], shelve_coord[i+2][0], shelve_coord[i+3][0])
y_min = min(shelve_coord[i][1], shelve_coord[i+1][1], shelve_coord[i+2][1], shelve_coord[i+3][1])
y_max = max(shelve_coord[i][1], shelve_coord[i+1][1], shelve_coord[i+2][1], shelve_coord[i+3][1])
rect = mpathes.Rectangle([x_min, y_min],x_max-x_min, y_max-y_min,color='gray',alpha=0.5)
ax.add_patch(rect)
"""
    SVM classification lines
plt.plot([4.1347719, 4.1347719],[25.64033581, 29.21654402],color="red")
plt.plot([4.1347719, 4.1347719],[22.06412759, 23.8522317],color="red")
plt.plot([4.1347719, 4.1347719],[16.69981527, 20.27602349],color="red")
plt.plot([8.52431189, 8.52431189],[25.64033581, 29.21654402],color="red")
plt.plot([8.52431189, 8.52431189],[22.06412759, 23.8522317],color="red")
plt.plot([8.52431189, 8.52431189],[16.69981527, 20.27602349],color="red")
plt.plot([12.91385188, 12.91385188],[25.64033581, 29.21654402],color="red")
plt.plot([12.91385188, 12.91385188],[22.06412759, 23.8522317],color="red")
plt.plot([12.91385188, 12.91385188],[16.69981527, 20.27602349],color="red")
"""
plt.show()
def print_all_CDF():
    # the files are knn_err_data.txt and svm_err_data.txt
f=open("knn_err_data.txt","r")
knn_data_list = []
for line in f.readlines():
knn_data_list.append(float(line))
print "knn_err_data:", len(knn_data_list), sum(knn_data_list)/len(knn_data_list), " m"
f.close()
f=open("svm_err_data.txt","r")
svm_data_list = []
for line in f.readlines():
svm_data_list.append(float(line))
print "svm_err_data:", len(svm_data_list),sum(svm_data_list)/len(svm_data_list), " m"
f.close()
svm_data_list.sort()
knn_data_list.sort()
y_data = []
for i in range(len(svm_data_list)):
y_data.append((i+1.0)/len(svm_data_list)*100)
#plt.title('CDF graph')
plt.xlabel('Error distance(m)')
plt.ylabel('percentage(%)')
plt.plot(knn_data_list,y_data,"--,",label='knn')
plt.plot(svm_data_list,y_data,"-.,",label='svm')
plt.legend()
plt.show()
return
if __name__ == "__main__":
print "it is test.py!"
"""
数据集个数的数据
x_data = [1,2,4,8,15]
y_data = [2.38,1.96,1.55,1.21,0.97]
plt.ylim((0,2.5))
plt.xlabel('number of train set')
plt.ylabel('error(m)')
"""
"""
k值
x_data = [1,3,5,6,9,15]
y_data = [2.56,2.34,2.41,2.45,2.6,2.85]
#plt.ylim((0,2.5))
plt.xlabel('K')
plt.ylabel('error(m)')
"""
"""
    AP selection
x_data = [448,200,100,80,60,40,20,10,5]
y_data = [2.34,2.32,2.31,2.30,2.36,2.50,2.42,2.61,2.99]
#plt.ylim((0,2.5))
plt.xlabel('number of AP')
plt.ylabel('error(m)')
"""
"""
write and read
aa = [2,3,4,1,2]
f=open("test.txt","w")
for line in aa:
f.write(str(line)+'\n')
f.close()
f=open("test.txt","r")
data_list = []
for line in f.readlines():
data_list.append(float(line))
print data_list
"""
print_base()
#print_all_CDF()
#plt.plot(x_data,y_data)
#plt.show()
| 2.65625
| 3
|
hosts/league_setup.py
|
nathanbraun/fantasy-developer-kit
| 6
|
12776121
|
import hosts.fleaflicker as site
from hosts.db import overwrite_league
from pandas import DataFrame
import sqlite3
from utilities import DB_PATH
LEAGUE_ID = 34958
TEAM_ID = 217960
LEAGUE_NAME = "Family League"
HOST = 'fleaflicker'
SCORING = {'qb': 'pass6', 'skill': 'ppr', 'dst': 'high'}
# open up our database connection
conn = sqlite3.connect(DB_PATH)
# team list
teams = site.get_teams_in_league(LEAGUE_ID)
overwrite_league(teams, 'teams', conn, LEAGUE_ID)
# schedule info
schedule = site.get_league_schedule(LEAGUE_ID)
overwrite_league(schedule, 'schedule', conn, LEAGUE_ID)
# league info
league = DataFrame([{'league_id': LEAGUE_ID, 'team_id': TEAM_ID, 'host':
HOST.lower(), 'name': LEAGUE_NAME, 'qb_scoring':
SCORING['qb'], 'skill_scoring': SCORING['skill'],
'dst_scoring': SCORING['dst']}])
overwrite_league(league, 'league', conn, LEAGUE_ID)
| 2.75
| 3
|
TWLight/users/migrations/0024_userprofile_lang.py
|
saloniig/TWLight
| 67
|
12776122
|
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("users", "0023_auto_20170820_1623")]
operations = [
migrations.AddField(
model_name="userprofile",
name="lang",
field=models.CharField(
blank=True,
max_length=128,
null=True,
help_text="Language",
choices=[(b"en", "English"), (b"fi", "Finnish"), (b"fr", "French")],
),
)
]
| 1.765625
| 2
|
tests/test_property.py
|
frmdstryr/atom
| 1
|
12776123
|
<gh_stars>1-10
#------------------------------------------------------------------------------
# Copyright (c) 2013-2018, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
"""Test the property and cached property member
"""
import pytest
from atom.api import (Atom, Int, Property, GetAttr, SetAttr,
observe, cached_property)
from atom.catom import DelAttr, reset_property
def test_property1():
"""Test defining a property using the arguments.
"""
def get_function(obj):
return obj.i
def set_function(obj, value):
obj.i = value
def del_function(obj):
del obj.i
class PropertyTest(Atom):
p = Property(get_function, set_function, del_function)
i = Int()
assert not PropertyTest.p.cached
assert PropertyTest.p.fget is get_function
assert PropertyTest.p.fset is set_function
assert PropertyTest.p.fdel is del_function
pt = PropertyTest()
assert pt.p == 0
pt.p = 10
assert pt.i == 10
assert pt.p == 10
del pt.p
assert pt.p == 0
def test_property2():
"""Test defining a property using the decorators.
"""
class PropertyTest(Atom):
p = Property()
i = Int()
@p.getter
def get_function(obj):
return obj.i
@p.setter
def set_function(obj, value):
obj.i = value
@p.deleter
def del_function(obj):
del obj.i
pt = PropertyTest()
assert pt.p == 0
pt.p = 10
assert pt.i == 10
assert pt.p == 10
del pt.p
assert pt.p == 0
def test_property3():
"""Test defining a property mangled method names.
"""
class PropertyTest(Atom):
p = Property()
i = Int()
def _get_p(self):
return self.i
def _set_p(self, value):
self.i = value
def _del_p(self):
del self.i
pt = PropertyTest()
assert pt.p == 0
pt.p = 10
assert pt.i == 10
assert pt.p == 10
del pt.p
assert pt.p == 0
def test_property4():
"""Test handling missing function(fget, fset, fdel)
"""
class PropertyTest(Atom):
p = Property()
pt = PropertyTest()
with pytest.raises(AttributeError):
pt.p
with pytest.raises(AttributeError):
pt.p = 1
with pytest.raises(AttributeError):
del pt.p
def test_cached_property():
"""Test using a cached property.
"""
class PropertyTest(Atom):
i = Int()
@cached_property
def prop(self):
self.i += 1
return self.i
assert PropertyTest.prop.cached
pt = PropertyTest()
assert pt.prop == 1
assert pt.prop == 1
pt.get_member('prop').reset(pt)
assert pt.prop == 2
def test_enforce_read_only_cached_property():
"""Check a cached property has to be read-only.
"""
def get(self):
pass
def set(self, value):
pass
with pytest.raises(ValueError):
Property(get, set, cached=True)
with pytest.raises(ValueError):
p = Property(cached=True)
p.setter(set)
def test_observed_property():
"""Test observing a property.
"""
class PropertyTest(Atom):
i = Int()
counter = Int()
prop = Property()
@prop.getter
def _prop(self):
self.i += 1
return self.i
@observe('prop')
def observe_cp(self, change):
self.counter += 1
pt = PropertyTest()
assert pt.prop == 1
assert pt.prop == 2
pt.observe('prop', pt.observe_cp)
pt.get_member('prop').reset(pt)
assert pt.counter == 2
def test_wrong_reset_arguments():
"""Test the handling of wrong arguments in reset.
"""
prop = Property()
with pytest.raises(TypeError) as excinfo:
reset_property()
assert '2 arguments' in excinfo.exconly()
with pytest.raises(TypeError) as excinfo:
reset_property(None, None)
assert 'Member' in excinfo.exconly()
with pytest.raises(TypeError) as excinfo:
prop.reset(None)
assert 'CAtom' in excinfo.exconly()
with pytest.raises(SystemError) as excinfo:
prop.reset(Atom())
assert 'invalid member index' in excinfo.exconly()
@pytest.mark.parametrize('mode, func',
[(GetAttr, 'set_getattr_mode'),
(SetAttr, 'set_setattr_mode'),
(DelAttr, 'set_delattr_mode')])
def test_property_mode_args_validation(mode, func):
"""Test that a delegator properly validate the arguments when setting mode.
"""
with pytest.raises(TypeError) as excinfo:
getattr(Property(), func)(getattr(mode, 'Property'), 1)
assert 'callable or None' in excinfo.exconly()
| 2.5625
| 3
|
tests/test_tyco107.py
|
atollk/flake8-typing-collections
| 2
|
12776124
|
<filename>tests/test_tyco107.py
from tests.util import BaseTest
class Test_TYCO107(BaseTest):
@classmethod
def flags(cls):
return ["--tyco_generic_alt"]
def test_pass_1(self):
code = """
import typing
def foo(x: typing.AbstractSet):
...
"""
result = self.run_flake8(code)
assert result == []
def test_fail_1(self):
code = """
import collections.abc
def foo(x: collections.abc.Set):
...
"""
result = self.run_flake8(code)
self.assert_error_at(result, "TYCO107", 2, 12)
def test_fail_2(self):
code = """
from collections.abc import Set
def foo(x) -> Set:
...
"""
result = self.run_flake8(code)
self.assert_error_at(result, "TYCO107", 2, 15)
| 2.671875
| 3
|
models/DUP_Net.py
|
KaidongLi/pytorch-LatticePointClassifier
| 0
|
12776125
|
import os
import numpy as np
import torch
import torch.nn as nn
from .pu_net import PUNet
class SORDefense(nn.Module):
"""Statistical outlier removal as defense.
"""
def __init__(self, k=2, alpha=1.1):
"""SOR defense.
Args:
k (int, optional): kNN. Defaults to 2.
            alpha (float, optional): threshold is \mu + \alpha * std. Defaults to 1.1.
"""
super(SORDefense, self).__init__()
self.k = k
self.alpha = alpha
def outlier_removal(self, x):
"""Removes large kNN distance points.
Args:
x (torch.FloatTensor): batch input pc, [B, K, 3]
Returns:
torch.FloatTensor: pc after outlier removal, [B, N, 3]
"""
pc = x.clone().detach().double()
B, K = pc.shape[:2]
pc = pc.transpose(2, 1) # [B, 3, K]
inner = -2. * torch.matmul(pc.transpose(2, 1), pc) # [B, K, K]
xx = torch.sum(pc ** 2, dim=1, keepdim=True) # [B, 1, K]
dist = xx + inner + xx.transpose(2, 1) # [B, K, K]
assert dist.min().item() >= -1e-6
# the min is self so we take top (k + 1)
neg_value, _ = (-dist).topk(k=self.k + 1, dim=-1) # [B, K, k + 1]
value = -(neg_value[..., 1:]) # [B, K, k]
value = torch.mean(value, dim=-1) # [B, K]
mean = torch.mean(value, dim=-1) # [B]
std = torch.std(value, dim=-1) # [B]
threshold = mean + self.alpha * std # [B]
bool_mask = (value <= threshold[:, None]) # [B, K]
sel_pc = [x[i][bool_mask[i]] for i in range(B)]
return sel_pc
def forward(self, x):
with torch.no_grad():
x = self.outlier_removal(x)
return x
class DUPNet(nn.Module):
def __init__(self, sor_k=2, sor_alpha=1.1,
npoint=1024, up_ratio=4):
super(DUPNet, self).__init__()
self.npoint = npoint
self.sor = SORDefense(k=sor_k, alpha=sor_alpha)
self.pu_net = PUNet(npoint=self.npoint, up_ratio=up_ratio,
use_normal=False, use_bn=False, use_res=False)
def process_data(self, pc, npoint=None):
"""Process point cloud data to be suitable for
PU-Net input.
        We do one of two things:
        sample down to npoint points, or duplicate points up to npoint.
Args:
pc (torch.FloatTensor): list input, [(N_i, 3)] from SOR.
Need to pad or trim to [B, self.npoint, 3].
"""
if npoint is None:
npoint = self.npoint
B = len(pc)
proc_pc = torch.zeros((B, npoint, 3)).float().cuda()
for pc_idx in range(B):
one_pc = pc[pc_idx]
# [N_i, 3]
N = len(one_pc)
if N > npoint:
# random sample some of them
idx = np.random.choice(N, npoint, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = one_pc[idx]
elif N < npoint:
# just duplicate to the number
duplicated_pc = one_pc
num = npoint // N - 1
for i in range(num):
duplicated_pc = torch.cat([
duplicated_pc, one_pc
], dim=0)
num = npoint - len(duplicated_pc)
# random sample the remaining
idx = np.random.choice(N, num, replace=False)
idx = torch.from_numpy(idx).long().cuda()
one_pc = torch.cat([
duplicated_pc, one_pc[idx]
], dim=0)
proc_pc[pc_idx] = one_pc
return proc_pc
def forward(self, x):
# import pdb; pdb.set_trace()
with torch.no_grad():
x = self.sor(x) # a list of pc
x = self.process_data(x) # to batch input
x = self.pu_net(x) # [B, N * r, 3]
return x
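# Illustrative usage sketch (added, not part of the original module): exercises
# the SOR step described in the SORDefense docstring on a random batch. Runs on
# CPU; DUPNet itself is skipped because it expects CUDA and a trained PU-Net.
# Run with `python -m models.DUP_Net` so the relative PUNet import resolves.
if __name__ == "__main__":
    defense = SORDefense(k=2, alpha=1.1)
    clouds = torch.rand(4, 1024, 3)            # batch of 4 random point clouds
    kept = defense(clouds)                     # list of [N_i, 3] tensors
    print([tuple(pc.shape) for pc in kept])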
| 2.84375
| 3
|
ex068.py
|
nascimentobrenda24/PythonExercises
| 1
|
12776126
|
# Write a program that plays odds-or-evens against the computer. The game is only interrupted when the player loses,
# showing the total number of consecutive wins the player achieved at the end of the game.
print(20 * '\033[1;33m=*', 'JOGO PAR & ÍMPAR', '=*' * 20)
print('\033[1;34m~~' * 20, 'MANUAL DE INSTRUÇÕES DO GAME:', '~~' * 200,
'\n1º) Impar e Par, aqui, são representados como: \033[1;4;31mI; \033[1;4;33mP \033[1;34m'
'\n2º) NO EMPATE TODOS GANHAM +0 pts'
'\n3º) TENHA UM BOM JOGO E DIVIRTA-SE\033[m')
print('\033[1;34m~~' * 200)
# Importações Necessárias
from random import choice, randint
from emoji import emojize
# Variávies de controle de 'while', contabilizadoras e aleatorização.
vitoria = False
vitoria_pc = False
usuario = pc = par_impar_pc = impar = par = 0
escolha_pc = ['P', 'I'] # PAR E IMPAR
# Enquanto a vitória do computador for falsa, rodaremos o loop.
while vitoria_pc == False:
# Definindo a função do usuário e suas escolhas
usuario = int(input('\033[1;31;40mESCOLHA UM NÚMERO INTEIRO: '))
par_impar_user = str(input('\033[1;34;40mAPOSTA EM QUAL?[PAR VS ÍMPAR]:\nR=')).upper()[0]
# Definindo a função e randomização do PC
pc = randint(1, 1000) # Possíveis escolhas do PC
par_impar_pc = choice(escolha_pc)
# Verificando vencedor VS perdedor
# 1ª hipótese: EMPATE. PC = USER.
if par_impar_user == par_impar_pc:
print(f'\033[1;4;31mO Usuário escolheu: {par_impar_user} e O PC escolheu: {par_impar_pc}')
# Se fora empate, os dois recebem 0 pt.
vitoria += 0
vitoria_pc += 0
# PLACAR ATUAL DE EMPATE
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR ATUAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Passando dessa verificação
# (2ª hipótese: JOGO NORMAL)
else:
# Somaremos o número escolhido pelo usuário pelo do pc, para sabermos se é: PAR ou ÍMPAR.
valor_final = usuario + pc
# Definindo Números Ímpares
if valor_final % 2 != 0: # Número Ímpar é definido por seu quosciente não ser exato na divisão por 2.
impar += 1 # Números ímpares sendo contabilizados(Qualifica se é ímpar, nessa contagem)
# Classificando quem igualou-se, no palpite, ao Ímpar(PC OU USER).
# Quando for o user, o programa parabeniza-o e continua nesse loop infinito.
if par_impar_user == 'I' and impar != 0 and par_impar_pc == 'P':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = ÍMPAR|')
print(emojize('PARABÉNS, VOCÊ VENCEU, ESCOLHEU ÍMPAR!!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!',
use_aliases=True))
vitoria += 1 # contabilizando as quantidades de vitórias do user
# PLACAR DA VITÓRIA DO USER
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR ATUAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Quando for o PC, o programa, como pedido, parará. (break)
elif par_impar_pc == 'I' and impar != 0 and par_impar_user == 'P':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = ÍMPAR|')
print(emojize('\nHAHAHAHAHAHA VENCI. ESCOLHI ÍMPAR!!:joy:', use_aliases=True))
vitoria_pc += 1 # contabilizando as quantidades de vitórias do pc
break
# Definindo Números Pares
if valor_final % 2 == 0:
par += 1 # Números Pares sendo contabilizados(Qualifica se é Par, nessa contagem)
# Classificando quem igualou-se, no palpite, ao valor Par(PC OU USER).
# Quando for o user, o programa parabeniza-o e continua nesse loop infinito.
            if par_impar_user == 'P' and par != 0 and par_impar_pc == 'I':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = PAR|')
print(emojize('PARABÉNS, VOCÊ GANHOU, ESCOLHEU PAR!!:sweat_smile:\nDA PRÓXIMA TE GANHAREI!!',
use_aliases=True))
vitoria += 1 # contabilizando as quantidades de vitórias do usuário
# PLACAR DA VITÓRIA DO USER
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR ATUAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Quando for o PC, o programa, como pedido, parará. (break)
elif par_impar_pc == 'P' and par != 0 and par_impar_user == 'I':
print(f'| USUÁRIO: {usuario} VS PC: {pc} = {valor_final} = PAR|')
print(emojize('HAHAHA VENCI, ESCOLHI PAR!!:joy:', use_aliases=True))
vitoria_pc += 1 # contabilizando as quantidades de vitórias do pc
break
'''# PLACAR FINAL( USUÁRIO VS PC )
print('\033[1;30;46m')
print('~' * 200)
print(f'| PLACAR FINAL |\n'
f'\n Vitórias do usuário: {vitoria} \n'
f'\n VS \n'
f'\n Vitórias do PC: {vitoria_pc} \n\033[m')
print('~' * 200)
# Outra solução própria
# Faça um programa para jogar par ou ímpar com o computador. O jogo só será interrompido quando o jogador perder,
# mostrando o total de vitórias consecutivas que ele conquistou no final do jogo.
# Adaptation: The game will stop when user request
from random import randint, choice
from emoji import emojize
victories_user = 0
victories_pc = 0
print('\033[1;30;41m=*'*20, 'BEM VINDOS AO' ' \033[1;30;46mPAIR OR OPP´s GAME\033[1;30;41m', '=*'*20)
while True:
proceed = str(input('Deseja prosseguir com o jogo? [S/N]')).upper()[0]
if proceed == 'N':
print('\033[1;33;40mJOGO FINALIZADO COM SUCESSO')
break
else:
# USER
user = int(input('\033[1;34;40mEscolha um valor de 0 a 10: '))
while user > 10:
print(emojize('\033[1;40;33mOps...Excedeu o valor proposto!!!\nTente Novamente:thumbs_up:'))
user = int(input('\033[1;34;40mEscolha um valor de 0 a 10: \033[m '))
user_choice = str(input('PAR OU IMPAR?[P/I]:')).upper()[0]
print(f'Você escolheu o número {user} e apostou no {user_choice}' if user_choice == 'PAR'
else f'Você escolheu o número {user} e apostou no {user_choice}MPAR \033[m')
# PC
random_pc_numbers = randint(0, 10)
pc_pair_opp = ['PAR', 'IMPAR']
random_pair_opp = choice(pc_pair_opp)
print(f'\033[1;33mO PC escolheu o número {random_pc_numbers} e apostou no {random_pair_opp} \033[m')
# Final Number's Winner
winner = random_pc_numbers + user
# Final Validation
#1) Winner
# 1º case : Sum pair
if winner % 2 == 0:
if user_choice == 'P' and random_pair_opp == 'IMPAR':
print(f'\033[1;30;42m PARABÉNS!! VOCÊ APOSTOU NO PAR E DEU {winner}')
victories_user += 1
elif user_choice == 'I' and random_pair_opp == 'PAR':
print(f'\033[1;30;42mOPS...VOCÊ APOSTOU IMPAR E PERDEU!!!\n O PC APOSTOU PAR E DEU JUSTAMENTE {winner}')
victories_pc += 1
else:
print(f'\033[1;30;42mCOINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU {user_choice}AR E O PC TAMBÉM {random_pair_opp} '
f'E DEU JUSTAMENTE {winner}'if user_choice == 'P'
else f'COINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU NO {user_choice}MPAR E O PC TAMBÉM APOSTOU NO {random_pair_opp}'
f'E DEU JUSTAMENTE {winner}')
victories_user += 1
victories_pc += 1
# 2 º case : Sum opp
if winner % 2 != 0:
if user_choice == 'I' and random_pair_opp == 'PAR':
print(f'\033[1;30;42mPARABÉNS!! VOCÊ APOSTOU NO IMPAR E DEU {winner}')
victories_user += 1
elif user_choice == 'P' and random_pair_opp == 'IMPAR':
print(f'\033[1;30;42mOPS...VOCÊ APOSTOU PAR E PERDEU!!\n O PC APOSTOU IMPAR E DEU {winner}')
victories_pc += 1
else:
print(f'\033[1;30;42mCOINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU {user_choice}AR E O PC TAMBÉM APOSTOU NO {random_pair_opp} '
f' E DEU JUSTAMENTE {winner}' if user_choice == 'P'
else f'COINCIDÊNCIA OU NÃO...HOUVE EMPATE!!!\n VOCÊ APOSTOU {user_choice}MPAR E O PC TAMBÉM APOSTOU NO {random_pair_opp}'
f' E DEU JUSTAMENTE {winner}')
victories_user += 1
victories_pc += 1
# Final Score
print('=*'*15, f'PLACAR FINAL', '=*'*15)
print(f'\033[1;36;40m\n\nVOCÊ\033[m : {victories_user} \n\033[1;33;40m VS \n\033[1;35;40mPC\033[m : {victories_pc}')
print('\033[1;33;40m=*'*37)
# Score Validation
if victories_user > victories_pc:
print('FECHAMOS O PROGRAMA COM VOCÊ SENDO O VENCEDOR!!!\n Parabéns e volte sempre')
elif victories_pc == victories_user:
print('FECHAMOS O PROGRAMA COM EMPATE!!\nACEITAMOS REVANCHE!!')
else:
print('FECHAMOS O PROGRAMA COM A VITÓRIA DA MÁQUINA!!\n ACEITAMOS REVANCHE!!')
'''
| 4.03125
| 4
|
Trakttv.bundle/Contents/Libraries/Shared/oem/media/show/__init__.py
|
disrupted/Trakttv.bundle
| 1,346
|
12776127
|
<reponame>disrupted/Trakttv.bundle
from oem.media.show.identifier import EpisodeIdentifier # NOQA
from oem.media.show.mapper import ShowMapper # NOQA
from oem.media.show.match import EpisodeMatch # NOQA
| 1.046875
| 1
|
panon/plasmoid/contents/scripts/panon/backend/get_pa_devices.py
|
ByteDream/dead-projects
| 0
|
12776128
|
from soundcard import pulseaudio as sc
import json
l = []
for mic in sc.all_microphones(exclude_monitors=False):
l.append({'id': mic.id, 'name': mic.name})
s = json.dumps(l)
print(s)
| 2.703125
| 3
|
src/pretix/control/forms/organizer.py
|
abrock/pretix
| 1
|
12776129
|
<gh_stars>1-10
from django import forms
from django.utils.translation import ugettext_lazy as _
from pretix.base.forms import VersionedModelForm
from pretix.base.models import Organizer
class OrganizerForm(VersionedModelForm):
error_messages = {
'duplicate_slug': _("This slug is already in use. Please choose a different one."),
}
class Meta:
model = Organizer
fields = ['name', 'slug']
def clean_slug(self):
slug = self.cleaned_data['slug']
if Organizer.objects.filter(slug=slug).exists():
raise forms.ValidationError(
self.error_messages['duplicate_slug'],
code='duplicate_slug',
)
return slug
class OrganizerUpdateForm(OrganizerForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['slug'].widget.attrs['disabled'] = 'disabled'
def clean_slug(self):
return self.instance.slug
| 1.976563
| 2
|
src/controllers/google_controller.py
|
qooba/heygoogle-with-rasa
| 1
|
12776130
|
import os
import json
import logging
from services.security import Security
from services.agent import AgentFactory
from controllers.controller import routes, Controller
from rasa.core.channels.channel import UserMessage
logger = logging.getLogger(__name__)
class GoogleController(Controller):
def __init__(self, security: Security):
super().__init__()
self.security=security
@routes.get('/')
async def hello(self, request):
text = request.rel_url.query['text']
response=await AgentFactory.load().handle_text(text)
logger.info(json.dumps(response))
return self.json(response)
@routes.post("/google_action")
async def google_webhook(self, request):
req = await request.json()
print(json.dumps(req))
authorization= request.headers['Google-Assistant-Signature']
print(authorization)
print(request.headers)
#self.security.verify_token(authorization)
session_id = req['session'].get('id', None)
locale = req['user']['locale']
lang = locale[:2]
if req['intent']['name'] == 'actions.intent.MAIN':
response_text=os.environ['WELCOME_TEXT']
else:
text = req['intent']['query']
user_message=UserMessage(text=text, sender_id=session_id)
response=await AgentFactory.load().handle_message(user_message)
logger.info(json.dumps(response))
response_text=response[0]['text']
resp={
"session": {
"id": "example_session_id",
"params": {}
},
"prompt": {
"override": False,
"firstSimple": {
"speech": response_text,
"text": response_text
}
},
"scene": {
"name": "Main",
"slots": {},
"next": {
"name": "Main"
}
}
}
return self.json(resp)
#
#
# @routes.post('/google_action')
# def google_action(item: dict, authorization: str = Header(None)):
# print(item, file=sys.stderr)
# print(authorization, file=sys.stderr)
# context = ConversationContext()
# context.request = item
# context: ConversationContext = handler_manager.invoke(context)
# print(context.response, file=sys.stderr)
# return json.loads(context.response)
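# Reference sketch (added, not from the original file): the minimal request body
# that google_webhook above actually reads -- only these keys are touched:
#
#   {
#     "session": {"id": "<session id>"},
#     "user": {"locale": "en-US"},
#     "intent": {"name": "<any non-MAIN intent>", "query": "hello"}
#   }
#
# An intent name of "actions.intent.MAIN" selects the WELCOME_TEXT greeting;
# any other name forwards intent["query"] to the Rasa agent.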
| 2.328125
| 2
|
upload.py
|
markraemer/limesurvey-factorial-vignette
| 1
|
12776131
|
<filename>upload.py
import base64
from limesurveyrc2api.limesurvey import LimeSurvey
url = "https://livinginsmarthomes.limequery.com/admin/remotecontrol"
username = ""
password = ""
surveyID = ""
# Open a session.
api = LimeSurvey(url=url, username=username)
api.open(password=password)
# Get a list of surveys the admin can see, and print their IDs.
result = api.survey.list_surveys()
#for survey in result:
#print(survey.get("sid"))
# check if any questions have been uploaded previously
# this will replace all previous uploads
result = api.list_groups(surveyID)
if ('status' not in result):
print("deleting all groups")
for group in result:
api.delete_group(surveyID,group['gid'])
#
print("uploading data ...")
up_file = open("output/question-group.lsg","r")
data = up_file.read()
encoded = base64.b64encode(data.encode())
api.import_group(surveyID,encoded.decode(),"lsg")
# dirty hack to overcome group ordering problem
api.delete_group(surveyID,api.add_group(surveyID,"dummy","dummy"))
print("... done")
# Close the session.
api.close()
| 2.859375
| 3
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Weights/Correlations/FLOPS/operating_items.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
| 0
|
12776132
|
## @ingroup Methods-Weights-Correlations-FLOPS
# operating_items.py
#
# Created: May 2020, <NAME>
# Modified:
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units, Data
import numpy as np
## @ingroup Methods-Weights-Correlations-FLOPS
def operating_items_FLOPS(vehicle):
""" Calculate the weight of operating items, including:
- crew
- baggage
- unusable fuel
- engine oil
- passenger service
- ammunition and non-fixed weapons
- cargo containers
Assumptions:
If no tanks are specified, 5 fuel tanks are assumed (includes main and auxiliary tanks)
        If the number of coach seats is not defined, then it is assumed that 5% of
        the seats are first class and an additional 10% are business class.
If the number of coach seats is defined, then the additional seats are 1/4 first class
and 3/4 business class
Source:
The Flight Optimization System Weight Estimation Method
Inputs:
vehicle - data dictionary with vehicle properties [dimensionless]
-.networks: data dictionary containing all propulsion properties
-.number_of_engines: number of engines
-.sealevel_static_thrust: thrust at sea level [N]
-.reference_area: wing surface area [m^2]
-.mass_properties.max_takeoff: MTOW [kilograms]
-.passengers: number of passengers in aircraft
-.design_mach_number: design mach number for cruise flight
-.design_range: design range of aircraft [nmi]
-.mass_properties.cargo: weight of cargo carried [kilograms]
Outputs:
output - data dictionary with weights [kilograms]
- output.oper_items: unusable fuel, engine oil, passenger service weight and cargo containers
- output.flight_crew: flight crew weight
- output.flight_attendants: flight attendants weight
- output.total: total operating items weight
Properties Used:
N/A
"""
network_name = list(vehicle.networks.keys())[0]
networks = vehicle.networks[network_name]
NENG = networks.number_of_engines
THRUST = networks.sealevel_static_thrust * 1 / Units.lbf
SW = vehicle.reference_area / Units.ft ** 2
FMXTOT = vehicle.mass_properties.max_zero_fuel / Units.lbs
DESRNG = vehicle.design_range / Units.nmi
VMAX = vehicle.design_mach_number
NTANK = 0 # number of fuel tanks
for wing in vehicle.wings:
NTANK += len(wing.Fuel_Tanks)
for fuselage in vehicle.fuselages:
NTANK += len(fuselage.Fuel_Tanks)
if NTANK == 0:
NTANK = 5
WUF = 11.5 * NENG * THRUST ** 0.2 + 0.07 * SW + 1.6 * NTANK * FMXTOT ** 0.28 # unusable fuel weight
WOIL = 0.082 * NENG * THRUST ** 0.65 # engine oil weight
if hasattr(vehicle.fuselages['fuselage'], 'number_coach_seats'):
NPT = vehicle.fuselages['fuselage'].number_coach_seats # number of economy passengers
        NPF = (vehicle.passengers - NPT) / 4.  # number of first class passengers
        NPB = vehicle.passengers - NPF - NPT  # number of business class passengers
else:
NPF = vehicle.passengers / 20.
NPB = vehicle.passengers / 10.
NPT = vehicle.passengers - NPF - NPB
vehicle.NPF = NPF
vehicle.NPB = NPB
vehicle.NPT = NPT
WSRV = (5.164 * NPF + 3.846 * NPB + 2.529 * NPT) * (DESRNG / VMAX) ** 0.255 # passenger service weight
WCON = 175 * np.ceil(vehicle.mass_properties.cargo / Units.lbs * 1. / 950) # cargo container weight
if vehicle.passengers >= 150:
NFLCR = 3 # number of flight crew
NGALC = 1 + np.floor(vehicle.passengers / 250.) # number of galley crew
else:
NFLCR = 2
NGALC = 0
if vehicle.passengers < 51:
NFLA = 1 # number of flight attendants, NSTU in FLOPS
else:
NFLA = 1 + np.floor(vehicle.passengers / 40.)
WFLAAB = NFLA * 155 + NGALC * 200 # flight attendant weight, WSTUAB in FLOPS
WFLCRB = NFLCR * 225 # flight crew and baggage weight
output = Data()
output.operating_items_less_crew = WUF * Units.lbs + WOIL * Units.lbs + WSRV * Units.lbs + WCON * Units.lbs
output.flight_crew = WFLCRB * Units.lbs
output.flight_attendants = WFLAAB * Units.lbs
output.total = output.operating_items_less_crew + output.flight_crew + \
output.flight_attendants
return output
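# Rough sanity check of the crew logic above (illustrative numbers only, not
# SUAVE output): for a hypothetical 180-passenger aircraft, passengers >= 150
# gives NFLCR = 3 and NGALC = 1 + floor(180/250) = 1, passengers >= 51 gives
# NFLA = 1 + floor(180/40) = 5, so WFLCRB = 3 * 225 = 675 lb and
# WFLAAB = 5 * 155 + 1 * 200 = 975 lb before conversion to kilograms.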
| 2.578125
| 3
|
lessons 21/HomeWork/task7.py
|
zainllw0w/skillbox
| 0
|
12776133
|
<reponame>zainllw0w/skillbox
def f(*args):
summ = 0
for i in args:
if isinstance(i, list):
for s in i:
summ += f(s)
elif isinstance(i, tuple):
for s in i:
summ += s
else:
summ += i
return summ
a = [[1, 2, [3]], [1], 3]
b = (1, 2, 3, 4, 5)
print(f([[1, 2, [3]], [1], 3]))
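# Trace of the call above (added note): f receives the whole outer list as a
# single argument, recurses into nested lists, and returns (1+2+3) + 1 + 3 = 10.
# Tuples are summed one level deep only, so f(b) would give 15, but a tuple
# containing a nested list would raise a TypeError.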
| 3.3125
| 3
|
doc/img_normalization_test.py
|
Oizys18/IMG_CAPTION
| 0
|
12776134
|
<reponame>Oizys18/IMG_CAPTION
import numpy as np
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
import os
def get_data_file(base_dir):
data_path = os.path.join(base_dir, 'datasets', 'test_datasets.npy')
data = np.load(data_path)
img_paths = data[:50, :1]
captions = data[:50, 2:]
train_images = np.squeeze(img_paths, axis=1)
train_images = [os.path.join(base_dir, 'datasets', 'images', img) for img in train_images]
train_captions = np.squeeze(captions, axis=1)
train_captions = ['<start>' + cap + ' <end>' for cap in train_captions]
    train_images = list(set(train_images))  # remove duplicates for testing
print(train_images[:3])
print(train_captions[:3])
return train_images, train_captions
def image_load(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize(img, (255, 255))
return img, image_path
# numpy implementation (Chanwoo's code)
def img_normalization_1(image_path):
img = Image.open(image_path)
img = img.resize((255, 255))
img2 = np.array(img)
min_max_image = (img - np.min(img)) / (np.max(img) - np.min(img))
mean_std_image = (img-img2.mean(axis=(0,1,2),keepdims=True))/np.std(img,axis=(0,1,2),keepdims=True)
return [img, min_max_image, mean_std_image]
# tensorflow implementation (Solji's code)
def img_normalization_2(img):
# tf_img = tf.constant(img, dtype=tf.float32)
mean, var = tf.nn.moments(img, axes=[0, 1, 2])
nn_moments_image = (img - mean) / var**0.5
image_standardization_image = tf.image.per_image_standardization(img)
return [nn_moments_image, image_standardization_image]
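# Note (added for clarity): the two tensorflow variants above are not expected
# to match exactly -- tf.nn.moments standardizes with the raw standard
# deviation, while tf.image.per_image_standardization divides by an adjusted
# stddev, max(stddev, 1/sqrt(num_elements)), to avoid dividing by zero on
# flat images.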
base_dir = os.path.abspath('.')
train_images, train_captions = get_data_file(base_dir)
train_images = train_images[:2]
for train_image in train_images:
img, image_path = image_load(train_image)
images1 = img_normalization_1(image_path)
images2 = img_normalization_2(img)
titles = ['origin_img', 'min_max_image', 'mean_std_image', 'nn_moments_image', 'image_standardization_image']
images = images1 + images2
f = plt.figure()
for i, image in enumerate(images):
f.add_subplot(2, 3, i+1)
plt.title(titles[i])
plt.imshow(image)
plt.show()
| 3.078125
| 3
|
src/ui/setup_progress_screen.py
|
AndreWohnsland/CocktailBerry
| 1
|
12776135
|
<reponame>AndreWohnsland/CocktailBerry
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow
from src.ui_elements.progressbarwindow import Ui_Progressbarwindow
from src.tabs.maker import interrupt_cocktail
from src.display_controller import DP_CONTROLLER
from src.dialog_handler import UI_LANGUAGE
class ProgressScreen(QMainWindow, Ui_Progressbarwindow):
""" Class for the Progressscreen during Cocktail making. """
def __init__(self, parent=None, cocktail_type="Cocktail"):
super().__init__()
self.setupUi(self)
self.setWindowFlags(Qt.Window | Qt.CustomizeWindowHint | Qt.WindowStaysOnTopHint)
DP_CONTROLLER.inject_stylesheet(self)
self.PBabbrechen.clicked.connect(interrupt_cocktail)
self.setWindowIcon(QIcon(parent.icon_path))
self.mainscreen = parent
UI_LANGUAGE.adjust_progress_screen(self, cocktail_type)
self.showFullScreen()
DP_CONTROLLER.set_display_settings(self)
| 2.140625
| 2
|
worker/routes/region_blacklist.py
|
thomasmckay/quay-config-worker
| 0
|
12776136
|
import logging
import json
from flask import request
from data.database import Repository, User
from data import model
from decorators import task_resources
logger = logging.getLogger(__name__)
@task_resources
def process(resources):
response = []
changed = True
for resource in resources:
p_namespace = resource["namespace"]
p_region = resource["region"]
p_state = resource["state"]
p_description = resource["description"]
        # the resource namespace is treated as the owning user's name here
        user = model.user.get_user(p_namespace)
        if user is None:
            return {"failed": True, "msg": "User '%s' does not exist" % (p_namespace)}, 400
return {"failed": False, "changed": changed, "meta": response}, 200
| 2.4375
| 2
|
project_admin/urls.py
|
gedankenstuecke/qcycle-upload
| 0
|
12776137
|
from django.conf.urls import url
from . import views
app_name = 'project-admin'
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^consent-records/?$',
views.consent_records, name='consent-records'),
url(r'^config-general-settings/?$',
views.config_general_settings, name='config-general-settings'),
url(r'^config-oh-settings/?$',
views.config_oh_settings, name='config-oh-settings'),
url(r'^config-file-settings/?$',
views.config_file_settings, name='config-file-settings'),
url(r'^config-homepage-text/?$',
views.config_homepage_text, name='config-homepage-text'),
url(r'^login/?$', views.admin_login, name='login'),
url(r'^add-file/?$', views.add_file, name='add-file'),
url(r'^delete-file/(?P<file_id>\w+)/?$', views.delete_file,
name='delete-file'),
]
| 1.648438
| 2
|
TP_04/ejercicio_4/script_1/constants.py
|
AgustinNormand/recuperacion-de-informacion
| 0
|
12776138
|
<reponame>AgustinNormand/recuperacion-de-informacion
EMPTY_WORDS_PATH = "./palabrasvacias.txt"
# "palabrasvacias.txt"
# "emptywords.txt"
# None
DIRPATH = "/home/agustin/Desktop/Recuperacion/colecciones/RI-tknz-data/"
# "/home/agustin/Desktop/Recuperacion/colecciones/RI-tknz-data/"
# "/home/agustin/Desktop/Recuperacion/colecciones/wiki-small/"
# "/home/agustin/Desktop/Recuperacion/colecciones/wiki-txt/"
# "/home/agustin/Desktop/Recuperacion/colecciones/collection_test/TestCollection/"
# "/home/agustin/Desktop/Recuperacion/colecciones/collection_test_ER2/TestCollection/"
#Tokenizer term size
MIN_TERM_LENGTH = 3
MAX_TERM_LENGTH = 25
#
STRING_STORE_CRITERION = "MAX"
# MAX
# STATIC
# ONLY FOR STATIC STRING_STORE_CRITERION
DOCNAMES_SIZE = 50
TERMS_SIZE = 50
STEMMING_LANGUAGE = "spanish"
# Depends with the collection used
# english
# spanish
# None
#Evaluate RE or not (Email, Abbreviations, Dates, ...)
EXTRACT_ENTITIES = False
#
CORPUS_FILES_ENCODING = "UTF-8"
# Wiki-Txt = "ISO-8859-1"
# All = "UTF-8"
# True if doc_id is in doc_name. Example doc120.txt
ID_IN_DOCNAME = False
WORKERS_NUMBER = 10
INDEX_FILES_PATH = "./output/index_files/"
DOCNAMES_IDS_FILENAME = "docnames_ids"
VOCABULARY_FILENAME = "vocabulary"
INVERTED_INDEX_FILENAME = "inverted_index"
BIN_VOCABULARY_FILENAME = VOCABULARY_FILENAME+".bin"
BIN_INVERTED_INDEX_FILENAME = INVERTED_INDEX_FILENAME+".bin"
BIN_DOCNAMES_IDS_FILENAME = DOCNAMES_IDS_FILENAME+".bin"
PART_INVERTED_INDEX_PATH = "./output/partial_index_files/"
METADATA_FILE = "metadata.json"
DOCUMENT_LIMIT = 302
| 2
| 2
|
src/proto_net/scripts/train/run_train_all.py
|
okason97/handshape-recognition
| 3
|
12776139
|
<reponame>okason97/handshape-recognition
import configparser
from train_setup import train
configs = {
'lsa16': {
'data.train_way': [5],
'data.test_way': [5],
# done (1, 1, 1, 1) in previous experiments
'data.support_query': [(5, 5, 5, 5)],
'data.train_size': [0.33, 0.5, 0.64, 0.75],
'data.test_size': [0.25],
#'data.rotation_range': [0, 25],
#'data.width_shift_range': [0.1],
#'data.height_shift_range': [0.1],
#'data.horizontal_flip': [True, False],
'data.args': [(0, 0, 0, False), (10, 0.2, 0.2, True)],
'model.type': ['expr'],
'model.nb_layers': [4],
'model.nb_filters': [64],
'train.lr': [0.001]
},
'ciarp': {
'data.train_way': [5],
'data.test_way': [5],
# done (1, 1, 1, 1) in previous experiments
'data.support_query': [(5, 5, 5, 5)],
'data.train_size': [0.33, 0.5, 0.64, 0.75],
'data.test_size': [0.25],
#'data.rotation_range': [0, 25],
#'data.width_shift_range': [0.1],
#'data.height_shift_range': [0.1],
#'data.horizontal_flip': [True, False],
'data.args': [(0, 0, 0, False), (10, 0.2, 0.2, True)],
'model.type': ['expr'],
'model.nb_layers': [4],
'model.nb_filters': [64],
'train.lr': [0.001]
},
'rwth': {
'data.train_way': [20],
'data.test_way': [5],
'data.support_query': [(5, 5, 5, 5)],
'data.train_size': [0.33, 0.5, 0.64, 0.75],
'data.test_size': [0.25],
#'data.rotation_range': [0, 25],
#'data.width_shift_range': [0.1],
#'data.height_shift_range': [0.1],
#'data.horizontal_flip': [True, False],
'data.args': [(0, 0, 0, False), (10, 0.2, 0.2, True)],
'model.type': ['expr'],
'model.nb_layers': [4],
'model.nb_filters': [64],
'train.lr': [0.001]
}
}
def preprocess_config(c):
conf_dict = {}
int_params = ['data.train_way', 'data.test_way', 'data.train_support',
'data.test_support', 'data.train_query', 'data.test_query',
'data.episodes', 'data.gpu', 'data.cuda', 'model.z_dim',
'train.epochs', 'train.patience']
float_params = ['train.lr', 'data.rotation_range',
'data.width_shift_range', 'data.height_shift_range']
for param in c:
if param in int_params:
conf_dict[param] = int(c[param])
elif param in float_params:
conf_dict[param] = float(c[param])
else:
conf_dict[param] = c[param]
return conf_dict
for dataset in ['ciarp', 'lsa16', 'rwth']:
config_from_file = configparser.ConfigParser()
config_from_file.read("./src/proto_net/config/config_{}.conf".format(dataset))
ds_config = configs[dataset]
for train_size in ds_config['data.train_size']:
for test_size in ds_config['data.test_size']:
for train_way in ds_config['data.train_way']:
for test_way in ds_config['data.test_way']:
for train_support, train_query, test_support, test_query in ds_config['data.support_query']:
for rotation_range, width_shift_range, height_shift_range, horizontal_flip in ds_config['data.args']:
for model_type in ds_config['model.type']:
for nb_layers in ds_config['model.nb_layers']:
for nb_filters in ds_config['model.nb_filters']:
for lr in ds_config['train.lr']:
try:
custom_params = {
'data.train_way': train_way,
'data.train_support': train_support,
'data.train_query': train_query,
'data.test_way': test_way,
'data.test_support': test_support,
'data.test_query': test_query,
'data.train_size': train_size,
'data.test_size': test_size,
'data.rotation_range': rotation_range,
'data.width_shift_range': width_shift_range,
'data.height_shift_range': height_shift_range,
'data.horizontal_flip': horizontal_flip,
'model.type': model_type,
'model.nb_layers': nb_layers,
'model.nb_filters': nb_filters,
'train.lr': lr
}
preprocessed_config = preprocess_config({ **config_from_file['TRAIN'], **custom_params })
train(preprocessed_config)
                                            except Exception as err:
                                                print("Error. Probably memory :c", err)
| 1.890625
| 2
|
src/data/raw_unpack.py
|
caheredia/california_housing_prices
| 0
|
12776140
|
<gh_stars>0
import os
import tarfile
from src.data.helpers import DATA_DIR
import pandas as pd
def unpack_tar_file(file: str = "cal_housing.tgz"):
"""
Unpack zipped file into present directory.
"""
file = os.path.join(DATA_DIR, "raw", file)
with tarfile.open(file) as tar:
tar.extractall(path="raw")
def load_raw_data_to_df(raw_dir: str = "raw") -> pd.DataFrame:
"""
Load raw data into pandas DataFrames.
"""
# extract columns
columns_file = os.path.join(
DATA_DIR, raw_dir, "CaliforniaHousing", "cal_housing.domain"
)
with open(columns_file) as file:
lines = file.readlines()
columns = [line.split(":")[0] for line in lines]
# load DataFrame
data_file = os.path.join(DATA_DIR, raw_dir, "CaliforniaHousing", "cal_housing.data")
raw_df = pd.read_csv(data_file, names=columns)
return raw_df
def get_raw_df():
"""
Returns the raw data as a DataFrame
"""
unpack_tar_file()
return load_raw_data_to_df()
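if __name__ == "__main__":
    # Usage sketch (added, not in the original file). Note the asymmetry above:
    # unpack_tar_file() extracts into ./raw relative to the working directory,
    # while load_raw_data_to_df() reads from DATA_DIR/raw, so this assumes the
    # script is run with DATA_DIR as the working directory and that
    # cal_housing.tgz already sits under DATA_DIR/raw.
    housing = get_raw_df()
    print(housing.shape)
    print(housing.head())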
| 3.25
| 3
|
georgebot/georgebot.py
|
zenoftrading/georgebot
| 2
|
12776141
|
import sys
import source as src
import database as db
def main(argv):
config_filename = argv[1]
config = src.read_config(config_filename)
websocket = src.authentication(config['exchange'])
try:
src.run(websocket,config)
except Exception as e:
print("Bot error: {}".format(e))
finally:
src.terminate(websocket)
if __name__ == '__main__':
if len(sys.argv) > 1:
db.create_tables()
main(sys.argv)
else:
print("Try to use: python georgebot.py <configuration_file.yaml>")
| 2.578125
| 3
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/ihooks.py
|
bidhata/EquationGroupLeaks
| 9
|
12776142
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: ihooks.py
"""Import hook support.
Consistent use of this module will make it possible to change the
different mechanisms involved in loading modules independently.
While the built-in module imp exports interfaces to the built-in
module searching and loading algorithm, and it is possible to replace
the built-in function __import__ in order to change the semantics of
the import statement, until now it has been difficult to combine the
effect of different __import__ hacks, like loading modules from URLs
by rimport.py, or restricted execution by rexec.py.
This module defines three new concepts:
1) A "file system hooks" class provides an interface to a filesystem.
One hooks class is defined (Hooks), which uses the interface provided
by standard modules os and os.path. It should be used as the base
class for other hooks classes.
2) A "module loader" class provides an interface to search for a
module in a search path and to load it. It defines a method which
searches for a module in a single directory; by overriding this method
one can redefine the details of the search. If the directory is None,
built-in and frozen modules are searched instead.
Two module loader class are defined, both implementing the search
strategy used by the built-in __import__ function: ModuleLoader uses
the imp module's find_module interface, while HookableModuleLoader
uses a file system hooks class to interact with the file system. Both
use the imp module's load_* interfaces to actually load the module.
3) A "module importer" class provides an interface to import a
module, as well as interfaces to reload and unload a module. It also
provides interfaces to install and uninstall itself instead of the
default __import__ and reload (and unload) functions.
One module importer class is defined (ModuleImporter), which uses a
module loader instance passed in (by default HookableModuleLoader is
instantiated).
The classes defined here should be used as base classes for extended
functionality along those lines.
If a module importer class supports dotted names, its import_module()
must return a different value depending on whether it is called on
behalf of a "from ... import ..." statement or not. (This is caused
by the way the __import__ hook is used by the Python interpreter.) It
would also be wise to install a different version of reload().
"""
from warnings import warnpy3k, warn
warnpy3k('the ihooks module has been removed in Python 3.0', stacklevel=2)
del warnpy3k
import __builtin__
import imp
import os
import sys
__all__ = [
'BasicModuleLoader', 'Hooks', 'ModuleLoader', 'FancyModuleLoader',
'BasicModuleImporter', 'ModuleImporter', 'install', 'uninstall']
VERBOSE = 0
from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
BUILTIN_MODULE = C_BUILTIN
FROZEN_MODULE = PY_FROZEN
class _Verbose:
def __init__(self, verbose=VERBOSE):
self.verbose = verbose
def get_verbose(self):
return self.verbose
def set_verbose(self, verbose):
self.verbose = verbose
def note(self, *args):
if self.verbose:
self.message(*args)
def message(self, format, *args):
if args:
print format % args
else:
print format
class BasicModuleLoader(_Verbose):
"""Basic module loader.
This provides the same functionality as built-in import. It
doesn't deal with checking sys.modules -- all it provides is
find_module() and a load_module(), as well as find_module_in_dir()
which searches just one directory, and can be overridden by a
derived class to change the module search algorithm when the basic
dependency on sys.path is unchanged.
The interface is a little more convenient than imp's:
find_module(name, [path]) returns None or 'stuff', and
load_module(name, stuff) loads the module.
"""
def find_module(self, name, path=None):
if path is None:
path = [
None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
if stuff:
return stuff
return
def default_path(self):
return sys.path
def find_module_in_dir(self, name, dir):
if dir is None:
return self.find_builtin_module(name)
else:
try:
return imp.find_module(name, [dir])
except ImportError:
return
return
def find_builtin_module(self, name):
if imp.is_builtin(name):
return (None, '', ('', '', BUILTIN_MODULE))
else:
if imp.is_frozen(name):
return (None, '', ('', '', FROZEN_MODULE))
return None
def load_module(self, name, stuff):
file, filename, info = stuff
try:
return imp.load_module(name, file, filename, info)
finally:
if file:
file.close()
class Hooks(_Verbose):
"""Hooks into the filesystem and interpreter.
By deriving a subclass you can redefine your filesystem interface,
e.g. to merge it with the URL space.
This base class behaves just like the native filesystem.
"""
def get_suffixes(self):
return imp.get_suffixes()
def new_module(self, name):
return imp.new_module(name)
def is_builtin(self, name):
return imp.is_builtin(name)
def init_builtin(self, name):
return imp.init_builtin(name)
def is_frozen(self, name):
return imp.is_frozen(name)
def init_frozen(self, name):
return imp.init_frozen(name)
def get_frozen_object(self, name):
return imp.get_frozen_object(name)
def load_source(self, name, filename, file=None):
return imp.load_source(name, filename, file)
def load_compiled(self, name, filename, file=None):
return imp.load_compiled(name, filename, file)
def load_dynamic(self, name, filename, file=None):
return imp.load_dynamic(name, filename, file)
def load_package(self, name, filename, file=None):
return imp.load_module(name, file, filename, ('', '', PKG_DIRECTORY))
def add_module(self, name):
d = self.modules_dict()
if name in d:
return d[name]
d[name] = m = self.new_module(name)
return m
def modules_dict(self):
return sys.modules
def default_path(self):
return sys.path
def path_split(self, x):
return os.path.split(x)
def path_join(self, x, y):
return os.path.join(x, y)
def path_isabs(self, x):
return os.path.isabs(x)
def path_exists(self, x):
return os.path.exists(x)
def path_isdir(self, x):
return os.path.isdir(x)
def path_isfile(self, x):
return os.path.isfile(x)
def path_islink(self, x):
return os.path.islink(x)
def openfile(self, *x):
return open(*x)
openfile_error = IOError
def listdir(self, x):
return os.listdir(x)
listdir_error = os.error
class ModuleLoader(BasicModuleLoader):
"""Default module loader; uses file system hooks.
By defining suitable hooks, you might be able to load modules from
other sources than the file system, e.g. from compressed or
encrypted files, tar files or (if you're brave!) URLs.
"""
def __init__(self, hooks=None, verbose=VERBOSE):
BasicModuleLoader.__init__(self, verbose)
self.hooks = hooks or Hooks(verbose)
def default_path(self):
return self.hooks.default_path()
def modules_dict(self):
return self.hooks.modules_dict()
def get_hooks(self):
return self.hooks
def set_hooks(self, hooks):
self.hooks = hooks
def find_builtin_module(self, name):
if self.hooks.is_builtin(name):
return (None, '', ('', '', BUILTIN_MODULE))
else:
if self.hooks.is_frozen(name):
return (None, '', ('', '', FROZEN_MODULE))
return None
def find_module_in_dir(self, name, dir, allow_packages=1):
if dir is None:
return self.find_builtin_module(name)
else:
if allow_packages:
fullname = self.hooks.path_join(dir, name)
if self.hooks.path_isdir(fullname):
stuff = self.find_module_in_dir('__init__', fullname, 0)
if stuff:
file = stuff[0]
if file:
file.close()
return (
None, fullname, ('', '', PKG_DIRECTORY))
for info in self.hooks.get_suffixes():
suff, mode, type = info
fullname = self.hooks.path_join(dir, name + suff)
try:
fp = self.hooks.openfile(fullname, mode)
return (
fp, fullname, info)
except self.hooks.openfile_error:
pass
return
def load_module(self, name, stuff):
file, filename, info = stuff
suff, mode, type = info
try:
if type == BUILTIN_MODULE:
return self.hooks.init_builtin(name)
if type == FROZEN_MODULE:
return self.hooks.init_frozen(name)
if type == C_EXTENSION:
m = self.hooks.load_dynamic(name, filename, file)
elif type == PY_SOURCE:
m = self.hooks.load_source(name, filename, file)
elif type == PY_COMPILED:
m = self.hooks.load_compiled(name, filename, file)
elif type == PKG_DIRECTORY:
m = self.hooks.load_package(name, filename, file)
else:
raise ImportError, 'Unrecognized module type (%r) for %s' % (
type, name)
finally:
if file:
file.close()
m.__file__ = filename
return m
class FancyModuleLoader(ModuleLoader):
"""Fancy module loader -- parses and execs the code itself."""
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
realfilename = filename
path = None
if type == PKG_DIRECTORY:
initstuff = self.find_module_in_dir('__init__', filename, 0)
if not initstuff:
raise ImportError, 'No __init__ module in package %s' % name
initfile, initfilename, initinfo = initstuff
initsuff, initmode, inittype = initinfo
if inittype not in (PY_COMPILED, PY_SOURCE):
if initfile:
initfile.close()
raise ImportError, 'Bad type (%r) for __init__ module in package %s' % (
inittype, name)
path = [
filename]
file = initfile
realfilename = initfilename
type = inittype
if type == FROZEN_MODULE:
code = self.hooks.get_frozen_object(name)
elif type == PY_COMPILED:
import marshal
file.seek(8)
code = marshal.load(file)
elif type == PY_SOURCE:
data = file.read()
code = compile(data, realfilename, 'exec')
else:
return ModuleLoader.load_module(self, name, stuff)
m = self.hooks.add_module(name)
if path:
m.__path__ = path
m.__file__ = filename
try:
exec code in m.__dict__
except:
d = self.hooks.modules_dict()
if name in d:
del d[name]
raise
return m
class BasicModuleImporter(_Verbose):
"""Basic module importer; uses module loader.
This provides basic import facilities but no package imports.
"""
def __init__(self, loader=None, verbose=VERBOSE):
_Verbose.__init__(self, verbose)
self.loader = loader or ModuleLoader(None, verbose)
self.modules = self.loader.modules_dict()
return
def get_loader(self):
return self.loader
def set_loader(self, loader):
self.loader = loader
def get_hooks(self):
return self.loader.get_hooks()
def set_hooks(self, hooks):
return self.loader.set_hooks(hooks)
def import_module(self, name, globals={}, locals={}, fromlist=[]):
name = str(name)
if name in self.modules:
return self.modules[name]
stuff = self.loader.find_module(name)
if not stuff:
raise ImportError, 'No module named %s' % name
return self.loader.load_module(name, stuff)
def reload(self, module, path=None):
name = str(module.__name__)
stuff = self.loader.find_module(name, path)
if not stuff:
raise ImportError, 'Module %s not found for reload' % name
return self.loader.load_module(name, stuff)
def unload(self, module):
del self.modules[str(module.__name__)]
def install(self):
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = self.import_module
__builtin__.reload = self.reload
__builtin__.unload = self.unload
return
def uninstall(self):
__builtin__.__import__ = self.save_import_module
__builtin__.reload = self.save_reload
__builtin__.unload = self.save_unload
if not __builtin__.unload:
del __builtin__.unload
class ModuleImporter(BasicModuleImporter):
"""A module importer that supports packages."""
def import_module(self, name, globals=None, locals=None, fromlist=None, level=-1):
parent = self.determine_parent(globals, level)
q, tail = self.find_head_package(parent, str(name))
m = self.load_tail(q, tail)
if not fromlist:
return q
if hasattr(m, '__path__'):
self.ensure_fromlist(m, fromlist)
return m
def determine_parent(self, globals, level=-1):
if not globals or not level:
return
else:
pkgname = globals.get('__package__')
if pkgname is not None:
if not pkgname and level > 0:
raise ValueError, 'Attempted relative import in non-package'
else:
modname = globals.get('__name__')
if modname is None:
return
if '__path__' in globals:
pkgname = modname
else:
if '.' not in modname:
if level > 0:
raise ValueError, 'Attempted relative import in non-package'
globals['__package__'] = None
return
pkgname = modname.rpartition('.')[0]
globals['__package__'] = pkgname
if level > 0:
dot = len(pkgname)
for x in range(level, 1, -1):
try:
dot = pkgname.rindex('.', 0, dot)
except ValueError:
raise ValueError('attempted relative import beyond top-level package')
pkgname = pkgname[:dot]
try:
return sys.modules[pkgname]
except KeyError:
if level < 1:
warn("Parent module '%s' not found while handling absolute import" % pkgname, RuntimeWarning, 1)
return
raise SystemError, "Parent module '%s' not loaded, cannot perform relative import" % pkgname
return
def find_head_package(self, parent, name):
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i + 1:]
else:
head = name
tail = ''
if parent:
qname = '%s.%s' % (parent.__name__, head)
else:
qname = head
q = self.import_it(head, qname, parent)
if q:
return (q, tail)
else:
if parent:
qname = head
parent = None
q = self.import_it(head, qname, parent)
if q:
return (q, tail)
raise ImportError, "No module named '%s'" % qname
return
def load_tail(self, q, tail):
m = q
while tail:
i = tail.find('.')
if i < 0:
i = len(tail)
head, tail = tail[:i], tail[i + 1:]
mname = '%s.%s' % (m.__name__, head)
m = self.import_it(head, mname, m)
if not m:
raise ImportError, "No module named '%s'" % mname
return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        # NOTE: uncompyle6 could not decompile this method; the body below is
        # reconstructed from the disassembly and matches the stock CPython 2.7
        # ihooks source.
        for sub in fromlist:
            if sub == '*':
                if not recursive:
                    try:
                        all = m.__all__
                    except AttributeError:
                        pass
                    else:
                        self.ensure_fromlist(m, all, 1)
                continue
            if sub != '*' and not hasattr(m, sub):
                subname = '%s.%s' % (m.__name__, sub)
                submod = self.import_it(sub, subname, m)
                if not submod:
                    raise ImportError, "No module named '%s'" % subname
def import_it(self, partname, fqname, parent, force_load=0):
if not partname:
return parent
else:
if not force_load:
try:
return self.modules[fqname]
except KeyError:
pass
try:
path = parent and parent.__path__
except AttributeError:
return None
partname = str(partname)
stuff = self.loader.find_module(partname, path)
if not stuff:
return None
fqname = str(fqname)
m = self.loader.load_module(fqname, stuff)
if parent:
setattr(parent, partname, m)
return m
def reload(self, module):
name = str(module.__name__)
if '.' not in name:
return self.import_it(name, name, None, force_load=1)
else:
i = name.rfind('.')
pname = name[:i]
parent = self.modules[pname]
return self.import_it(name[i + 1:], name, parent, force_load=1)
default_importer = None
current_importer = None
def install(importer=None):
global current_importer
current_importer = importer or default_importer or ModuleImporter()
current_importer.install()
def uninstall():
current_importer.uninstall()
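# Illustrative sketch (added, not part of the stdlib module): wiring together
# the three pieces described in the module docstring. Python 2 only, like the
# rest of this file.
#
#   importer = ModuleImporter(loader=FancyModuleLoader(verbose=1))
#   install(importer)      # route __import__/reload/unload through the importer
#   import string          # now loaded (and traced) via FancyModuleLoader
#   uninstall()            # restore the built-in import machinery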
| 2.4375
| 2
|
dags/poc_dag.py
|
jdpinedaj/poc_airflow
| 0
|
12776143
|
#######################
##! 1. Importing modules
#######################
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.providers.google.cloud.operators.bigquery import BigQueryCheckOperator, BigQueryExecuteQueryOperator
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.operators.python import PythonOperator
import requests
import json
#######################
##! 2. Default arguments
#######################
default_args = {
'owner': 'jdpinedaj',
'depends_on_past': False,
'email': ['<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 2,
'retry_delay': timedelta(minutes=2),
}
# It is possible to store all those variables as "Variables" within airflow
URL_AIRLINES = "https://media.githubusercontent.com/media/dpinedaj/airflights-kaggle-data/master/airlines.csv"
URL_AIRPORTS = "https://media.githubusercontent.com/media/dpinedaj/airflights-kaggle-data/master/airports.csv"
URL_FLIGHTS = "https://media.githubusercontent.com/media/dpinedaj/airflights-kaggle-data/master/flights.csv"
GCP_CONNECTION_ID = 'google_cloud_default'
PROJECT_ID = 'aa-study'
GCP_LOCATION = 'us-central1'
MY_DATASET = 'sandbox_jpineda'
GS_BUCKET = 'jpineda'
SCHEDULE_INTERVAL = '@once'
URL_CF1 = 'https://us-central1-aa-study.cloudfunctions.net/get-data-upload-to-gcs'
URL_CF2 = 'https://us-central1-aa-study.cloudfunctions.net/raw-schema-processed'
# Additional variables
date = datetime.now().strftime("%Y_%m_%d")
# Functions
def get_data_upload_to_gcs(bucket_name, source_url, destiny_path):
url = URL_CF1
values = {
'bucket_name': bucket_name,
'source_url': source_url,
'destiny_path': destiny_path
}
response = requests.post(url, json=values)
if response.status_code != 200:
raise Exception(
'Bad response from application: {!r} / {!r} / {!r}'.format(
response.status_code, response.headers, response.text))
else:
return response.text
def raw_schema_processed(bucket_name, origin_path, destiny_path, schema_name):
    url = URL_CF2
values = {
'bucket_name': bucket_name,
'origin_path': origin_path,
'destiny_path': destiny_path,
'schema_name': schema_name
}
    response = requests.post(url, json=values)
if response.status_code != 200:
raise Exception(
'Bad response from application: {!r} / {!r} / {!r}'.format(
response.status_code, response.headers, response.text))
else:
return response.text
#######################
##! 3. Instantiate a DAG
#######################
dag = DAG(dag_id='PoC_Juan_Pineda_DAG_vf',
description='PoC de Juan Pineda',
start_date=datetime.now(),
schedule_interval=SCHEDULE_INTERVAL,
concurrency=5,
max_active_runs=1,
default_args=default_args)
#######################
##! 4. Tasks
#######################
#? 4.1. Starting pipeline
start_pipeline = DummyOperator(task_id='start_pipeline', dag=dag)
#? 4.2. Download data from kaggle in parquet, and upload it into gcs using CLOUD FUNCTIONS
download_airlines_data = PythonOperator(task_id='download_airlines_data',
python_callable=get_data_upload_to_gcs,
op_kwargs={
"bucket_name":
GS_BUCKET,
"source_url":
URL_AIRLINES,
"destiny_path":
f"raw/{date}_airlines.parquet"
},
dag=dag)
download_airports_data = PythonOperator(task_id='download_airports_data',
python_callable=get_data_upload_to_gcs,
op_kwargs={
"bucket_name":
GS_BUCKET,
"source_url":
URL_AIRPORTS,
"destiny_path":
f"raw/{date}_airports.parquet"
},
dag=dag)
download_flights_data = PythonOperator(task_id='download_flights_data',
python_callable=get_data_upload_to_gcs,
op_kwargs={
"bucket_name":
GS_BUCKET,
"source_url":
URL_FLIGHTS,
"destiny_path":
f"raw/{date}_flights.parquet"
},
dag=dag)
#? 4.3. Change schema to raw_data and load it again in processed_data
processing_airlines_data = PythonOperator(
task_id='processing_airlines_data',
python_callable=raw_schema_processed,
op_kwargs={
"bucket_name": GS_BUCKET,
"origin_path": f"raw/{date}_airlines.parquet",
"destiny_path": f"processed/{date}_airlines.parquet",
"schema_name": "airlines_schema.json"
},
dag=dag)
processing_airports_data = PythonOperator(
task_id='processing_airports_data',
python_callable=raw_schema_processed,
op_kwargs={
"bucket_name": GS_BUCKET,
"origin_path": f"raw/{date}_airports.parquet",
"destiny_path": f"processed/{date}_airports.parquet",
"schema_name": "airports_schema.json"
},
dag=dag)
processing_flights_data = PythonOperator(
task_id='processing_flights_data',
python_callable=raw_schema_processed,
op_kwargs={
"bucket_name": GS_BUCKET,
"origin_path": f"raw/{date}_flights.parquet",
"destiny_path": f"processed/{date}_flights.parquet",
"schema_name": "flights_schema.json"
},
dag=dag)
#? 4.4. Load data from gcs to bigquery
load_airlines_data = GCSToBigQueryOperator(
task_id='load_airlines_data',
bucket=GS_BUCKET,
source_objects=[f"processed/{date}_airlines.parquet"],
destination_project_dataset_table=
f'{PROJECT_ID}:{MY_DATASET}.airlines_data',
source_format='parquet',
write_disposition='WRITE_TRUNCATE',
skip_leading_rows=1,
autodetect=True,
location=GCP_LOCATION,
dag=dag)
load_airports_data = GCSToBigQueryOperator(
task_id='load_airports_data',
bucket=GS_BUCKET,
source_objects=[f"processed/{date}_airports.parquet"],
destination_project_dataset_table=
f'{PROJECT_ID}:{MY_DATASET}.airports_data',
source_format='parquet',
write_disposition='WRITE_TRUNCATE',
skip_leading_rows=1,
autodetect=True,
location=GCP_LOCATION,
dag=dag)
load_flights_data = GCSToBigQueryOperator(
task_id='load_flights_data',
bucket=GS_BUCKET,
source_objects=[f"processed/{date}_flights.parquet"],
destination_project_dataset_table=f'{PROJECT_ID}:{MY_DATASET}.flights_data',
source_format='parquet',
write_disposition='WRITE_TRUNCATE',
skip_leading_rows=1,
autodetect=True,
location=GCP_LOCATION,
dag=dag)
#? 4.5. Data check
check_airlines = BigQueryCheckOperator(task_id='check_airlines',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
params={
'project_id': PROJECT_ID,
'my_dataset': MY_DATASET
},
sql='''
#standardSQL
SELECT count(*) AS num_airlines
FROM `{{ params.project_id }}.{{ params.my_dataset }}.airlines_data`
''',
dag=dag)
check_airports = BigQueryCheckOperator(task_id='check_airports',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
params={
'project_id': PROJECT_ID,
'my_dataset': MY_DATASET
},
sql='''
#standardSQL
SELECT count(*) AS num_airports
FROM `{{ params.project_id }}.{{ params.my_dataset }}.airports_data`
''',
dag=dag)
check_flights = BigQueryCheckOperator(task_id='check_flights',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
params={
'project_id': PROJECT_ID,
'my_dataset': MY_DATASET
},
sql='''
#standardSQL
SELECT count(*) AS num_flights
FROM `{{ params.project_id }}.{{ params.my_dataset }}.flights_data`
''',
dag=dag)
loaded_data_to_bigquery = DummyOperator(task_id='loaded_data', dag=dag)
#? 4.6. Generating a view
check_unified_view = BigQueryExecuteQueryOperator(
task_id='check_unified_view',
use_legacy_sql=False,
location=GCP_LOCATION,
bigquery_conn_id=GCP_CONNECTION_ID,
destination_dataset_table='{0}.{1}.unified_table'.format(
PROJECT_ID, MY_DATASET),
write_disposition="WRITE_TRUNCATE",
allow_large_results=True,
sql='''
#standardSQL
WITH flights_airlines AS (
SELECT
flights.year,
flights.month,
flights.day,
flights.flight_number,
flights.origin_airport,
flights.airline as airline_iata_code,
airlines.airline
FROM `{0}.{1}.flights_data` flights
LEFT JOIN `{0}.{1}.airlines_data` airlines
ON flights.airline = airlines.iata_code
)
SELECT
year,
month,
day,
airline_iata_code,
airline,
flight_number,
origin_airport,
airports.airport AS name_airport,
airports.city,
airports.state,
airports.latitude,
airports.longitude
FROM flights_airlines
INNER JOIN `{0}.{1}.airports_data` airports
ON flights_airlines.origin_airport = airports.iata_code
'''.format(PROJECT_ID, MY_DATASET),
dag=dag)
#? 4.7. Finishing pipeline
finish_pipeline = DummyOperator(task_id='finish_pipeline', dag=dag)
#######################
##! 5. Setting up dependencies
#######################
start_pipeline >> [
download_airlines_data, download_airports_data, download_flights_data
]
download_airlines_data >> processing_airlines_data >> load_airlines_data >> check_airlines
download_airports_data >> processing_airports_data >> load_airports_data >> check_airports
download_flights_data >> processing_flights_data >> load_flights_data >> check_flights
[check_airlines, check_airports, check_flights
] >> loaded_data_to_bigquery >> check_unified_view >> finish_pipeline
| 2.4375
| 2
|
tests/test_override_threading.py
|
wordlesstruth/stomp.py
| 0
|
12776144
|
import logging
import time
from concurrent.futures import ThreadPoolExecutor
import pytest
import stomp
from stomp.listener import TestListener
from .testutils import *
executor = ThreadPoolExecutor()
def create_thread(fc):
f = executor.submit(fc)
print("Created future %s on executor %s" % (f, executor))
return f
class ReconnectListener(TestListener):
    """Test listener that reconnects (and then disconnects) once after the receiver loop ends."""
    def __init__(self, conn):
        TestListener.__init__(self, "123", True)
        self.conn = conn
def on_receiver_loop_ended(self, *args):
if self.conn:
c = self.conn
self.conn = None
c.connect(get_default_user(), get_default_password(), wait=True)
c.disconnect()
@pytest.fixture
def conn():
conn = stomp.Connection(get_default_host())
# check thread override here
conn.transport.override_threading(create_thread)
listener = ReconnectListener(conn)
conn.set_listener("testlistener", listener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
class TestThreadingOverride(object):
def test_threading(self, conn):
listener = conn.get_listener("testlistener")
queuename = "/queue/test1-%s" % listener.timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
conn.send(body="this is a test", destination=queuename, receipt="123")
validate_send(conn, 1, 1, 0)
logging.info("first disconnect")
conn.disconnect(receipt="112233")
logging.info("reconnecting")
conn.connect(get_default_user(), get_default_password(), wait=True)
logging.info("second disconnect")
conn.disconnect()
| 2.1875
| 2
|
niaaml_gui/widgets/optimization_widget.py
|
zStupan/NiaAML-GUI
| 2
|
12776145
|
from PyQt5.QtWidgets import QComboBox, QLineEdit, QLabel, QHBoxLayout, QVBoxLayout, QGridLayout, QTabWidget, QFileDialog, QCheckBox, QPushButton
from PyQt5 import QtGui
from PyQt5.QtGui import QIcon
from PyQt5 import QtCore
from niapy.util.factory import _algorithm_options
from niaaml_gui.widgets.list_widget_custom import ListWidgetCustom
from niaaml_gui.widgets.base_main_widget import BaseMainWidget
from niaaml_gui.windows import ProcessWindow
from niaaml_gui.process_window_data import ProcessWindowData
from niaaml.classifiers import ClassifierFactory
from niaaml.preprocessing.feature_selection import FeatureSelectionAlgorithmFactory
from niaaml.preprocessing.feature_transform import FeatureTransformAlgorithmFactory
from niaaml.fitness import FitnessFactory
from niaaml.preprocessing.encoding import EncoderFactory
from niaaml.preprocessing.imputation import ImputerFactory
import qtawesome as qta
class OptimizationWidget(BaseMainWidget):
__niaamlFeatureSelectionAlgorithms = FeatureSelectionAlgorithmFactory().get_name_to_classname_mapping()
__niaamlFeatureTransformAlgorithms = FeatureTransformAlgorithmFactory().get_name_to_classname_mapping()
__niaamlClassifiers = ClassifierFactory().get_name_to_classname_mapping()
__niaamlFitnessFunctions = FitnessFactory().get_name_to_classname_mapping()
__niaamlEncoders = EncoderFactory().get_name_to_classname_mapping()
__niaamlImputers = ImputerFactory().get_name_to_classname_mapping()
__niapyAlgorithmsList = list(_algorithm_options().keys())
__niaamlFeatureSelectionAlgorithmsList = list(__niaamlFeatureSelectionAlgorithms.keys())
__niaamlFeatureTransformAlgorithmsList = list(__niaamlFeatureTransformAlgorithms.keys())
__niaamlClassifiersList = list(__niaamlClassifiers.keys())
__niaamlFitnessFunctionsList = list(__niaamlFitnessFunctions.keys())
__niaamlEncodersList = list(__niaamlEncoders.keys())
__niaamlImputersList = list(__niaamlImputers.keys())
def __init__(self, parent, is_v1 = False, *args, **kwargs):
self.__niapyAlgorithmsList.sort()
self.__niaamlFeatureSelectionAlgorithmsList.sort()
self.__niaamlFeatureTransformAlgorithmsList.sort()
self.__niaamlClassifiersList.sort()
self.__is_v1 = is_v1
super().__init__(parent, *args, **kwargs)
fileLayout = QHBoxLayout(self._parent)
selectFileBar = QHBoxLayout(self._parent)
selectFileBar.setSpacing(0)
selectFileBar.setContentsMargins(0, 5, 5, 5)
fNameLine = QLineEdit(self._parent)
fNameLine.setObjectName('csvFile')
fNameLine.setPlaceholderText('Select a CSV dataset file...')
fNameLine.setReadOnly(True)
font = fNameLine.font()
font.setPointSize(12)
fNameLine.setFont(font)
selectFileBar.addWidget(fNameLine)
editBtn = self._createButton(None, self._editCSVFile, 'editCSVButton', qta.icon('fa5.edit'))
editBtn.setEnabled(False)
selectFileBar.addWidget(editBtn)
selectFileBar.addWidget(self._createButton('Select file', self._openCSVFile))
checkBox = QCheckBox('CSV has header')
checkBox.setObjectName('csv')
checkBox.setFont(font)
fileLayout.addItem(selectFileBar)
fileLayout.addWidget(checkBox)
encoders = self.__createComboBox('Categorical features\' encoder:', self.__niaamlEncodersList, 'encoders')
imputers = self.__createComboBox('Missing features\' imputer:', self.__niaamlImputersList, 'imputers')
hBoxLayout = QHBoxLayout(self._parent)
hBoxLayout.setContentsMargins(0, 0, 0, 0)
vBoxLayout = QVBoxLayout(self._parent)
vBoxLayout.setContentsMargins(0, 34, 0, 0)
h1BoxLayout = QHBoxLayout(self._parent)
h1BoxLayout.setContentsMargins(0, 0, 0, 0)
fsasBox = self.__createGridLayoutBox((0, 5, 3, 5), True)
fsasList = self.__createListWidget([], None, 'fsasList')
fsasBox.addWidget(fsasList)
h1BoxLayout.addItem(fsasBox)
ftasBox = self.__createGridLayoutBox((3, 5, 3, 5), True)
ftasList = self.__createListWidget([], None, 'ftasList')
ftasBox.addWidget(ftasList)
h1BoxLayout.addItem(ftasBox)
classifiers = self.__createGridLayoutBox((3, 5, 5, 5), True)
classifiersList = self.__createListWidget([], None, 'classifiersList')
classifiers.addWidget(classifiersList)
h1BoxLayout.addItem(classifiers)
settingsBox = self.__createGridLayoutBox((0, 0, 5, 5), False, 'transparent')
settingsBox.setVerticalSpacing(10)
optAlgosLabel = 'Optimization Algorithm (components selection):' if not self.__is_v1 else 'Optimization Algorithm:'
optAlgos = self.__createComboBox(optAlgosLabel, self.__niapyAlgorithmsList, 'optAlgos')
optAlgosInner = self.__createComboBox('Optimization Algorithm (parameter tuning) - same as first if not selected:', [*['None'], *self.__niapyAlgorithmsList], 'optAlgosInner')
validator = QtGui.QRegExpValidator(QtCore.QRegExp('[1-9][0-9]*'))
popSizeLabel = 'Population size (components selection):' if not self.__is_v1 else 'Population size:'
popSize = self.__createTextInput(popSizeLabel, 'popSize', validator)
popSizeInner = self.__createTextInput('Population size (parameter tuning):', 'popSizeInner', validator)
        numEvalsLabel = 'Number of evaluations (components selection):' if not self.__is_v1 else 'Number of evaluations:'
numEvals = self.__createTextInput(numEvalsLabel, 'numEvals', validator)
numEvalsInner = self.__createTextInput('Number of evaluations (parameter tuning):', 'numEvalsInner', validator)
fitFuncs = self.__createComboBox('Fitness Function:', self.__niaamlFitnessFunctionsList, 'fitFuncs')
selectOutputFolderBar = QHBoxLayout(self._parent)
selectOutputFolderBar.setSpacing(0)
foNameLine = QLineEdit(self._parent)
foNameLine.setObjectName('outputFolder')
foNameLine.setPlaceholderText('Select pipeline output folder...')
foNameLine.setReadOnly(True)
font = foNameLine.font()
font.setPointSize(12)
foNameLine.setFont(font)
selectOutputFolderBar.addWidget(foNameLine)
selectOutputFolderBar.addWidget(self._createButton('Select folder', self.__selectDirectory))
settingsBox.addItem(optAlgos)
if not self.__is_v1:
settingsBox.addItem(optAlgosInner)
settingsBox.addItem(popSize)
if not self.__is_v1:
settingsBox.addItem(popSizeInner)
settingsBox.addItem(numEvals)
if not self.__is_v1:
settingsBox.addItem(numEvalsInner)
settingsBox.addItem(fitFuncs)
settingsBox.addItem(selectOutputFolderBar)
confirmBar = QHBoxLayout(self._parent)
confirmBar.setContentsMargins(5, 5, 5, 5)
confirmBar.addStretch()
confirmBar.addWidget(self._createButton('Start optimization', self.__runOptimize))
vBoxLayout.addItem(fileLayout)
vBoxLayout.addItem(encoders)
vBoxLayout.addItem(imputers)
vBoxLayout.addItem(h1BoxLayout)
vBoxLayout.addItem(settingsBox)
vBoxLayout.addItem(confirmBar)
exploreBox = self.__createGridLayoutBox((0, 0, 0, 0), False)
exploreBox.addWidget(self.__createTabs(fsasList, ftasList, classifiersList))
hBoxLayout.addItem(exploreBox)
hBoxLayout.addItem(vBoxLayout)
hBoxLayout.setStretchFactor(exploreBox, 1)
hBoxLayout.setStretchFactor(vBoxLayout, 2)
self.setLayout(hBoxLayout)
def __createComboBox(self, label, items, name):
comboBox = QVBoxLayout()
comboBox.setSpacing(5)
label = QLabel(label, self._parent)
font = label.font()
font.setPointSize(12)
label.setFont(font)
cb = QComboBox()
cb.setObjectName(name)
cb.setFont(font)
for k in items:
cb.addItem(k)
comboBox.addWidget(label)
comboBox.addWidget(cb)
return comboBox
def __createTextInput(self, label, name, validator=None):
textBox = QVBoxLayout()
textBox.setSpacing(5)
label = QLabel(label, self._parent)
font = label.font()
font.setPointSize(12)
label.setFont(font)
tb = QLineEdit(self._parent)
tb.setObjectName(name)
tb.setFont(font)
textBox.addWidget(label)
textBox.addWidget(tb)
if validator is not None:
tb.setValidator(validator)
return textBox
def __createGridLayoutBox(self, tupleMargins, visibleBorder, background_color = '#fff'):
l = QGridLayout()
l.setContentsMargins(*tupleMargins)
return l
def __createListWidget(self, items, targetBox = None, name = None):
listWidget = ListWidgetCustom(items, targetBox, name)
font = listWidget.font()
font.setPointSize(12)
listWidget.setFont(font)
return listWidget
def __createTabs(self, fsasList, ftasList, classifiersList):
tabs = QTabWidget(self._parent)
fsas = self.__createListWidget(self.__niaamlFeatureSelectionAlgorithmsList, fsasList)
fsasList.setTarget(fsas)
tabs.addTab(fsas, 'Feature Selection Algorithms')
ftas = self.__createListWidget(self.__niaamlFeatureTransformAlgorithmsList, ftasList)
ftasList.setTarget(ftas)
        tabs.addTab(ftas, 'Feature Transform Algorithms')
clas = self.__createListWidget(self.__niaamlClassifiersList, classifiersList)
classifiersList.setTarget(clas)
tabs.addTab(clas, 'Classifiers')
font = tabs.font()
font.setPointSize(10)
tabs.setFont(font)
tabs.setStyleSheet("QTabBar::tab { height: 40px; }")
return tabs
def __selectDirectory(self):
fname = str(QFileDialog.getExistingDirectory(parent=self._parent, caption='Select Directory'))
self.findChild(QLineEdit, 'outputFolder').setText(fname)
def __runOptimize(self):
err = ''
csvSrc = self.findChild(QLineEdit, 'csvFile').text()
if self._isNoneOrWhiteSpace(csvSrc):
err += 'Select CSV dataset file.\n'
encoderName = self.__niaamlEncoders[str(self.findChild(QComboBox, 'encoders').currentText())]
imputerName = self.__niaamlImputers[str(self.findChild(QComboBox, 'imputers').currentText())]
optAlgName = str(self.findChild(QComboBox, 'optAlgos').currentText())
if not self.__is_v1:
optAlgInnerName = str(self.findChild(QComboBox, 'optAlgosInner').currentText())
if optAlgInnerName == 'None':
optAlgInnerName = optAlgName
popSize = self.findChild(QLineEdit, 'popSize').text()
if self._isNoneOrWhiteSpace(popSize):
err += 'Select population size.\n'
else:
try:
popSize = int(popSize)
            except ValueError:
err += 'Invalid population size value.\n'
if not self.__is_v1:
popSizeInner = self.findChild(QLineEdit, 'popSizeInner').text()
if self._isNoneOrWhiteSpace(popSizeInner):
err += 'Select inner population size.\n'
else:
try:
popSizeInner = int(popSizeInner)
                except ValueError:
err += 'Invalid inner population size value.\n'
numEvals = self.findChild(QLineEdit, 'numEvals').text()
if self._isNoneOrWhiteSpace(numEvals):
err += 'Select number of evaluations.\n'
else:
try:
numEvals = int(numEvals)
            except ValueError:
err += 'Invalid number of evaluations.\n'
if not self.__is_v1:
numEvalsInner = self.findChild(QLineEdit, 'numEvalsInner').text()
if self._isNoneOrWhiteSpace(numEvalsInner):
err += 'Select number of inner evaluations.\n'
else:
try:
numEvalsInner = int(numEvalsInner)
                except ValueError:
err += 'Invalid number of inner evaluations.\n'
fsasList = self.findChild(ListWidgetCustom, 'fsasList')
fsas = [self.__niaamlFeatureSelectionAlgorithms[fsasList.item(i).text()] for i in range(fsasList.count())]
ftasList = self.findChild(ListWidgetCustom, 'ftasList')
ftas = [self.__niaamlFeatureTransformAlgorithms[ftasList.item(i).text()] for i in range(ftasList.count())]
clsList = self.findChild(ListWidgetCustom, 'classifiersList')
classifiers = [self.__niaamlClassifiers[clsList.item(i).text()] for i in range(clsList.count())]
if len(classifiers) == 0:
err += 'Select at least one classifier.\n'
fitnessFunctionName = self.__niaamlFitnessFunctions[str(self.findChild(QComboBox, 'fitFuncs').currentText())]
outputFolder = self.findChild(QLineEdit, 'outputFolder').text()
if self._isNoneOrWhiteSpace(outputFolder):
err += 'Select an output directory.\n'
if not self._isNoneOrWhiteSpace(err):
self._parent.errorMessage.setText(err)
self._parent.errorMessage.show()
return
if not self.__is_v1:
self._processWindow = ProcessWindow(
self._parent,
ProcessWindowData(
True,
csvSrc,
self.findChild(QCheckBox, 'csv').isChecked(),
encoderName,
imputerName,
optAlgName,
optAlgInnerName,
popSize,
popSizeInner,
numEvals,
numEvalsInner,
fsas,
ftas,
classifiers,
fitnessFunctionName,
outputFolder
)
)
else:
self._processWindow = ProcessWindow(
self._parent,
ProcessWindowData(
'v1',
csvSrc,
self.findChild(QCheckBox, 'csv').isChecked(),
encoderName,
imputerName,
optAlgName=optAlgName,
popSize=popSize,
numEvals=numEvals,
fsas=fsas,
ftas=ftas,
classifiers=classifiers,
fitnessFunctionName=fitnessFunctionName,
outputFolder=outputFolder
)
)
self._processWindow.show()
| 1.664063
| 2
|
code/generate_mask_weights.py
|
bhuvi3/data590_capstone_project
| 4
|
12776146
|
import os
import numpy as np
import tensorflow as tf
def save_weights_resnet152_10channel():
# Initialize configuration
required_input_shape = (7, 7, 10, 64)
output_file_prefix = "resnet152_10channel"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
new_weights[:, :, 3:6, :] = input_layer_weights
new_weights[:, :, 6:9, :] = input_layer_weights
weights[0] = new_weights
# Save the new weights
np.save(os.path.join(os.getcwd(), 'data', output_file_prefix + "_mask_weights.npy"), weights)
def save_weights_resnet152_6channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 6, 64)
output_file_prefix = "resnet152_6channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_opticalflow_weights.npy"), weights)
def save_weights_resnet152_15channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 15, 64)
output_file_prefix = "resnet152_15channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights # OpticalFlow-1_2.
new_weights[:, :, 6:9, :] = input_layer_weights # Second image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-2_3.
new_weights[:, :, 12:15, :] = input_layer_weights # Third image.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_opticalflow_weights.npy"), weights)
def save_weights_resnet152_13channel(allpretrained=False):
"""
image_1 (3) + image_2 (3) + image_3 (3) + opticalflow_average (3) + MOG2_mask (1)
"""
# Initialize configuration
required_input_shape = (7, 7, 13, 64)
output_file_prefix = "resnet152_13channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
new_weights[:, :, 3:6, :] = input_layer_weights # Second image.
new_weights[:, :, 6:9, :] = input_layer_weights # Third image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-average.
# Mask always uses newly initialized weights.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_hybrid_weights.npy"), weights)
def save_weights_resnet152_16channel(allpretrained=False):
"""
image_1 (3) + opticalflow_1 (3) + image_2 (3) + opticalflow_2 (3) + image_3 (3) + MOG2_mask (1)
"""
# Initialize configuration
required_input_shape = (7, 7, 16, 64)
output_file_prefix = "resnet152_16channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights # First image.
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights # OpticalFlow-1_2.
new_weights[:, :, 6:9, :] = input_layer_weights # Second image.
if allpretrained:
new_weights[:, :, 9:12, :] = input_layer_weights # OpticalFlow-2_3.
new_weights[:, :, 12:15, :] = input_layer_weights # Third image.
# Mask always uses newly initialized weights.
# Reassign new weights.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_hybrid_weights.npy"), weights)
def save_weights_resnet152_7channel(allpretrained=False):
# Initialize configuration
required_input_shape = (7, 7, 7, 64)
output_file_prefix = "resnet152_7channel"
if allpretrained:
output_file_prefix = output_file_prefix + "_allpretrained"
# Initialize a model of choice
model_pretrained_conv = tf.keras.applications.ResNet152(weights='imagenet', include_top=False)
# Get the weights of the model
weights = model_pretrained_conv.get_weights()
input_layer_weights = weights[0]
print("Changing weights of the input layer from", input_layer_weights.shape, "to", required_input_shape)
# Change the weights to desired shape
new_weights = np.random.normal(0, 0.001, required_input_shape)
new_weights[:, :, :3, :] = input_layer_weights
if allpretrained:
new_weights[:, :, 3:6, :] = input_layer_weights
# 7th Channel for Mask uses the randomly initialized weights. Therefore, leave it as it is.
weights[0] = new_weights
# Save the new weights
np.save(os.path.join("..", 'data', output_file_prefix + "_maskopticalflow_weights.npy"), weights)
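# Usage sketch (not part of the original script): none of the functions above are
# invoked here, so an entry point like the following could be used to generate the
# weight files. The selected variants and the presence of the 'data' / '../data'
# output directories are assumptions.
if __name__ == "__main__":
    save_weights_resnet152_6channel(allpretrained=True)
    save_weights_resnet152_7channel(allpretrained=True)
    save_weights_resnet152_10channel()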
| 2.71875
| 3
|
eflow/_hidden/helper_functions/visualize_multidimensional_multi_threading.py
|
EricCacciavillani/eFlow
| 1
|
12776147
|
<filename>eflow/_hidden/helper_functions/visualize_multidimensional_multi_threading.py
# Math utils
import os
from multiprocessing import Pool as ThreadPool
from functools import partial
import math
import numpy as np
import multiprocessing as mp
from collections import ChainMap
def find_all_dist_with_target(matrix,
index_array,
target_dp_index):
"""
Finds all distances between the target and the other points.
"""
distances = np.zeros(len(index_array) - target_dp_index - 1)
for index, dp_index in enumerate(index_array[
target_dp_index + 1:]):
distances[index] = fast_eudis(matrix[target_dp_index],
matrix[dp_index])
# self.__pbar.update(1)
all_distances_to_target = dict()
all_distances_to_target[target_dp_index] = distances
return all_distances_to_target
def find_all_distances_in_matrix(matrix):
index_array = [i for i in range(0,
len(matrix))]
pool = ThreadPool(mp.cpu_count() - 2)
func = partial(find_all_dist_with_target,
matrix,
index_array)
all_dp_distances = list(
pool.imap(func,
index_array[:-1]))
all_dp_distances = dict(ChainMap(*all_dp_distances))
pool.close()
pool.join()
return all_dp_distances
def weighted_eudis(v1,
v2,
feature_weights):
dist = [((a - b) ** 2) * w for a, b, w in zip(v1, v2,
feature_weights)]
dist = math.sqrt(sum(dist))
return dist
def fast_eudis(v1,
v2):
dist = [((a - b) ** 2) for a, b in zip(v1, v2)]
dist = math.sqrt(sum(dist))
return dist
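# Minimal usage sketch (assumption: run as a script on a machine with more than two
# CPU cores, since find_all_distances_in_matrix spawns a multiprocessing pool and
# therefore needs the __main__ guard).
if __name__ == "__main__":
    demo_matrix = np.random.rand(10, 3)  # 10 points in 3 dimensions
    distances = find_all_distances_in_matrix(demo_matrix)
    # distances[i] holds the distances from point i to points i+1 .. n-1
    print(len(distances[0]))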
| 2.859375
| 3
|
testserver/blueprints/__init__.py
|
IfengAutomation/AutomationTestServer
| 0
|
12776148
|
<reponame>IfengAutomation/AutomationTestServer
from . import api, casemanager, users
__all__ = ["api", "casemanager", "users"]
| 1.070313
| 1
|
esbo_etc/classes/optical_component/CosmicBackground.py
|
LukasK13/ESBO-ETC
| 0
|
12776149
|
from .AOpticalComponent import AOpticalComponent
from ..IRadiant import IRadiant
import astropy.units as u
from astropy.modeling.models import BlackBody
from ..Entry import Entry
from typing import Union
class CosmicBackground(AOpticalComponent):
"""
    This class models the spectral radiance of the cosmic background as a black body radiator
"""
@u.quantity_input(temp=[u.Kelvin, u.Celsius])
def __init__(self, parent: IRadiant, temp: u.Quantity = 2.725 * u.K, emissivity: float = 1):
"""
        Initialize a new cosmic background component modelled as a black body
Parameters
----------
parent : IRadiant
The parent element of the optical component from which the electromagnetic radiation is received
temp : Quantity in Kelvin / Celsius
Temperature of the black body
emissivity : float
The spectral emissivity coefficient for the optical surface.
Returns
-------
"""
# Create black body model with given temperature
bb = BlackBody(temperature=temp, scale=1 * u.W / (u.m ** 2 * u.nm * u.sr))
# Initialize super class
super().__init__(parent, 1.0, lambda wl: bb(wl) * emissivity)
@staticmethod
def check_config(conf: Entry) -> Union[None, str]:
"""
Check the configuration for this class
Parameters
----------
conf : Entry
The configuration entry to be checked.
Returns
-------
mes : Union[None, str]
The error message of the check. This will be None if the check was successful.
"""
mes = conf.check_quantity("temp", u.K)
if mes is not None:
return mes
if hasattr(conf, "emissivity"):
mes = conf.check_float("emissivity")
if mes is not None:
return mes
| 2.90625
| 3
|
sentinel_possesor.py
|
yohaimagen/gamma_sentinel
| 8
|
12776150
|
#! /home/yohai/anaconda2/bin/python
import sys,os,re,time,glob,string,getopt, argparse
import numpy as np
############################################################
# print messages to standard error and log file            #
############################################################
def message(mes): # messages to standard error and log file
    """
    Prints a message string to standard error and also to the log file.
    """
    sys.stderr.write(mes+"\n")
    try:
        log = open(message_logfile,'a')
    except IOError:
        sys.exit("Could not find or open "+message_logfile)
    else:
        log.write(stage+" "+mes+"\n")
        log.close()
##########################################################
# logs commands and control errors #
##########################################################
def execlog(command): # logs commands and control errors
"""
    Controls the command executions using os.system, and logs the commands.
    If an error is raised when trying to execute a command, stops the script and writes the
    rest of the commands to the log file after a 'Skipping from here' note.
"""
global skipping
try:
log = open(cmd_logfile,'a')
except IOError:
sys.exit("Could not fined "+cmd_logfile)
else:
log.write(command+"\n")
log.close()
if not skipping:
cmd_strerror = os.strerror(os.system(command))
if not cmd_strerror == 'Success':
message(cmd_strerror)
message("Faild at "+stage)
if not stage == "* Unwrapping":
message("############## Skipping from here ##############")
log = open(cmd_logfile,'a')
log.write("############## Skipping from here ##############\n")
log.close()
skipping = 1
else:
return "unwfaild"
def Setupprocdir():
message("****** Initialize dirs structure ******")
os.mkdir('dem')
def Import_SLCs():
'''
    Make an S1 list file in which each line is the absolute path to an S1 scene directory
:return:
'''
message("****** Importing SLCs ******")
execlog('echo %s > S1_list' %master_path)
execlog('echo %s >> S1_list' %slave_path)
execlog('S1_TOPS_preproc S1_list slc %s S1_TOPS_preproc.log -m mli_%s_%s -r %s -a %s' %(pol, r_looks, a_looks, r_looks, a_looks))
execlog('OPOD_vec mli_%s_%s/%s_%s.mli.par %s' %(r_looks, a_looks, master, pol, orbit_files))
execlog('OPOD_vec mli_%s_%s/%s_%s.mli.par %s' % (r_looks, a_looks, slave, pol, orbit_files))
execlog('OPOD_vec slc/%s_iw%d_%s.slc.par %s' % (master, 1, pol, orbit_files))
execlog('OPOD_vec slc/%s_iw%d_%s.slc.par %s' % (master, 2, pol, orbit_files))
execlog('OPOD_vec slc/%s_iw%d_%s.slc.par %s' % (master, 3, pol, orbit_files))
execlog('OPOD_vec slc/%s_iw%d_%s.slc.par %s' % (slave, 1, pol, orbit_files))
execlog('OPOD_vec slc/%s_iw%d_%s.slc.par %s' % (slave, 2, pol, orbit_files))
execlog('OPOD_vec slc/%s_iw%d_%s.slc.par %s' % (slave, 3, pol, orbit_files))
def Download_DEM():
'''
    Download the SRTMGL1 DEM covering both scenes (it is imported into GAMMA by Import_DEM)
:return:
'''
def extract_corners(path):
with open(path) as f:
content = f.readlines()
max_lon, min_lon, max_lat, min_lat = 0, 0, 0, 0
for line in content:
words = line.split()
if len(words) >= 1:
if words[0] == 'min.':
if words[1] == 'latitude':
max_lat = float(words[7])
min_lat = float(words[3])
if words[1] == 'longitude':
max_lon = float(words[7])
min_lon = float(words[3])
return max_lon, min_lon, max_lat, min_lat
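    # Assumption about the SLC_corners output parsed above: lines of the form
    #   min. latitude (deg.):  <min>   max. latitude (deg.):  <max>
    #   min. longitude (deg.): <min>   max. longitude (deg.): <max>
    # i.e. the minimum is taken from the 4th token and the maximum from the 8th token.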
execlog('SLC_corners mli_%s_%s/%s_%s.mli.par > master_corners' %(r_looks, a_looks, master, pol))
execlog('SLC_corners mli_%s_%s/%s_%s.mli.par > slave_corners' % (r_looks, a_looks, slave, pol))
max_lon_m, min_lon_m, max_lat_m, min_lat_m = extract_corners('master_corners')
max_lon_s, min_lon_s, max_lat_s, min_lat_s = extract_corners('slave_corners')
max_lon = max_lon_m if max_lon_m > max_lon_s else max_lon_s
min_lon = min_lon_m if min_lon_m < min_lon_s else min_lon_s
max_lat = max_lat_m if max_lat_m > max_lat_s else max_lat_s
min_lat = min_lat_m if min_lat_m < min_lat_s else min_lat_s
execlog('wget "http://opentopo.sdsc.edu/otr/getdem?demtype=SRTMGL1&west=%f&south=%f&east=%f&north=%f&outputFormat=GTiff" -O dem/%s_dem.tif' %(min_lon, min_lat, max_lon, max_lat, interferogram_str))
def Import_DEM():
execlog('srtm2dem dem/%s_dem.tif dem/%s_dem dem/%s_dem.par 3 - -' %(interferogram_str, interferogram_str, interferogram_str))
def getWidth():
global width, r_pixel_size, a_pixel_size, lat
with open('mli_%s_%s/%s_%s.mli.par' %(r_looks, a_looks, master, pol)) as f:
content = f.readlines()
for line in content:
words = line.split()
if len(words) > 0:
if words[0] == 'range_samples:':
width = words[1]
if words[0] == 'range_pixel_spacing:':
r_pixel_size = float(words[1])
if words[0] == 'azimuth_pixel_spacing:':
a_pixel_size = float(words[1])
if words[0] == 'center_latitude:':
lat = float(words[1])
def DEM_geocode():
'''
    Use the perl script mk_geo_radcal to generate a lookup table between SAR and geographic geometry and to transform the DEM to SAR geometry; the script is run in stages 0-3
:return:
'''
pixel_siz = a_pixel_size if a_pixel_size < r_pixel_size else r_pixel_size
deg_pixel_size = pixel_siz / (111319.9 * np.cos(np.deg2rad(lat)))
execlog('mk_geo_radcal mli_%s_%s/%s_%s.mli mli_%s_%s/%s_%s.mli.par dem/%s_dem dem/%s_dem.par geo/%s.dem geo/%s.dem_par geo %s %f 0 2 -s .7 -e .35 -p -c -d' %(
r_looks, a_looks, master, pol, r_looks, a_looks, master, pol, interferogram_str, interferogram_str,
interferogram_str, interferogram_str, interferogram_str, deg_pixel_size))
execlog(
'mk_geo_radcal mli_%s_%s/%s_%s.mli mli_%s_%s/%s_%s.mli.par dem/%s_dem dem/%s_dem.par geo/%s.dem geo/%s.dem_par geo %s %f 1 2 -s .7 -e .35 -p -c -d' % (
r_looks, a_looks, master, pol, r_looks, a_looks, master, pol, interferogram_str, interferogram_str,
interferogram_str, interferogram_str, interferogram_str, deg_pixel_size))
execlog(
'mk_geo_radcal mli_%s_%s/%s_%s.mli mli_%s_%s/%s_%s.mli.par dem/%s_dem dem/%s_dem.par geo/%s.dem geo/%s.dem_par geo %s %f 2 2 -s .7 -e .35 -p -c -d' % (
r_looks, a_looks, master, pol, r_looks, a_looks, master, pol, interferogram_str, interferogram_str,
interferogram_str, interferogram_str, interferogram_str, deg_pixel_size))
execlog(
'mk_geo_radcal mli_%s_%s/%s_%s.mli mli_%s_%s/%s_%s.mli.par dem/%s_dem dem/%s_dem.par geo/%s.dem geo/%s.dem_par geo %s %f 3 2 -s .7 -e .35 -p -c -d' % (
r_looks, a_looks, master, pol, r_looks, a_looks, master, pol, interferogram_str, interferogram_str,
interferogram_str, interferogram_str, interferogram_str, deg_pixel_size))
def Resampling():
'''
    Use the shell script S1_coreg_TOPS to co-register the slave image to the master image and calculate the differential interferogram
:return:
'''
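    # Each *_tab file written below lists one sub-swath per line as:
    #   <slc> <slc_parameter_file> <tops_parameter_file>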
execlog('echo slc/%s_iw%d_%s.slc slc/%s_iw%d_%s.slc.par slc/%s_iw%d_%s.tops_par > SLC_%s_tab' %(master, 1, pol, master, 1, pol, master, 1, pol, master))
execlog('echo slc/%s_iw%d_%s.slc slc/%s_iw%d_%s.slc.par slc/%s_iw%d_%s.tops_par >> SLC_%s_tab' % (
master, 2, pol, master, 2, pol, master, 2, pol, master))
execlog('echo slc/%s_iw%d_%s.slc slc/%s_iw%d_%s.slc.par slc/%s_iw%d_%s.tops_par >> SLC_%s_tab' % (
master, 3, pol, master, 3, pol, master, 3, pol, master))
execlog('echo slc/%s_iw%d_%s.slc slc/%s_iw%d_%s.slc.par slc/%s_iw%d_%s.tops_par > SLC_%s_tab' % (
slave, 1, pol, slave, 1, pol, slave, 1, pol, slave))
execlog('echo slc/%s_iw%d_%s.slc slc/%s_iw%d_%s.slc.par slc/%s_iw%d_%s.tops_par >> SLC_%s_tab' % (
slave, 2, pol, slave, 2, pol, slave, 2, pol, slave))
execlog('echo slc/%s_iw%d_%s.slc slc/%s_iw%d_%s.slc.par slc/%s_iw%d_%s.tops_par >> SLC_%s_tab' % (
slave, 3, pol, slave, 3, pol, slave, 3, pol, slave))
execlog('echo slc/%s_iw%d_%s.rslc slc/%s_iw%d_%s.rslc.par slc/%s_iw%d_%s.rtops_par > SLCR_%s_tab' % (
slave, 1, pol, slave, 1, pol, slave, 1, pol, slave))
execlog('echo slc/%s_iw%d_%s.rslc slc/%s_iw%d_%s.rslc.par slc/%s_iw%d_%s.rtops_par >> SLCR_%s_tab' % (
slave, 2, pol, slave, 2, pol, slave, 2, pol, slave))
execlog('echo slc/%s_iw%d_%s.rslc slc/%s_iw%d_%s.rslc.par slc/%s_iw%d_%s.rtops_par >> SLCR_%s_tab' % (
slave, 3, pol, slave, 3, pol, slave, 3, pol, slave))
execlog('S1_coreg_TOPS SLC_%s_tab %s SLC_%s_tab %s SLCR_%s_tab geo/%s_dem.rdc %s %s - - 0.6 0.02 0.8' %(master, master, slave, slave, slave, interferogram_str, r_looks, a_looks))
def getDEMwidth():
global dem_width
with open('geo/%s.dem_par' %interferogram_str) as f:
content = f.readlines()
for line in content:
words = line.split()
if len(words) > 0:
if words[0] == 'width:':
dem_width = words[-1]
def Interferogram():
'''
Compute interferogram
'''
execlog('SLC_intf %s.rslc %s.rslc %s.rslc.par %s.rslc.par %s.off %s.int %s %s' %(master, slave, master, slave, interferogram_str, interferogram_str, r_looks, a_looks))
def Flattening():
'''
    Subtract the simulated unwrapped phase from the interferogram
'''
execlog('sub_phase %s.int %s.sim_unw %s.diff_par flat_%s.int 1' %(interferogram_str, interferogram_str, interferogram_str, interferogram_str))
def Filtering():
'''
    Perform adaptive filtering (adf) on the interferogram
'''
execlog('adf flat_%s.int filt_flat_%s.int - %s 1.0 32 7 - 0 0 .7' %(interferogram_str, interferogram_str, width))
def Unwrapping():
execlog('cc_wave filt_flat_%s.int - - filt_%s.ccw %s %s %s' %(interferogram_str, interferogram_str, width, cc_win, cc_win))
execlog('rascc_mask filt_%s.ccw %s.rmli %s 1 1 0 1 1 %s 0. 0.1 0.9 1. .35 1 filt_%s.mask.ras' %(interferogram_str, master, width, cc_threshold, interferogram_str))
execlog('mcf filt_flat_%s.int filt_%s.ccw filt_%s.mask.ras filt_%s.unw %s 1 - - - - 1 1 1024 1705 1639' %(interferogram_str, interferogram_str, interferogram_str, interferogram_str, width))
def Geocoding_back():
execlog('geocode_back filt_flat_%s.int %s geo/%s_1.map_to_rdc geo_%s.int %s - 0 1' %(interferogram_str, width, interferogram_str, interferogram_str, dem_width))
execlog('geocode_back %s.rmli %s geo/%s_1.map_to_rdc geo_%s.rmli %s - 0 0' %(master, width, interferogram_str, master, dem_width))
execlog('geocode_back filt_%s.unw %s geo/%s_1.map_to_rdc geo_%s.unw %s - 0 0' %(interferogram_str, width, interferogram_str, interferogram_str, dem_width))
execlog('cpx_to_real geo_%s.int geo_%s_real.int %s 4' %(interferogram_str, interferogram_str, dem_width))
def Make_headers():
execlog('par2rsc.py geo_%s.unw geo/%s.dem_par -h 0 -g' %(interferogram_str, interferogram_str))
execlog('par2rsc.py geo_%s_real.int geo/%s.dem_par -h 0 -g' %(interferogram_str, interferogram_str))
execlog('par2rsc.py geo_%s.rmli geo/%s.dem_par -h 0 -g' %(master, interferogram_str))
execlog('par2rsc.py geo/%s.dem geo/%s.dem_par -h 0 -g' %(interferogram_str, interferogram_str))
def Disp():
execlog('rasrmg geo_%s.unw geo_%s.rmli %s' %(interferogram_str, master, dem_width))
execlog('convert geo_%s.unw.ras geo_%s.unw.jpg' %(interferogram_str, interferogram_str))
execlog('rasmph_pwr geo_%s.int geo_%s.rmli %s 1 1 0 1 1 1. .35 1 geo_%s.int.ras' %(interferogram_str, master, dem_width, interferogram_str))
execlog('convert geo_%s.int.ras geo_%s.int.jpg' %(interferogram_str, interferogram_str))
def End():
    print '########## end processing ###############'
Process = ['Setupprocdir','Import_SLCs', 'Download_DEM', 'Import_DEM','getWidth','DEM_geocode','Resampling','getDEMwidth', 'Interferogram','Flattening','Filtering','Unwrapping', 'Geocoding_back','Make_headers', 'Disp','End']
Process_dict = {Process[i] : i for i in range(len(Process))}
Process_funcs = {'Setupprocdir':Setupprocdir,'Import_SLCs':Import_SLCs, 'Download_DEM':Download_DEM, 'Import_DEM':Import_DEM,'getWidth':getWidth,'DEM_geocode':DEM_geocode,'Resampling':Resampling,'getDEMwidth':getDEMwidth, 'Interferogram':Interferogram,'Flattening':Flattening,'Filtering':Filtering,'Unwrapping':Unwrapping, 'Geocoding_back':Geocoding_back,'Make_headers':Make_headers, 'Disp':Disp,'End':End}
arg_parser = argparse.ArgumentParser(description="This program process 2-pass sentinel interferogram from raw data to unwraped phase")
arg_parser.add_argument("master", help="Master sentinel SAR image data base dir path")
arg_parser.add_argument("slave", help="Slave sentinel SAR image data base dir path")
arg_parser.add_argument("-s", help="start processing at one of -- " + string.join(Process[:-1], "','"))
arg_parser.add_argument("-e", help="end processing at one of -s following process")
arg_parser.add_argument("--m_name", help="name of master file, if not spesipy taken to by master dir name")
arg_parser.add_argument("--s_name", help="name of slave file, if not spesipy taken to by slave dir name")
arg_parser.add_argument("--pol", help="SLC polarization to extract (hh,hv,vh,vv) default vv")
arg_parser.add_argument("--r_looks", help="number of range looks default: 20")
arg_parser.add_argument("--a_looks", help="number of azimuth looks default: 4")
arg_parser.add_argument("--cc", help="unwraping coherince threshold default: 0.2")
arg_parser.add_argument("--cc_win", help="coherince window size(in pixels) default: 5")
arg_parser.add_argument("--clean", help="delete all but output files defualt: false")
arg_parser.add_argument("--orbit_files", help="path to orbit files directory, if not spesfied assume their is a direcory named 'orbit_files' in the working direcory")
args = arg_parser.parse_args()
master_path = args.master
slave_path = args.slave
if args.m_name:
master = args.m_name
else:
master = master_path.split('/')[-1]
if args.s_name:
slave = args.s_name
else:
slave = slave_path.split('/')[-1]
interferogram_str = master + '_' + slave  # use the scene names (not the full paths) as the file-name prefix
cmd_logfile = interferogram_str + '_cmd_log'
message_logfile = interferogram_str + '_msg_log'
if args.pol:
if not args.pol in ('hh', 'hv', 'vh', 'vv'):
        print args.pol + ' is not a valid polarization'
        arg_parser.print_help()
        exit(1)
    pol = args.pol
else:
pol = 'vv'
if args.r_looks:
r_looks = args.r_looks
else:
r_looks = '20'
if args.a_looks:
a_looks = args.a_looks
else:
a_looks = '4'
if args.cc:
cc_threshold = args.cc
else:
cc_threshold = '0.2'
if args.cc_win:
cc_win = args.cc_win
else:
cc_win = '5'
if args.clean:
clean = True
else:
clean = False
if args.s:
if not args.s in Process:
print args.s + "not a process"
arg_parser.print_help()
exit(1)
stage = args.s
else:
stage = Process[0]
if args.orbit_files:
orbit_files = args.orbit_files
else:
if Process_dict[stage] < 2:
ls = os.listdir('.')
if 'orbit_files' not in ls:
print 'no orbit files directory'
arg_parser.print_help()
exit(1)
else:
orbit_files = 'orbit_files'
if args.e:
if not args.e in Process:
print args.e + "not a process"
arg_parser.print_help()
exit(1)
    if Process_dict[args.e] < Process_dict[stage]:
        print stage + " should be before " + args.e
        exit(1)
    end = args.e
else:
end = Process[-1]
width = 0
r_pixel_size = 0
a_pixel_size = 0
lat = 0
dem_width = 0
skipping = False
if Process_dict[stage] > 4:
getWidth()
if Process_dict[stage] > 7:
getDEMwidth()
for i in range(Process_dict[stage], Process_dict[end]):
stage = Process[i]
Process_funcs[Process[i]]()
| 2.65625
| 3
|