| hexsha (string, 40) | size (int64, 3-1.03M) | ext (10 classes) | lang (1 class) | max_stars_repo_path (string, 3-972) | max_stars_repo_name (string, 6-130) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (list, 1-10) | max_stars_count (int64, 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (string, 24 ⌀) | max_stars_repo_stars_event_max_datetime (string, 24 ⌀) | max_issues_repo_path (string, 3-972) | max_issues_repo_name (string, 6-130) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (list, 1-10) | max_issues_count (int64, 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (string, 24 ⌀) | max_issues_repo_issues_event_max_datetime (string, 24 ⌀) | max_forks_repo_path (string, 3-972) | max_forks_repo_name (string, 6-130) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (list, 1-10) | max_forks_count (int64, 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (string, 24 ⌀) | max_forks_repo_forks_event_max_datetime (string, 24 ⌀) | content (string, 3-1.03M) | avg_line_length (float64, 1.13-941k) | max_line_length (int64, 2-941k) | alphanum_fraction (float64, 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
04bfbe8a0aeff5f65ed33297c432a971d8ee587d | 8,895 | py | Python | tests/integration/test_fitness_predictor_island_evo.py | nolanstr/bingo_multi_stage | 7a88c4f5c59268d0612664be5864765db2edad51 | ["Apache-2.0"] | null | null | null | tests/integration/test_fitness_predictor_island_evo.py | nolanstr/bingo_multi_stage | 7a88c4f5c59268d0612664be5864765db2edad51 | ["Apache-2.0"] | null | null | null | tests/integration/test_fitness_predictor_island_evo.py | nolanstr/bingo_multi_stage | 7a88c4f5c59268d0612664be5864765db2edad51 | ["Apache-2.0"] | null | null | null |
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import pytest
import numpy as np
from bingo.chromosomes.multiple_values import SinglePointCrossover, \
SinglePointMutation, MultipleValueChromosomeGenerator
from bingo.evolutionary_optimizers.fitness_predictor_island \
import FitnessPredictorIsland as FPI
from bingo.evolutionary_optimizers \
import fitness_predictor_island as fpi_module
from bingo.evolutionary_algorithms.mu_plus_lambda import MuPlusLambda
from bingo.selection.tournament import Tournament
from bingo.evaluation.evaluation import Evaluation
from bingo.evaluation.fitness_function import FitnessFunction
from bingo.stats.hall_of_fame import HallOfFame
MAIN_POPULATION_SIZE = 40
PREDICTOR_POPULATION_SIZE = 4
TRAINER_POPULATION_SIZE = 4
SUBSET_TRAINING_DATA_SIZE = 2
FULL_TRAINING_DATA_SIZE = 20
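# Toy fitness function used throughout these tests: fitness is the Euclidean
# distance between an individual's values and the mean of the training data.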
class DistanceToAverage(FitnessFunction):
def __call__(self, individual):
self.eval_count += 1
avg_data = np.mean(self.training_data)
return np.linalg.norm(individual.values - avg_data)
@pytest.fixture
def full_training_data():
return np.linspace(0.1, 1, FULL_TRAINING_DATA_SIZE)
@pytest.fixture
def ev_alg(full_training_data):
crossover = SinglePointCrossover()
mutation = SinglePointMutation(np.random.random)
selection = Tournament(2)
fitness = DistanceToAverage(full_training_data)
evaluator = Evaluation(fitness)
return MuPlusLambda(evaluator, selection, crossover, mutation,
0., 1.0, MAIN_POPULATION_SIZE)
@pytest.fixture
def generator():
return MultipleValueChromosomeGenerator(np.random.random, 10)
@pytest.fixture
def fitness_predictor_island(ev_alg, generator):
island = FPI(ev_alg, generator, MAIN_POPULATION_SIZE,
predictor_population_size=PREDICTOR_POPULATION_SIZE,
trainer_population_size=TRAINER_POPULATION_SIZE,
predictor_size_ratio=SUBSET_TRAINING_DATA_SIZE/FULL_TRAINING_DATA_SIZE,
predictor_computation_ratio=0.4,
trainer_update_frequency=4,
predictor_update_frequency=5)
island._predictor_island._ea.variation._mutation_probability = 1.0
return island
@pytest.fixture
def fp_island_and_hof(ev_alg, generator):
hof = HallOfFame(5)
fp_island = FPI(ev_alg, generator, MAIN_POPULATION_SIZE,
predictor_population_size=PREDICTOR_POPULATION_SIZE,
trainer_population_size=TRAINER_POPULATION_SIZE,
predictor_size_ratio=SUBSET_TRAINING_DATA_SIZE/FULL_TRAINING_DATA_SIZE,
predictor_computation_ratio=0.4,
trainer_update_frequency=4,
predictor_update_frequency=5,
hall_of_fame=hof)
fp_island._predictor_island._ea.variation._mutation_probability = 1.0
return fp_island, hof
def test_best_fitness_is_true_fitness(fitness_predictor_island,
full_training_data):
true_fitness_function = DistanceToAverage(full_training_data)
best_individual = fitness_predictor_island.get_best_individual()
best_fitness = fitness_predictor_island.get_best_fitness()
expected_best_fitness = true_fitness_function(best_individual)
assert best_fitness == expected_best_fitness
def test_predictor_compute_ratios(fitness_predictor_island):
# init
point_evals_predictor = FULL_TRAINING_DATA_SIZE*TRAINER_POPULATION_SIZE
point_evals_predictor += 2 * point_evals_per_predictor_step()
point_evals_main = 0
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main step
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += 2 * point_evals_per_main_step()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + predictor
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_predictor += point_evals_per_predictor_step()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + 2 predictor
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_predictor += 2 * point_evals_per_predictor_step()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + predictor + trainer update
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_predictor += point_evals_per_predictor_step()
point_evals_predictor += point_evals_per_trainer_update()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
# main + predictor update
fitness_predictor_island.evolve(1, suppress_logging=True)
point_evals_main += point_evals_per_main_step()
point_evals_main += point_evals_per_predictor_update()
assert_expected_compute_ratio(fitness_predictor_island,
point_evals_main, point_evals_predictor)
def test_fitness_predictor_island_ages(fitness_predictor_island):
predictor_age = 1
main_age = 0
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
predictor_age += 1
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
predictor_age += 2
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
fitness_predictor_island._execute_generational_step()
main_age += 1
predictor_age += 1
assert fitness_predictor_island.generational_age == main_age
assert fitness_predictor_island._predictor_island.generational_age \
== predictor_age
def test_nan_on_predicted_variance_of_trainer(mocker,
fitness_predictor_island):
mocker.patch('bingo.evolutionary_optimizers.'
'fitness_predictor_island.np.var')
fpi_module.np.var.side_effect = OverflowError
island = fitness_predictor_island
trainer = island.population[0]
variance = island._calculate_predictor_variance_of(trainer)
assert np.isnan(variance)
def test_hof_gets_filled(fp_island_and_hof):
fp_island, hof = fp_island_and_hof
fp_island.evolve(1)
assert len(hof) == 5
def test_hof_has_true_fitness(fp_island_and_hof, full_training_data):
fp_island, hof = fp_island_and_hof
true_fitness_function = DistanceToAverage(full_training_data)
fp_island.evolve(1)
for indv in hof:
true_fitness = true_fitness_function(indv)
assert indv.fitness == pytest.approx(true_fitness)
def test_temp_hof_is_cleared_with_predictor_update(fp_island_and_hof, mocker):
fp_island, hof = fp_island_and_hof
mocker.spy(fp_island._hof_w_predicted_fitness, 'clear')
fp_island.evolve(9)
assert fp_island._hof_w_predicted_fitness.clear.call_count == 1
def assert_expected_compute_ratio(fitness_predictor_island, point_evals_main,
point_evals_predictor):
current_ratio = \
fitness_predictor_island._get_predictor_computation_ratio()
np.testing.assert_almost_equal(current_ratio,
point_evals_predictor /
(point_evals_predictor + point_evals_main))
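# Expected point-evaluation counts, derived from the constants above: a
# predictor step scores every predictor (on the data subset) against every
# trainer; a main step scores the full main population on the subset; a
# trainer update additionally scores the main population with each predictor
# and performs one full-training-data evaluation; a predictor update costs
# the same as a main step.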
def point_evals_per_predictor_step():
return SUBSET_TRAINING_DATA_SIZE * PREDICTOR_POPULATION_SIZE \
* TRAINER_POPULATION_SIZE
def point_evals_per_main_step():
return SUBSET_TRAINING_DATA_SIZE * MAIN_POPULATION_SIZE
def point_evals_per_trainer_update():
return SUBSET_TRAINING_DATA_SIZE * MAIN_POPULATION_SIZE * \
PREDICTOR_POPULATION_SIZE + FULL_TRAINING_DATA_SIZE + \
point_evals_per_predictor_step()
def point_evals_per_predictor_update():
return point_evals_per_main_step()
| 38.012821 | 91 | 0.747611 |
101fc0e864a53816f3f704d98c0b8b134c9526b8 | 4,363 | py | Python | ottoscript/triggers.py | qui3xote/ottolib | 02a18051c42170de3718040ef4d0141216adf6f2 | ["MIT"] | 2 | 2022-01-09T18:42:55.000Z | 2022-01-30T00:33:57.000Z | ottoscript/triggers.py | qui3xote/ottolib | 02a18051c42170de3718040ef4d0141216adf6f2 | ["MIT"] | 2 | 2022-01-08T15:22:06.000Z | 2022-03-04T11:03:43.000Z | ottoscript/triggers.py | qui3xote/ottolib | 02a18051c42170de3718040ef4d0141216adf6f2 | ["MIT"] | null | null | null |
from itertools import product
from pyparsing import (CaselessKeyword,
Optional,
Group,
)
from .ottobase import OttoBase
from .keywords import FROM, TO, FOR, ON, BEFORE, AFTER, SUNRISE, SUNSET
from .datatypes import Entity, Number, List, String, Var
from .time import RelativeTime, TimeStamp, DayOfWeek
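# StateTrigger subclasses build one trigger expression per entity, e.g.
# (hypothetical entity and values) "light.kitchen == 'on' and light.kitchen.old == 'off'".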
class StateTrigger(OttoBase):
@property
def strings(self):
strings = []
try:
for e in self.entities.contents:
if type(e) == Var:
e = e.fetch()
string = []
if self.new is not None:
string.append(f"{e.name} == '{self.new}'")
if self.old is not None:
string.append(f"{e.name}.old == '{self.old}'")
if len(string) == 0:
string.append(f"{e.name}")
strings.append(" and ".join(string))
except Exception as error:
self.ctx.log.error(f"Unable to parse state trigger {error}")
return strings
def as_dict(self):
return {
'type': self.type,
'strings': self.strings,
'hold': self.hold_seconds
}
@classmethod
def parsers(cls):
return [subclass() for subclass in cls.__subclasses__()]
class StateChange(StateTrigger):
term = (Entity() | Number() | String())
parser = Group(
List(Entity())("entities")
+ CaselessKeyword("CHANGES")
+ Optional(FROM + (Entity()("_old")
| Number()("_old") | String()("_old")))
+ Optional(TO + (Entity()("_new")
| Number()("_new") | String()("_new")))
+ Optional(FOR + (TimeStamp()("_hold")
| RelativeTime()("_hold")))
)
@property
def hold_seconds(self):
if hasattr(self, '_hold'):
return self._hold.seconds
else:
return 0
@property
def old(self):
if hasattr(self, "_old"):
return self._old._value
else:
return None
@property
def new(self):
if hasattr(self, "_new"):
return self._new._value
else:
return None
@property
def type(self):
return 'state'
def as_list(self):
return [
{
'type': self.type,
'string': string,
'hold': self.hold_seconds
}
for string in self.strings
]
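# TimeTrigger subclasses produce strings of the form
# "once(<day> <time> + <offset>s)"; see the strings property below.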
class TimeTrigger(OttoBase):
@property
def strings(self):
prod = product(self.days, self.times)
strings = [f"once({x[0]} {x[1]} + {self.offset}s)" for x in prod]
return strings
@property
def type(self):
return 'time'
@property
def days(self):
if not hasattr(self, "_days"):
return ['']
else:
result = []
for parser in self._days.contents:
result.extend(parser.days)
return result
def as_list(self):
return [
{'type': self.type, 'string': string}
for string in self.strings
]
@classmethod
def parsers(cls):
return [subclass() for subclass in cls.__subclasses__()]
class WeeklySchedule(TimeTrigger):
parser = Group(List(TimeStamp())("_times")
+ Optional(ON + List(DayOfWeek())("_days"))
)
def __init__(self, tokens):
super().__init__(tokens)
self.times = [x.string for x in self._times.contents]
@property
def offset(self):
return 0
class SunEvent(TimeTrigger):
parser = Group(Optional(RelativeTime()("time")
+ (BEFORE | AFTER)("relative")
)("_offset")
+ (SUNRISE("_time")
| SUNSET("_time")
)
+ Optional(ON + List(DayOfWeek())("_days"))
)
@property
def offset(self):
if not hasattr(self, "_offset"):
return 0
else:
sign = 1 if self._offset[1] == "AFTER" else -1
return sign * self._offset[0].seconds
@property
def times(self):
return [self._time.lower()]
| 25.816568 | 73 | 0.494385 |
612e55ad8da63f206953e6a1085e6d4c83234c0b | 1,736 | py | Python | tests/test_server.py | NYUDevOpsPayments/payments | c472c4f3102e13420a56284173def3f8de0c4509 | ["Apache-2.0"] | null | null | null | tests/test_server.py | NYUDevOpsPayments/payments | c472c4f3102e13420a56284173def3f8de0c4509 | ["Apache-2.0"] | 41 | 2018-02-23T00:08:08.000Z | 2018-03-26T22:27:27.000Z | tests/test_server.py | NYUDevOpsPayments/payments | c472c4f3102e13420a56284173def3f8de0c4509 | ["Apache-2.0"] | null | null | null |
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Payment API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
"""
import unittest
import os
import json
from flask_api import status # HTTP Status Codes
from mock import MagicMock, patch
import server
######################################################################
# T E S T C A S E S
######################################################################
class TestPetServer(unittest.TestCase):
""" Pet Server Tests """
def setUp(self):
""" Runs before each test """
self.app = server.app.test_client()
def tearDown(self):
pass
def test_index(self):
""" Test the Home Page """
resp = self.app.get('/')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
data = json.loads(resp.data)
self.assertEqual(data['name'], 'Payment Demo REST API Service')
######################################################################
# M A I N
######################################################################
if __name__ == '__main__':
unittest.main()
| 29.931034 | 74 | 0.578917 |
cf78c0537ae494cd16f2a8958dbb8ae2fdc51baf | 6,236 | py | Python | build.py | TinusChen/libPSI | 7fdf02eee3ae5d632b9187b58ffa011300f31c4d | ["Unlicense"] | null | null | null | build.py | TinusChen/libPSI | 7fdf02eee3ae5d632b9187b58ffa011300f31c4d | ["Unlicense"] | null | null | null | build.py | TinusChen/libPSI | 7fdf02eee3ae5d632b9187b58ffa011300f31c4d | ["Unlicense"] | null | null | null |
import os
import platform
import sys
import multiprocessing
import subprocess
import glob
# find the ninja generator on windows.
def getGenerator(args):
#osStr = (platform.system())
#
#if osStr == "Windows":
#
# for x in args:
# if x.startswith("-G"):
# break
#
# vswhereArgs = ['C:/Program Files (x86)/Microsoft Visual Studio/Installer/vswhere.exe', "-prerelease", "-latest", "-property", "installationPath"]
# rootpath = subprocess.check_output(vswhereArgs).decode("utf-8").strip()
#
# ninja = rootpath + "/COMMON7/IDE/COMMONEXTENSIONS/MICROSOFT/CMAKE/Ninja/ninja.exe"
# cl = rootpath + "/VC/Tools/MSVC/*/bin/Hostx64/x64/cl.exe"
# cls = glob.glob(cl)
# if len(cls) > 0:
# cl = cls[-1];
#
# # use ninja
# if os.path.exists(ninja) and os.path.exists(cl):
# return "-G \"Ninja\" -DCMAKE_MAKE_PROGRAM=\"{0}\" -DCMAKE_C_COMPILER:FILEPATH=\"{1}\" -DCMAKE_CXX_COMPILER:FILEPATH=\"{1}\" ".format(ninja, cl)
# else:
# print("failed to find ninja at: {0}\n or cl".format(ninja))
#
# use the default
return ""
def parseInstallArgs(args):
prefix = ""
doInstall = False
for x in args:
if x.startswith("--install="):
prefix = x.split("=",1)[1]
prefix = os.path.abspath(os.path.expanduser(prefix))
idx = args.index(x)
args[idx] = "-DCMAKE_INSTALL_PREFIX=" + prefix
doInstall = True
if x == "--install":
idx = args.index(x)
osStr = (platform.system())
if osStr == "Windows":
args[idx] = "-DCMAKE_INSTALL_PREFIX=c:/lib"
else:
args[idx] = "-DCMAKE_INSTALL_PREFIX=/usr/local"
doInstall = True
return (args, doInstall)
def getParallel(args):
par = multiprocessing.cpu_count()
for x in args:
if x.startswith("--par="):
val = x.split("=",1)[1]
par = int(val)
if par < 1:
par = 1
idx = args.index(x)
args[idx] = ""
return (args,par)
def replace(list, find, replace):
if find in list:
idx = list.index(find)
list[idx] = replace;
return list
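# Configure, build and optionally install the project with CMake; any
# arguments not recognized here are forwarded to the cmake configure step.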
def Build(projectName, argv):
osStr = (platform.system())
buildDir = ""
config = ""
buildType = ""
# use sudo when installing?
sudo = "--sudo" in argv;
argv = replace(argv, "--sudo", "-DSUDO_FETCH=ON")
if not sudo:
argv.append("-DSUDO_FETCH=OFF")
argv.append("-DENABLE_ALL_PSI=ON")
generator = getGenerator(argv)
    # do not automatically download dependencies
if "--noauto" in argv:
argv = replace(argv, "--noauto", "")
argv.append("-DFETCH_AUTO=OFF")
else:
argv.append("-DFETCH_AUTO=ON")
# get install options
argv, install = parseInstallArgs(argv)
# get parallel build options
argv, par = getParallel(argv)
argv.append("-DPARALLEL_FETCH="+str(par))
# do not run cmake config
noConfig = "--nc" in argv
argv = replace(argv, "--nc", "")
# only run cmake config.
setup = "--setup" in argv;
argv = replace(argv, "--setup", "")
# build type.
if "--debug" in argv:
buildType = "Debug"
else:
buildType = "Release"
argv.append("-DCMAKE_BUILD_TYPE={0}".format(buildType))
argv = replace(argv, "--debug", "")
# build dir
if osStr == "Windows":
buildDir = "out/build/x64-{0}".format(buildType)
config = "--config {0}".format(buildType)
elif osStr == "Darwin":
buildDir = "out/build/osx"
else:
buildDir = "out/build/linux"
# convert args to a string.
argStr = ""
for a in argv:
argStr = argStr + " " + a
# parallel build
parallel = ""
if par != 1:
parallel = " --parallel " + str(par)
# build commands
mkDirCmd = "mkdir -p {0}".format(buildDir);
CMakeCmd = "cmake {0} -S . -B {1} {2} ".format(generator, buildDir, argStr)
BuildCmd = "cmake --build {0} {1} {2} ".format(buildDir, config, parallel)
InstallCmd = ""
if sudo:
sudo = "sudo "
else:
sudo = ""
if install:
InstallCmd = sudo
InstallCmd += "cmake --install {0} {1} ".format(buildDir, config)
# print and execute commands.
print("\n\n====== build.py ("+projectName+") ========")
if not noConfig:
print(mkDirCmd)
print(CMakeCmd)
if not setup:
print(BuildCmd)
if len(InstallCmd):
print(InstallCmd)
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n\n")
if not noConfig:
os.system(mkDirCmd)
os.system(CMakeCmd)
if not setup:
os.system(BuildCmd)
if len(sudo) > 0:
print("installing "+projectName+": {0}\n".format(InstallCmd))
os.system(InstallCmd)
def help():
print(" --install \n\tInstructs the script to install whatever is currently being built to the default location.")
print(" --install=prefix \n\tinstall to the provided predix.")
print(" --sudo \n\twhen installing, use sudo. May require password.")
print(" --par=n \n\twhen building do use parallel builds with n threads. default = num cores.")
print(" --noauto \n\twhen building do not automaticly fetch dependancies.")
print(" --par=n \n\twhen building do use parallel builds with n threads. default = num cores.")
print(" --debug \n\tdebug build.")
print("any additioanl arguments are forwared to cmake.\n")
print("-build the library")
print(" python build.py")
print("-build the library with cmake configurations")
print(" python build.py --debug -DLIBPSI_ENABLE_X=ON")
print("-build the library and install with sudo")
print(" python build.py --install --sudo")
print("-build the library and install to prefix")
print(" python build.py --install=~/my/install/dir ")
def main(projectName, argv):
if "--help" in argv:
help()
return
# build the project.
Build(projectName, argv)
if __name__ == "__main__":
main("LIBPSI", sys.argv[1:])
| 28.87037 | 157 | 0.574727 |
771f8af69b1b57a934b98296a57e9c2a4cb6592a | 13,441 | py | Python | altamisa/isatab/validate_investigation.py | bihealth/altamisa | 2b0ade8dd5baf5b705ff9b27a697b40c889a8f2b | ["MIT"] | 2 | 2018-04-14T15:04:01.000Z | 2020-12-08T15:39:09.000Z | altamisa/isatab/validate_investigation.py | bihealth/altamisa | 2b0ade8dd5baf5b705ff9b27a697b40c889a8f2b | ["MIT"] | 49 | 2018-02-22T15:58:37.000Z | 2022-03-01T08:50:48.000Z | altamisa/isatab/validate_investigation.py | bihealth/altamisa | 2b0ade8dd5baf5b705ff9b27a697b40c889a8f2b | ["MIT"] | 3 | 2019-02-19T21:23:12.000Z | 2019-08-20T14:35:07.000Z |
# -*- coding: utf-8 -*-
"""Validation of an ISA investigation
Eventually, all format independent content- and specification-related validations which
don't interrupt model creation definitely (e.g. when parsing from ISA-tab) should go
here. Then, validations can be performed on whole models (e.g. after parsing or before
writing) and provide a comprehensive list of warnings of different degree.
"""
import re
from typing import Dict, Tuple
import warnings
from ..exceptions import (
AdvisoryIsaValidationWarning,
CriticalIsaValidationWarning,
ModerateIsaValidationWarning,
)
from .helpers import is_ontology_term_ref
from . import models
from .validate_assay_study import _OntologyTermRefValidator
__author__ = "Mathias Kuhring <mathias.kuhring@bihealth.de>"
# Pattern and helper functions for validation ------------------------------------------------------
# DATE_PATTERN = re.compile("^\\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\\d|3[01])$")
MAIL_PATTERN = re.compile("^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$")
PHONE_PATTERN = re.compile("^\\+?[\\d /()-]+$") # only checks characters!
DOI_PATTERN = re.compile("^(?:(?:DOI|doi):)?10[.][0-9]{4,}(?:[.][0-9]+)*/\\S+$")
PMID_PATTERN = re.compile("^\\d+$")
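# Illustrative values these patterns accept: "user@example.org" (mail),
# "+49 30 1234-567" (phone/fax), "10.1000/xyz123" (DOI), "12345678" (PubMed ID).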
def _validate_mail_address(mail_address) -> None:
"""Helper function to validate mail strings"""
if mail_address and not MAIL_PATTERN.match(mail_address):
tpl = "Invalid mail address: {}"
msg = tpl.format(mail_address)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_phone_number(phone_number) -> None:
"""Helper function to validate phone/fax number strings"""
if phone_number and not PHONE_PATTERN.match(phone_number):
tpl = "Invalid phone/fax number: {}"
msg = tpl.format(phone_number)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_doi(doi) -> None:
"""Helper function to validate doi strings"""
if doi and not DOI_PATTERN.match(doi):
tpl = "Invalid doi string: {}"
msg = tpl.format(doi)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_pubmed_id(pubmed_id) -> None:
"""Helper function to validate pubmed id strings"""
if pubmed_id and not PMID_PATTERN.match(pubmed_id):
tpl = "Invalid pubmed_id string: {}"
msg = tpl.format(pubmed_id)
warnings.warn(msg, AdvisoryIsaValidationWarning)
# Validator classes --------------------------------------------------------------------
class InvestigationValidator:
"""
Validator for Investigation
:type investigation: models.InvestigationInfo
:param investigation: The investigation model to validate
"""
def __init__(self, investigation: models.InvestigationInfo):
self._investigation = investigation
self._ontology_validator = _OntologyTermRefValidator(investigation.ontology_source_refs)
self._study_ids = set()
self._study_paths = set()
self._study_titles = set()
self._assay_paths = set()
def validate(self):
"""Validate the investigation"""
self._validate_ontology_sources()
self._validate_sections()
def _validate_ontology_sources(self):
for source in self._investigation.ontology_source_refs.values():
# Check that ontology sources are complete
if not all((source.name, source.file, source.version, source.description)):
tpl = "Incomplete ontology source; found: {}, {}, {}, {}, {}"
msg = tpl.format(
source.name, source.file, source.version, source.description, source.comments
)
warnings.warn(msg, CriticalIsaValidationWarning)
# Check that ontology source names contain no whitespaces
if re.search("\\s", source.name):
tpl = "Ontology source name including whitespace(s); found: {}, {}, {}, {}, {}"
msg = tpl.format(
source.name, source.file, source.version, source.description, source.comments
)
warnings.warn(msg, AdvisoryIsaValidationWarning)
def _validate_sections(self):
self._validate_investigation_info()
self._validate_publications(self._investigation.publications)
self._validate_contacts(self._investigation.contacts)
self._validate_studies()
def _validate_investigation_info(self):
info = self._investigation.info
# If only one study is available, metadata should be recorded in the study section
# (https://isa-specs.readthedocs.io/en/latest/isatab.html#investigation-section)
if len(self._investigation.studies) == 1:
if any((info.title, info.description, info.submission_date, info.public_release_date)):
tpl = (
"Investigation with only one study contains metadata:\n\tID:\t{}\n\tTitle:\t"
"{}\n\tPath:\t{}\n\tSubmission Date:\t{}\n\tPublic Release Date:\t{"
"}\n\tPrefer recording metadata in the study section."
)
msg = tpl.format(
info.identifier,
info.title,
info.path or "",
info.description,
info.submission_date,
info.public_release_date,
)
warnings.warn(msg, ModerateIsaValidationWarning)
# If more than one study is available, investigation should at least contain an id and title
else:
# Validate availability of investigation identifier
if not info.identifier:
tpl = "Investigation without identifier:\nTitle:\t{}\nPath:\t{}"
msg = tpl.format(info.title, info.path or "")
warnings.warn(msg, ModerateIsaValidationWarning)
# Validate availability of investigation title
if not info.title:
tpl = "Investigation without title:\nID:\t{}\nPath:\t{}"
msg = tpl.format(info.identifier, info.path or "")
warnings.warn(msg, ModerateIsaValidationWarning)
def _validate_studies(self):
# Check if any study exists
if not self._investigation.studies:
tpl = "No studies declared in investigation: {}"
msg = tpl.format(self._investigation.info.path)
warnings.warn(msg, CriticalIsaValidationWarning)
return
for study in self._investigation.studies:
# Validate availability of minimal study information (ids, paths, titles)
if not (study.info.identifier and study.info.path):
tpl = (
"Study with incomplete minimal information (ID and path):"
"\nID:\t{}\nTitle:\t{}\nPath:\t{}"
)
msg = tpl.format(study.info.identifier, study.info.title, study.info.path or "")
warnings.warn(msg, CriticalIsaValidationWarning)
if not study.info.title:
tpl = "Study without title:\nID:\t{}\nTitle:\t{}\nPath:\t{}"
msg = tpl.format(study.info.identifier, study.info.title, study.info.path or "")
warnings.warn(msg, ModerateIsaValidationWarning)
# Assure distinct studies, i.e. unique ids, paths and preferably titles
if study.info.identifier in self._study_ids:
tpl = "Study identifier used more than once: {}"
msg = tpl.format(study.info.identifier)
warnings.warn(msg, CriticalIsaValidationWarning)
else:
self._study_ids.add(study.info.identifier)
if study.info.path:
if study.info.path in self._study_paths:
tpl = "Study path used more than once: {}"
msg = tpl.format(study.info.path or "")
warnings.warn(msg, CriticalIsaValidationWarning)
else:
self._study_paths.add(study.info.path)
if study.info.title:
if study.info.title in self._study_titles:
tpl = "Study title used more than once: {}"
msg = tpl.format(study.info.title)
warnings.warn(msg, ModerateIsaValidationWarning)
else:
self._study_titles.add(study.info.title)
# Validate study sections
self._validate_publications(study.publications)
self._validate_contacts(study.contacts)
self._validate_designs(study.designs)
self._validate_factors(study.factors)
self._validate_assays(study.assays, study.info.identifier)
self._validate_protocols(study.protocols)
def _validate_publications(self, publications: Tuple[models.PublicationInfo]):
# Validate format of specific fields in publications
for publication in publications:
_validate_pubmed_id(publication.pubmed_id)
_validate_doi(publication.doi)
if is_ontology_term_ref(publication.status):
self._ontology_validator.validate(publication.status)
def _validate_contacts(self, contacts: Tuple[models.ContactInfo]):
# Validate format of specific fields in contacts
for contact in contacts:
_validate_mail_address(contact.email)
_validate_phone_number(contact.phone)
_validate_phone_number(contact.fax)
if is_ontology_term_ref(contact.role):
self._ontology_validator.validate(contact.role)
def _validate_designs(self, designs: Tuple[models.DesignDescriptorsInfo]):
# Validate format of specific fields in designs
for design in designs:
if is_ontology_term_ref(design.type):
self._ontology_validator.validate(design.type)
def _validate_factors(self, factors: Dict[str, models.FactorInfo]):
# Validate format of specific fields in factors
for factor in factors.values():
if is_ontology_term_ref(factor.type):
self._ontology_validator.validate(factor.type)
def _validate_assays(self, assays: Tuple[models.AssayInfo], study_id: str):
        # Check if any assays exist (according to the specs, having assays is not mandatory)
if not assays:
tpl = "No assays declared in study '{}' of investigation '{}'"
msg = tpl.format(study_id, self._investigation.info.path)
warnings.warn(msg, AdvisoryIsaValidationWarning)
return
for assay in assays:
# Validate availability of minimal assay information
# (path, measurement type, technology type and technology platform)
meas_type = (
assay.measurement_type.name
if is_ontology_term_ref(assay.measurement_type)
else assay.measurement_type
)
tech_type = (
assay.technology_type.name
if is_ontology_term_ref(assay.technology_type)
else assay.technology_type
)
if not (assay.path and meas_type and tech_type):
tpl = (
"Assay with incomplete minimal information (path, measurement and "
"technology type):\nPath:\t{}\nMeasurement Type:\t{}\nTechnology Type:\t{"
"}\nTechnology Platform:\t{}"
)
msg = tpl.format(assay.path or "", meas_type, tech_type, assay.platform)
warnings.warn(msg, CriticalIsaValidationWarning)
if not assay.platform:
tpl = (
"Assay without platform:\nPath:\t{}"
"\nMeasurement Type:\t{}\nTechnology Type:\t{}\nTechnology Platform:\t{}"
)
msg = tpl.format(assay.path or "", meas_type, tech_type, assay.platform)
warnings.warn(msg, AdvisoryIsaValidationWarning)
# Assure distinct assays, i.e. unique paths
if assay.path:
if assay.path in self._assay_paths:
tpl = "Assay path used more than once: {}"
msg = tpl.format(assay.path or "")
warnings.warn(msg, CriticalIsaValidationWarning)
else:
self._assay_paths.add(assay.path)
# Validate format of specific fields in assays
if is_ontology_term_ref(assay.measurement_type):
self._ontology_validator.validate(assay.measurement_type)
if is_ontology_term_ref(assay.technology_type):
self._ontology_validator.validate(assay.technology_type)
def _validate_protocols(self, protocols: Dict[str, models.ProtocolInfo]):
# Validate format of specific fields in protocols
for protocol in protocols.values():
if is_ontology_term_ref(protocol.type):
self._ontology_validator.validate(protocol.type)
for parameter in protocol.parameters.values():
if is_ontology_term_ref(parameter):
self._ontology_validator.validate(parameter)
for component in protocol.components.values():
if is_ontology_term_ref(component.type):
self._ontology_validator.validate(component.type)
| 46.832753 | 100 | 0.616621 |
836a56b4e896362593a1c2b9e89d467f1d3e2218 | 2,588 | py | Python | tools/tcmalloc/print-live-objects.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | tools/tcmalloc/print-live-objects.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | tools/tcmalloc/print-live-objects.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Symbolizes and prints live objects as recorded by tcmalloc's
HeapProfilerDumpLiveObjects.
"""
from __future__ import print_function
import os
import re
import subprocess
import sys
import tempfile
def usage():
print("""\
Usage:
tools/tcmalloc/print-live-objects.py out/Debug/chrome leaks.dmp
""")
def LoadDump(dump_file):
result = []
leakfmt = re.compile(
r"^\s*1:\s*(\d+)\s*\[\s*1:\s*\d+\]\s*@(0x[a-f0-9]+)((\s+0x[a-f0-9]+)*)$")
line_no = 0
with open(dump_file) as f:
for line in f:
line_no = line_no + 1
matches = leakfmt.match(line)
if not matches:
print("%s: could not parse line %d, skipping" % (dump_file, line_no))
else:
trace = { "size": int(matches.group(1)),
"address": matches.group(2),
"frames": matches.group(3).strip().split(" ")}
result.append(trace)
return result
def Symbolize(binary, traces):
addresses = set()
for trace in traces:
for frame in trace["frames"]:
addresses.add(frame)
addr_file, addr_filename = tempfile.mkstemp()
for addr in addresses:
os.write(addr_file, "%s\n" % addr)
os.close(addr_file)
syms = subprocess.Popen([
"addr2line", "-f", "-C", "-e", binary, "@%s" % addr_filename],
stdout=subprocess.PIPE).communicate()[0].strip().split("\n")
table = {}
cwd = os.getcwd()
for address, symbol, location in zip(addresses, syms[::2], syms[1::2]):
if location != "??:0":
filename, line = location.split(":")
filename = os.path.realpath(filename)[len(cwd)+1:]
location = "%s:%s" % (filename, line)
table[address] = { "name": symbol, "location": location }
for trace in traces:
frames = []
for frame in trace["frames"]:
frames.append(table[frame])
trace["frames"] = frames
def Main(argv):
if sys.platform != 'linux2':
print('print-live-objects.py requires addr2line only present on Linux.')
sys.exit(1)
if len(argv) != 3:
usage()
sys.exit(1)
traces = LoadDump(argv[2])
Symbolize(argv[1], traces)
if not traces:
print("No leaks found!")
for trace in sorted(traces, key=lambda x: -x["size"]):
print("Leak of %d bytes at address %s" % (trace["size"], trace["address"]))
for frame in trace["frames"]:
print(" %s (%s)" % (frame["name"], frame["location"]))
print("")
if __name__ == '__main__':
Main(sys.argv)
| 27.242105 | 79 | 0.618238 |
2c83cdca31604beba0040af1e7e9b8537ff54c1f | 597 | py | Python | blender/arm/logicnode/variable/LN_retain_value.py | Blacophis/Blender-addons | 8e91e5e84c15ed4d9ddc16f85a2bb00178ba0f91 | ["Zlib"] | 1 | 2022-03-07T22:04:26.000Z | 2022-03-07T22:04:26.000Z | blender/arm/logicnode/variable/LN_retain_value.py | Blacophis/Blender-addons | 8e91e5e84c15ed4d9ddc16f85a2bb00178ba0f91 | ["Zlib"] | null | null | null | blender/arm/logicnode/variable/LN_retain_value.py | Blacophis/Blender-addons | 8e91e5e84c15ed4d9ddc16f85a2bb00178ba0f91 | ["Zlib"] | null | null | null |
from arm.logicnode.arm_nodes import *
class RetainValueNode(ArmLogicTreeNode):
"""Retains the input value
    @input Retain: Retains the value when executed.
@input Value: The value that should be retained.
"""
bl_idname = 'LNRetainValueNode'
bl_label = 'Retain Value'
arm_section = 'set'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmNodeSocketAction', 'Retain')
self.add_input('ArmDynamicSocket', 'Value', is_var=True)
self.add_output('ArmDynamicSocket', 'Value')
self.add_output('ArmNodeSocketAction', 'Out')
| 29.85 | 64 | 0.683417 |
00b1bb37a1a46566ac2cc19741af2f41344b3d77 | 5,528 | py | Python | First course/2nd semester/Python3/pirates 4 lab.py | tekcellat/University | 9a0196a45c9cf33ac58018d636c3e4857eba0330 | ["MIT"] | null | null | null | First course/2nd semester/Python3/pirates 4 lab.py | tekcellat/University | 9a0196a45c9cf33ac58018d636c3e4857eba0330 | ["MIT"] | null | null | null | First course/2nd semester/Python3/pirates 4 lab.py | tekcellat/University | 9a0196a45c9cf33ac58018d636c3e4857eba0330 | ["MIT"] | 7 | 2020-12-04T07:26:46.000Z | 2022-03-08T17:47:47.000Z |
import pygame
from pygame import *
from math import *
WIN_WIDTH = 900; WIN_HEIGHT = 640
BACKGROUND_COLOR ='#00BFFF'
sea_width = 0
sea_size = Rect((0,round(WIN_HEIGHT*(2/3))),(WIN_WIDTH,WIN_HEIGHT))
island_size = Rect((round(WIN_WIDTH*(1/3)),370),(round(WIN_WIDTH*(1/3)),160))
def rot_center(image, angle):
"""rotate an image while keeping its center and size"""
orig_rect = image.get_rect()
rot_image = pygame.transform.rotate(image, angle)
rot_rect = orig_rect.copy()
rot_rect.center = rot_image.get_rect().center
rot_image = rot_image.subsurface(rot_rect).copy()
return rot_image
def main():
pygame.init()
DISPLAY = (WIN_WIDTH, WIN_HEIGHT)
screen = pygame.display.set_mode(DISPLAY)
    pygame.display.set_caption('Возможно здесь должен быть заголовок')  # i.e. "Perhaps a title should go here"
bg=Surface((WIN_WIDTH,WIN_HEIGHT))
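    # The nested helpers below pre-render the cloud, boat and fish sprites
    # onto transparent (SRCALPHA) surfaces that the main loop blits each frame.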
def SKY():
global sky
sky = Surface((200,100),SRCALPHA)
draw.ellipse(sky,Color('white'),Rect((50,15),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((100,40),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((50,65),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((0,40),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((20,55),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((20,30),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((85,30),(100,40)),0)
draw.ellipse(sky,Color('white'),Rect((85,55),(100,40)),0)
def BOAT():
global boat
boat = Surface((300,200),SRCALPHA)
draw.polygon(boat,Color('#FF8C00'),[(50,150),(260,150),(260,130),(295,130),(295,190),(90,190)],0)
draw.line(boat,Color('#FF8C00'),(50,150),(0,140),3)
draw.line(boat,Color('#FF8C00'),(120,30),(120,150),4)
draw.line(boat,Color('#FF8C00'),(200,10),(200,150),4)
draw.line(boat,Color('#FF8C00'),(292,120),(292,130),4)
draw.polygon(boat,Color('red'),[(200,10),(220,10),(215,15),(220,20),(200,20)],0)
x=75
for i in range(12):
draw.ellipse(boat,Color('#2F4F4F'),Rect((x,160),(15,15)),0)
x+=17
draw.lines(boat,Color('black'),False,[(0,140),(120,30),(200,30),(292,120)],2)
draw.arc(boat,Color('White'),[142,30,120,120],pi/2,3*pi/2,5)
draw.arc(boat,Color('White'),[90,30,75,120],pi/2,3*pi/2,5)
draw.arc(boat,Color('White'),[0,120,55,50],pi/180*170,pi/180*(350),5)
def FISH():
global fish
fish = Surface((100,100),SRCALPHA)
# fish.fill(Color('white'))
draw.ellipse(fish,Color('#008080'),Rect((20,30),(80,40)),0)
draw.polygon(fish,Color('#008080'),[(0,30),(30,40),(30,60),(0,70),(18,50)],0)
draw.polygon(fish,Color('#008080'),[(0,30),(30,40),(30,60),(0,70),(18,50)],0)
draw.polygon(fish,Color('#008080'),[(50,15),(50,35),(80,35)],0)
draw.polygon(fish,Color('#008080'),[(40,60),(40,80),(70,60)],0)
draw.polygon(fish,Color('#0000CD'),[(80,55),(100,50),(100,60)],0)
draw.ellipse(fish,Color('black'),Rect((80,40),(10,10)),0)
t = 0
x = 0
flag = True
SKY(); BOAT(); FISH()
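    # Animation loop: t drives the drifting clouds, the boat crossing from the
    # right and the fish swimming and bobbing across the sea.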
while flag:
for e in event.get():
if e.type == QUIT: flag = False
screen.blit(bg, (0,0))
draw.rect(bg, Color(BACKGROUND_COLOR), Rect(0,0,WIN_WIDTH, WIN_HEIGHT) , 0)
        # sun
draw.ellipse(bg,Color('yellow'),Rect((750,30),(80,80)),0)
g = 0
while g<=pi*2:
draw.line(bg,Color('yellow'),(130*cos(g)+790,130*sin(g)+70),\
(790,70),2)
g+=(pi*2)/8
p = pi*2/16
while p<=pi*2:
draw.line(bg,Color('yellow'),(100*cos(p)+790,100*sin(p)+70),(790,70),\
2)
p+=pi*2/16
bg.blit(sky,(t/4,20))
bg.blit(sky,(1000-t/4,130))
draw.ellipse(bg,Color('#FFD700'),island_size,sea_width)
draw.rect(bg, Color('#0000CD'), sea_size, sea_width)
y1=350; y2=370
for i in range(11):
draw.polygon(bg,Color('#CD853F'),[(435,y1),(465,y1),(460,y2),(440,y2)],sea_width)
y1-=20;y2-=20
draw.polygon(bg,Color('#7FFF00'),[(450,150),(550,70),(530,100),(600,90),(540,120),(590,120),(530,130),(570,140)],sea_width)
draw.polygon(bg,Color('#7FFF00'),[(450,150),(350,70),(380,100),(310,90),(370,120),(330,120),(380,130),(350,140)],sea_width)
draw.polygon(bg,Color('#7FFF00'),[(450,150),(550,160),(530,170),(570,180),(550,190),(600,220),(540,210)],sea_width)
draw.polygon(bg,Color('#7FFF00'),[(450,150),(350,160),(370,170),(330,180),(350,190),(300,220),(360,210)],sea_width)
draw.lines(bg,Color('#000000'),False,[(480,380),(485,380),(490,360),(495,380),(500,380)],3)
draw.lines(bg,Color('#000000'),False,[(481,365),(482,350),(498,350),(501,335)],3)
draw.line(bg,Color('#000000'),(490,350),(490,360),3)
draw.ellipse(bg,Color('#000000'),Rect((483,338),(15,15)),3)
bg.blit(rot_center(fish, 50*cos(0.02*t)), (t/3, 460+50*atan(-50*sin(0.02*t)*0.02)))
draw.rect(bg,Color('#0000CD'),Rect((0,510),(WIN_WIDTH,WIN_HEIGHT)),0)
bg.blit(boat,(900-t/3,400))
t += 0.5
if t > 5000:
t = 0
display.update()
main()
| 44.224 | 132 | 0.535094 |
8566fd859e0452ef686b2904f740766f37ccba40 | 7,015 | py | Python | sympy/solvers/ode/tests/test_single.py | amartinhernan/sympy | 8501526ca26a80ac1b7866a4c1678fcf0045e971 | ["BSD-3-Clause"] | null | null | null | sympy/solvers/ode/tests/test_single.py | amartinhernan/sympy | 8501526ca26a80ac1b7866a4c1678fcf0045e971 | ["BSD-3-Clause"] | null | null | null | sympy/solvers/ode/tests/test_single.py | amartinhernan/sympy | 8501526ca26a80ac1b7866a4c1678fcf0045e971 | ["BSD-3-Clause"] | null | null | null |
#
# The main tests for the code in single.py are currently located in
# sympy/solvers/tests/test_ode.py
#
from sympy import (Derivative, diff,
Eq, exp, log, Rational, sin,
symbols, Ei)
from sympy.core import Function, Symbol
from sympy.solvers.ode import dsolve
from sympy.solvers.ode.ode import _remove_redundant_solutions
from sympy.solvers.ode.single import (FirstLinear, ODEMatchError,
SingleODEProblem, SingleODESolver)
from sympy.solvers.ode.subscheck import checkodesol
from sympy.testing.pytest import XFAIL, raises
x = Symbol('x')
f = Function('f')
g = Function('g')
C1, C2, C3 = symbols('C1:4')
def test_SingleODESolver():
# Test that not implemented methods give NotImplementedError
# Subclasses should override these methods.
problem = SingleODEProblem(f(x).diff(x), f(x), x)
solver = SingleODESolver(problem)
raises(NotImplementedError, lambda: solver.matches())
raises(NotImplementedError, lambda: solver.get_general_solution())
raises(NotImplementedError, lambda: solver._matches())
raises(NotImplementedError, lambda: solver._get_general_solution())
# This ODE can not be solved by the FirstLinear solver. Here we test that
# it does not match and the asking for a general solution gives
# ODEMatchError
problem = SingleODEProblem(f(x).diff(x) + f(x)*f(x), f(x), x)
solver = FirstLinear(problem)
raises(ODEMatchError, lambda: solver.get_general_solution())
solver = FirstLinear(problem)
assert solver.matches() is False
    # These are just tests for the order of the ODE problem
problem = SingleODEProblem(f(x).diff(x) + f(x), f(x), x)
assert problem.order == 1
problem = SingleODEProblem(f(x).diff(x,4) + f(x).diff(x,2) - f(x).diff(x,3), f(x), x)
assert problem.order == 4
def test_nth_algebraic():
eqn = Eq(Derivative(f(x), x), Derivative(g(x), x))
sol = Eq(f(x), C1 + g(x))
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic'), dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = (diff(f(x)) - x)*(diff(f(x)) + x)
sol = [Eq(f(x), C1 - x**2/2), Eq(f(x), C1 + x**2/2)]
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert set(sol) == set(dsolve(eqn, f(x), hint='nth_algebraic'))
assert set(sol) == set(dsolve(eqn, f(x)))
eqn = (1 - sin(f(x))) * f(x).diff(x)
sol = Eq(f(x), C1)
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
M, m, r, t = symbols('M m r t')
phi = Function('phi')
eqn = Eq(-M * phi(t).diff(t),
Rational(3, 2) * m * r**2 * phi(t).diff(t) * phi(t).diff(t,t))
solns = [Eq(phi(t), C1), Eq(phi(t), C1 + C2*t - M*t**2/(3*m*r**2))]
assert checkodesol(eqn, solns[0], order=2, solve_for_func=False)[0]
assert checkodesol(eqn, solns[1], order=2, solve_for_func=False)[0]
assert set(solns) == set(dsolve(eqn, phi(t), hint='nth_algebraic'))
assert set(solns) == set(dsolve(eqn, phi(t)))
eqn = f(x) * f(x).diff(x) * f(x).diff(x, x)
sol = Eq(f(x), C1 + C2*x)
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = f(x) * f(x).diff(x) * f(x).diff(x, x) * (f(x) - 1)
sol = Eq(f(x), C1 + C2*x)
assert checkodesol(eqn, sol, order=1, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
eqn = f(x) * f(x).diff(x) * f(x).diff(x, x) * (f(x) - 1) * (f(x).diff(x) - x)
solns = [Eq(f(x), C1 + x**2/2), Eq(f(x), C1 + C2*x)]
assert checkodesol(eqn, solns[0], order=2, solve_for_func=False)[0]
assert checkodesol(eqn, solns[1], order=2, solve_for_func=False)[0]
assert set(solns) == set(dsolve(eqn, f(x), hint='nth_algebraic'))
assert set(solns) == set(dsolve(eqn, f(x)))
def test_nth_algebraic_issue15999():
eqn = f(x).diff(x) - C1
sol = Eq(f(x), C1*x + C2) # Correct solution
assert checkodesol(eqn, sol, order=1, solve_for_func=False) == (True, 0)
assert dsolve(eqn, f(x), hint='nth_algebraic') == sol
assert dsolve(eqn, f(x)) == sol
def test_nth_algebraic_redundant_solutions():
# This one has a redundant solution that should be removed
eqn = f(x)*f(x).diff(x)
soln = Eq(f(x), C1)
assert checkodesol(eqn, soln, order=1, solve_for_func=False)[0]
assert soln == dsolve(eqn, f(x), hint='nth_algebraic')
assert soln == dsolve(eqn, f(x))
# This has two integral solutions and no algebraic solutions
eqn = (diff(f(x)) - x)*(diff(f(x)) + x)
sol = [Eq(f(x), C1 - x**2/2), Eq(f(x), C1 + x**2/2)]
assert all(c[0] for c in checkodesol(eqn, sol, order=1, solve_for_func=False))
assert set(sol) == set(dsolve(eqn, f(x), hint='nth_algebraic'))
assert set(sol) == set(dsolve(eqn, f(x)))
eqn = f(x) + f(x)*f(x).diff(x)
solns = [Eq(f(x), 0),
Eq(f(x), C1 - x)]
assert all(c[0] for c in checkodesol(eqn, solns, order=1, solve_for_func=False))
assert set(solns) == set(dsolve(eqn, f(x)))
solns = [Eq(f(x), exp(x)),
Eq(f(x), C1*exp(C2*x))]
solns_final = _remove_redundant_solutions(eqn, solns, 2, x)
assert solns_final == [Eq(f(x), C1*exp(C2*x))]
# This one needs a substitution f' = g.
eqn = -exp(x) + (x*Derivative(f(x), (x, 2)) + Derivative(f(x), x))/x
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x))
#
# These tests can be combined with the above test if they get fixed
# so that dsolve actually works in all these cases.
#
# prep = True breaks this
def test_nth_algebraic_noprep1():
eqn = Derivative(x*f(x), x, x, x)
sol = Eq(f(x), (C1 + C2*x + C3*x**2) / x)
assert checkodesol(eqn, sol, order=3, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=False, hint='nth_algebraic')
@XFAIL
def test_nth_algebraic_prep1():
eqn = Derivative(x*f(x), x, x, x)
sol = Eq(f(x), (C1 + C2*x + C3*x**2) / x)
assert checkodesol(eqn, sol, order=3, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=True, hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
# prep = True breaks this
def test_nth_algebraic_noprep2():
eqn = Eq(Derivative(x*Derivative(f(x), x), x)/x, exp(x))
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=False, hint='nth_algebraic')
@XFAIL
def test_nth_algebraic_prep2():
eqn = Eq(Derivative(x*Derivative(f(x), x), x)/x, exp(x))
sol = Eq(f(x), C1 + C2*log(x) + exp(x) - Ei(x))
assert checkodesol(eqn, sol, order=2, solve_for_func=False)[0]
assert sol == dsolve(eqn, f(x), prep=True, hint='nth_algebraic')
assert sol == dsolve(eqn, f(x))
| 37.918919 | 98 | 0.62851 |
c72c70a898dbdf97781c4cdb2d1c74d04453733a | 27,791 | py | Python | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/event/eventhandler.py | lahirus/stratos | 3a50768a87ccf2668a4c08d36fde00b70260dd07 | ["Apache-2.0"] | null | null | null | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/event/eventhandler.py | lahirus/stratos | 3a50768a87ccf2668a4c08d36fde00b70260dd07 | ["Apache-2.0"] | null | null | null | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/event/eventhandler.py | lahirus/stratos | 3a50768a87ccf2668a4c08d36fde00b70260dd07 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from threading import Thread
from ..util import cartridgeagentutils
from ..artifactmgt.git.agentgithandler import *
from ..artifactmgt.repository import Repository
from config import Config
import publisher
from entity import *
from ..util.log import LogFactory
import constants
SUPER_TENANT_ID = -1234
SUPER_TENANT_REPO_PATH = "/repository/deployment/server/"
TENANT_REPO_PATH = "/repository/tenants/"
class EventHandler:
"""
Event execution related logic
"""
def __init__(self):
self.__log = LogFactory().get_log(__name__)
def on_instance_started_event(self):
self.__log.debug("Processing instance started event...")
# TODO: copy artifacts extension
self.execute_event_extendables(constants.INSTANCE_STARTED_EVENT, {})
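    # Config.lvs_virtual_ip is expected in the form "<virtual-ip>|<subnet-mask>";
    # it is split on "|" below.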
def create_dummy_interface(self):
self.__log.debug("Processing lvs dummy interface creation...")
lvs_vip = Config.lvs_virtual_ip.split("|")
self.__log.debug("LVS dummy interface creation values %s %s " %(lvs_vip[0], lvs_vip[1]) )
self.execute_event_extendables(constants.CREATE_LVS_DUMMY_INTERFACE,
{"EVENT": constants.CREATE_LVS_DUMMY_INTERFACE, "LVS_DUMMY_VIRTUAL_IP": lvs_vip[0],
"LVS_SUBNET_MASK": lvs_vip[1]})
def on_instance_activated_event(self):
self.__log.debug("Processing instance activated event...")
self.execute_event_extendables(constants.INSTANCE_ACTIVATED_EVENT, {})
def on_artifact_updated_event(self, artifacts_updated_event):
self.__log.info("Processing Artifact update event: [tenant] %s [cluster] %s [status] %s" %
(str(artifacts_updated_event.tenant_id),
artifacts_updated_event.cluster_id,
artifacts_updated_event.status))
cluster_id_event = str(artifacts_updated_event.cluster_id).strip()
cluster_id_payload = Config.cluster_id
repo_url = str(artifacts_updated_event.repo_url).strip()
if (repo_url != "") and (cluster_id_payload is not None) and (cluster_id_payload == cluster_id_event):
local_repo_path = Config.app_path
repo_password = None
if artifacts_updated_event.repo_password is not None:
secret = Config.cartridge_key
repo_password = cartridgeagentutils.decrypt_password(artifacts_updated_event.repo_password, secret)
repo_username = artifacts_updated_event.repo_username
tenant_id = artifacts_updated_event.tenant_id
is_multitenant = Config.is_multiTenant
commit_enabled = artifacts_updated_event.commit_enabled
self.__log.info("Executing git checkout")
if local_repo_path is None:
raise GitRepositorySynchronizationException("Repository path is empty. Cannot perform Git operations.")
# create repo object
local_repo_path = self.get_repo_path_for_tenant(str(tenant_id), local_repo_path, is_multitenant)
repo_info = Repository(repo_url, repo_username, repo_password, local_repo_path, tenant_id, commit_enabled)
# checkout code
subscribe_run, updated = AgentGitHandler.checkout(repo_info)
# execute artifact updated extension
plugin_values = {"ARTIFACT_UPDATED_CLUSTER_ID": artifacts_updated_event.cluster_id,
"ARTIFACT_UPDATED_TENANT_ID": artifacts_updated_event.tenant_id,
"ARTIFACT_UPDATED_REPO_URL": artifacts_updated_event.repo_url,
"ARTIFACT_UPDATED_REPO_PASSWORD": artifacts_updated_event.repo_password,
"ARTIFACT_UPDATED_REPO_USERNAME": artifacts_updated_event.repo_username,
"ARTIFACT_UPDATED_STATUS": artifacts_updated_event.status}
self.execute_event_extendables(constants.ARTIFACT_UPDATED_EVENT, plugin_values)
if subscribe_run:
# publish instanceActivated
publisher.publish_instance_activated_event(Config.health_stat_plugin)
elif updated:
# updated on pull
self.on_artifact_update_scheduler_event(tenant_id)
update_artifacts = Config.read_property(constants.ENABLE_ARTIFACT_UPDATE, False)
if update_artifacts:
auto_commit = Config.is_commits_enabled
auto_checkout = Config.is_checkout_enabled
try:
update_interval = int(Config.artifact_update_interval)
except ValueError:
self.__log.exception("Invalid artifact sync interval specified.")
update_interval = 10
self.__log.info("Artifact updating task enabled, update interval: %s seconds" % update_interval)
self.__log.info("Auto Commit is turned %s " % ("on" if auto_commit else "off"))
self.__log.info("Auto Checkout is turned %s " % ("on" if auto_checkout else "off"))
AgentGitHandler.schedule_artifact_update_task(
repo_info,
auto_checkout,
auto_commit,
update_interval)
def on_artifact_update_scheduler_event(self, tenant_id):
self.__log.info("Processing Artifact update scheduler event...")
plugin_values = {"ARTIFACT_UPDATED_TENANT_ID": str(tenant_id),
"ARTIFACT_UPDATED_SCHEDULER": str(True)}
self.execute_event_extendables("ArtifactUpdateSchedulerEvent", plugin_values)
def on_instance_cleanup_cluster_event(self):
self.__log.info("Processing instance cleanup cluster event...")
self.cleanup(constants.INSTANCE_CLEANUP_CLUSTER_EVENT)
def on_instance_cleanup_member_event(self):
self.__log.info("Processing instance cleanup member event...")
self.cleanup(constants.INSTANCE_CLEANUP_MEMBER_EVENT)
def on_member_activated_event(self, member_activated_event):
self.__log.info("Processing Member activated event: [service] %r [cluster] %r [member] %r"
% (member_activated_event.service_name,
member_activated_event.cluster_id,
member_activated_event.member_id))
member_initialized = self.is_member_initialized_in_topology(
member_activated_event.service_name,
member_activated_event.cluster_id,
member_activated_event.member_id)
if not member_initialized:
self.__log.error("Member has not initialized, failed to execute member activated event")
return
self.execute_event_extendables(constants.MEMBER_ACTIVATED_EVENT, {})
def on_complete_topology_event(self, complete_topology_event):
self.__log.debug("Processing Complete topology event...")
service_name_in_payload = Config.service_name
cluster_id_in_payload = Config.cluster_id
member_id_in_payload = Config.member_id
member_initialized = self.is_member_initialized_in_topology(
service_name_in_payload,
cluster_id_in_payload,
member_id_in_payload)
self.__log.debug("Member initialized %s", member_initialized)
if member_initialized:
# Set cartridge agent as initialized since member is available and it is in initialized state
Config.initialized = True
topology = complete_topology_event.get_topology()
service = topology.get_service(service_name_in_payload)
cluster = service.get_cluster(cluster_id_in_payload)
plugin_values = {"TOPOLOGY_JSON": json.dumps(topology.json_str),
"MEMBER_LIST_JSON": json.dumps(cluster.member_list_json)}
self.execute_event_extendables(constants.COMPLETE_TOPOLOGY_EVENT, plugin_values)
def on_member_initialized_event(self):
"""
Member initialized event is sent by cloud controller once volume attachment and
ip address allocation is completed successfully
:return:
"""
self.__log.debug("Processing Member initialized event...")
service_name_in_payload = Config.service_name
cluster_id_in_payload = Config.cluster_id
member_id_in_payload = Config.member_id
member_exists = self.member_exists_in_topology(service_name_in_payload, cluster_id_in_payload,
member_id_in_payload)
self.__log.debug("Member exists: %s" % member_exists)
if member_exists:
Config.initialized = True
self.execute_event_extendables(constants.MEMBER_INITIALIZED_EVENT, {})
def on_complete_tenant_event(self, complete_tenant_event):
self.__log.debug("Processing Complete tenant event...")
tenant_list_json = complete_tenant_event.tenant_list_json
self.__log.debug("Complete tenants:" + json.dumps(tenant_list_json))
plugin_values = {"TENANT_LIST_JSON": json.dumps(tenant_list_json)}
self.execute_event_extendables(constants.COMPLETE_TENANT_EVENT, plugin_values)
def on_member_terminated_event(self, member_terminated_event):
self.__log.info("Processing Member terminated event: [service] %s [cluster] %s [member] %s" %
(member_terminated_event.service_name, member_terminated_event.cluster_id,
member_terminated_event.member_id))
member_initialized = self.is_member_initialized_in_topology(
member_terminated_event.service_name,
member_terminated_event.cluster_id,
member_terminated_event.member_id
)
if not member_initialized:
self.__log.error("Member has not initialized, failed to execute member terminated event")
return
self.execute_event_extendables(constants.MEMBER_TERMINATED_EVENT, {})
def on_member_suspended_event(self, member_suspended_event):
self.__log.info("Processing Member suspended event: [service] %s [cluster] %s [member] %s" %
(member_suspended_event.service_name, member_suspended_event.cluster_id,
member_suspended_event.member_id))
member_initialized = self.is_member_initialized_in_topology(
member_suspended_event.service_name,
member_suspended_event.cluster_id,
member_suspended_event.member_id
)
if not member_initialized:
self.__log.error("Member has not initialized, failed to execute member suspended event")
return
self.execute_event_extendables(constants.MEMBER_SUSPENDED_EVENT, {})
def on_member_started_event(self, member_started_event):
self.__log.info("Processing Member started event: [service] %s [cluster] %s [member] %s" %
(member_started_event.service_name, member_started_event.cluster_id,
member_started_event.member_id))
member_initialized = self.is_member_initialized_in_topology(
member_started_event.service_name,
member_started_event.cluster_id,
member_started_event.member_id
)
if not member_initialized:
self.__log.error("Member has not initialized, failed to execute member started event")
return
self.execute_event_extendables(constants.MEMBER_STARTED_EVENT, {})
def start_server_extension(self):
self.__log.info("Processing start server extension...")
service_name_in_payload = Config.service_name
cluster_id_in_payload = Config.cluster_id
member_id_in_payload = Config.member_id
member_initialized = self.is_member_initialized_in_topology(service_name_in_payload, cluster_id_in_payload,
member_id_in_payload)
if not member_initialized:
self.__log.error("Member has not initialized, failed to execute start server event")
return
self.execute_event_extendables("StartServers", {})
def volume_mount_extension(self, persistence_mappings_payload):
self.__log.info("Processing volume mount extension...")
self.execute_event_extendables("VolumeMount", persistence_mappings_payload)
def on_domain_mapping_added_event(self, domain_mapping_added_event):
tenant_domain = EventHandler.find_tenant_domain(domain_mapping_added_event.tenant_id)
self.__log.info(
"Processing Domain mapping added event: [tenant-id] " + str(domain_mapping_added_event.tenant_id) +
" [tenant-domain] " + tenant_domain + " [domain-name] " + domain_mapping_added_event.domain_name +
" [application-context] " + domain_mapping_added_event.application_context
)
plugin_values = {"SUBSCRIPTION_APPLICATION_ID": domain_mapping_added_event.application_id,
"SUBSCRIPTION_SERVICE_NAME": domain_mapping_added_event.service_name,
"SUBSCRIPTION_DOMAIN_NAME": domain_mapping_added_event.domain_name,
"SUBSCRIPTION_CLUSTER_ID": domain_mapping_added_event.cluster_id,
"SUBSCRIPTION_TENANT_ID": int(domain_mapping_added_event.tenant_id),
"SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
"SUBSCRIPTION_CONTEXT_PATH":
domain_mapping_added_event.context_path}
self.execute_event_extendables(constants.DOMAIN_MAPPING_ADDED_EVENT, plugin_values)
def on_domain_mapping_removed_event(self, domain_mapping_removed_event):
tenant_domain = EventHandler.find_tenant_domain(domain_mapping_removed_event.tenant_id)
self.__log.info(
"Domain mapping removed event received: [tenant-id] " + str(domain_mapping_removed_event.tenant_id) +
" [tenant-domain] " + tenant_domain + " [domain-name] " + domain_mapping_removed_event.domain_name
)
plugin_values = {"SUBSCRIPTION_APPLICATION_ID": domain_mapping_removed_event.application_id,
"SUBSCRIPTION_SERVICE_NAME": domain_mapping_removed_event.service_name,
"SUBSCRIPTION_DOMAIN_NAME": domain_mapping_removed_event.domain_name,
"SUBSCRIPTION_CLUSTER_ID": domain_mapping_removed_event.cluster_id,
"SUBSCRIPTION_TENANT_ID": int(domain_mapping_removed_event.tenant_id),
"SUBSCRIPTION_TENANT_DOMAIN": tenant_domain}
self.execute_event_extendables(constants.DOMAIN_MAPPING_REMOVED_EVENT, plugin_values)
def on_copy_artifacts_extension(self, src, dest):
self.__log.info("Processing Copy artifacts extension...")
plugin_values = {"SOURCE": src, "DEST": dest}
self.execute_event_extendables("CopyArtifacts", plugin_values)
def on_tenant_subscribed_event(self, tenant_subscribed_event):
self.__log.info(
"Processing Tenant subscribed event: [tenant] " + str(tenant_subscribed_event.tenant_id) +
" [service] " + tenant_subscribed_event.service_name + " [cluster] " + tenant_subscribed_event.cluster_ids
)
self.execute_event_extendables(constants.TENANT_SUBSCRIBED_EVENT, {})
def on_application_signup_removed_event(self, application_signup_removal_event):
self.__log.info(
"Processing Tenant unsubscribed event: [tenant] " + str(application_signup_removal_event.tenantId) +
" [application ID] " + str(application_signup_removal_event.applicationId)
)
if Config.application_id == application_signup_removal_event.applicationId:
AgentGitHandler.remove_repo(application_signup_removal_event.tenantId)
self.execute_event_extendables(constants.APPLICATION_SIGNUP_REMOVAL_EVENT, {})
def cleanup(self, event):
self.__log.info("Executing cleaning up the data in the cartridge instance...")
publisher.publish_maintenance_mode_event()
self.execute_event_extendables("clean", {})
self.__log.info("cleaning up finished in the cartridge instance...")
self.__log.info("publishing ready to shutdown event...")
publisher.publish_instance_ready_to_shutdown_event()
def execute_event_extendables(self, event, input_values):
""" Execute the extensions and plugins related to the event
:param event: The event name string
:param input_values: the values to be passed to the plugin
:return:
"""
try:
input_values = EventHandler.add_common_input_values(input_values)
input_values["EVENT"] = event
except Exception as e:
self.__log.error("Error while adding common input values for event extendables: %s" % e)
# Execute the extension
self.execute_extension_for_event(event, input_values)
# Execute the plugins
self.execute_plugins_for_event(event, input_values)
def execute_plugins_for_event(self, event, input_values):
""" For each plugin registered for the specified event, start a plugin execution thread
:param str event: The event name string
:param dict input_values: the values to be passed to the plugin
:return:
"""
try:
plugins_for_event = Config.plugins.get(event)
if plugins_for_event is not None:
for plugin_info in plugins_for_event:
self.__log.debug("Executing plugin %s for event %s" % (plugin_info.name, event))
plugin_thread = PluginExecutor(plugin_info, input_values)
plugin_thread.start()
# block till plugin run completes.
plugin_thread.join()
else:
self.__log.debug("No plugins registered for event %s" % event)
except Exception as e:
self.__log.exception("Error while executing plugin for event %s: %s" % (event, e))
def execute_extension_for_event(self, event, extension_values):
""" Execute the extension related to the event
:param event: The event name string
:param extension_values: the values to be passed to the plugin
:return:
"""
try:
if Config.extension_executor is not None:
self.__log.debug("Executing extension for event [%s]" % event)
extension_thread = PluginExecutor(Config.extension_executor, extension_values)
extension_thread.start()
# block till plugin run completes.
extension_thread.join()
else:
self.__log.debug("No extensions registered for event %s" % event)
except OSError:
self.__log.warn("No extension was found for event %s" % event)
except Exception as e:
self.__log.exception("Error while executing extension for event %s: %s" % (event, e))
def get_repo_path_for_tenant(self, tenant_id, git_local_repo_path, is_multitenant):
""" Finds the repository path for tenant to clone from the remote repository
:param tenant_id:
:param git_local_repo_path:
:param is_multitenant:
:return:
"""
repo_path = ""
if is_multitenant:
if tenant_id == SUPER_TENANT_ID:
# super tenant, /repository/deploy/server/
super_tenant_repo_path = Config.super_tenant_repository_path
# "app_path"
repo_path += git_local_repo_path
if super_tenant_repo_path is not None and super_tenant_repo_path != "":
super_tenant_repo_path = super_tenant_repo_path if super_tenant_repo_path.startswith("/") \
else "/" + super_tenant_repo_path
super_tenant_repo_path = super_tenant_repo_path if super_tenant_repo_path.endswith("/") \
else super_tenant_repo_path + "/"
# "app_path/repository/deploy/server/"
repo_path += super_tenant_repo_path
else:
# "app_path/repository/deploy/server/"
repo_path += SUPER_TENANT_REPO_PATH
else:
# normal tenant, /repository/tenants/tenant_id
tenant_repo_path = Config.tenant_repository_path
# "app_path"
repo_path += git_local_repo_path
if tenant_repo_path is not None and tenant_repo_path != "":
tenant_repo_path = tenant_repo_path if tenant_repo_path.startswith("/") else "/" + tenant_repo_path
tenant_repo_path = tenant_repo_path if tenant_repo_path.endswith("/") else tenant_repo_path + "/"
# "app_path/repository/tenants/244653444"
repo_path += tenant_repo_path + tenant_id
else:
# "app_path/repository/tenants/244653444"
repo_path += TENANT_REPO_PATH + tenant_id
# tenant_dir_path = git_local_repo_path + AgentGitHandler.TENANT_REPO_PATH + tenant_id
# GitUtils.create_dir(repo_path)
else:
# not multi tenant, app_path
repo_path = git_local_repo_path
self.__log.debug("Repo path returned : %r" % repo_path)
return repo_path
def is_member_initialized_in_topology(self, service_name, cluster_id, member_id):
if self.member_exists_in_topology(service_name, cluster_id, member_id):
topology = TopologyContext.get_topology()
service = topology.get_service(service_name)
cluster = service.get_cluster(cluster_id)
found_member = cluster.get_member(member_id)
if found_member.status == MemberStatus.Initialized:
return True
return False
def member_exists_in_topology(self, service_name, cluster_id, member_id):
topology = TopologyContext.get_topology()
service = topology.get_service(service_name)
if service is None:
self.__log.error("Service not found in topology [service] %s" % service_name)
return False
cluster = service.get_cluster(cluster_id)
if cluster is None:
self.__log.error("Cluster id not found in topology [cluster] %s" % cluster_id)
return False
activated_member = cluster.get_member(member_id)
if activated_member is None:
self.__log.error("Member id not found in topology [member] %s" % member_id)
return False
return True
@staticmethod
def add_common_input_values(plugin_values):
"""
Adds the common parameters to be used by the extension scripts
:param dict[str, str] plugin_values: Dictionary to be added
:return: Dictionary with updated parameters
:rtype: dict[str, str]
"""
if plugin_values is None:
plugin_values = {}
elif type(plugin_values) != dict:
plugin_values = {"VALUE1": str(plugin_values)}
plugin_values["APPLICATION_PATH"] = Config.app_path
plugin_values["PARAM_FILE_PATH"] = Config.read_property(constants.PARAM_FILE_PATH, False)
plugin_values["PERSISTENCE_MAPPINGS"] = Config.persistence_mappings
lb_cluster_id_in_payload = Config.lb_cluster_id
lb_private_ip, lb_public_ip = EventHandler.get_lb_member_ip(lb_cluster_id_in_payload)
plugin_values["LB_IP"] = lb_private_ip if lb_private_ip is not None else Config.lb_private_ip
plugin_values["LB_PUBLIC_IP"] = lb_public_ip if lb_public_ip is not None else Config.lb_public_ip
topology = TopologyContext.get_topology()
if topology.initialized:
service = topology.get_service(Config.service_name)
cluster = service.get_cluster(Config.cluster_id)
member_id_in_payload = Config.member_id
member = cluster.get_member(member_id_in_payload)
EventHandler.add_properties(service.properties, plugin_values, "SERVICE_PROPERTY")
EventHandler.add_properties(cluster.properties, plugin_values, "CLUSTER_PROPERTY")
EventHandler.add_properties(member.properties, plugin_values, "MEMBER_PROPERTY")
plugin_values.update(Config.get_payload_params())
return EventHandler.clean_process_parameters(plugin_values)
@staticmethod
def add_properties(properties, params, prefix):
"""
Adds the given property list to the parameters list with given prefix in the parameter name
:param dict[str, str] properties: service properties
:param dict[str, str] params:
:param str prefix:
:return: dict[str, str]
"""
        if not properties:
return
for key in properties:
params[prefix + "_" + key] = str(properties[key])
@staticmethod
def get_lb_member_ip(lb_cluster_id):
topology = TopologyContext.get_topology()
services = topology.get_services()
for service in services:
clusters = service.get_clusters()
for cluster in clusters:
members = cluster.get_members()
for member in members:
if member.cluster_id == lb_cluster_id:
return member.member_default_private_ip, member.member_default_public_ip
return None, None
@staticmethod
def clean_process_parameters(params):
"""
Removes any null valued parameters before passing them to the extension scripts
:param dict params:
:return: cleaned parameters
:rtype: dict
"""
        # iterate over a copy so keys can be deleted safely while looping
        for key, value in list(params.items()):
            if value is None:
                del params[key]
return params
@staticmethod
def find_tenant_domain(tenant_id):
tenant = TenantContext.get_tenant(tenant_id)
if tenant is None:
raise RuntimeError("Tenant could not be found: [tenant-id] %s" % str(tenant_id))
return tenant.tenant_domain
class PluginExecutor(Thread):
""" Executes a given plugin on a separate thread, passing the given dictionary of values to the plugin entry method
"""
def __init__(self, plugin_info, values):
Thread.__init__(self)
self.__plugin_info = plugin_info
self.__values = values
self.__log = LogFactory().get_log(__name__)
def run(self):
try:
self.__plugin_info.plugin_object.run_plugin(self.__values)
except Exception as e:
self.__log.exception("Error while executing plugin %s: %s" % (self.__plugin_info.name, e))
| 45.633826
| 122
| 0.66759
|
d2b6fad63dc5019ec429152c03b565d16f253ed8
| 1,217
|
py
|
Python
|
krit/teams/models.py
|
huroncg/krit-teams
|
ce96a49de44496c2f86e37dd917c51952fbbdeed
|
[
"BSD-3-Clause"
] | null | null | null |
krit/teams/models.py
|
huroncg/krit-teams
|
ce96a49de44496c2f86e37dd917c51952fbbdeed
|
[
"BSD-3-Clause"
] | null | null | null |
krit/teams/models.py
|
huroncg/krit-teams
|
ce96a49de44496c2f86e37dd917c51952fbbdeed
|
[
"BSD-3-Clause"
] | 1
|
2021-02-26T01:38:35.000Z
|
2021-02-26T01:38:35.000Z
|
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from .conf import settings
def load_model_class(model_path):
dot = model_path.rindex('.')
module_name = model_path[:dot]
class_name = model_path[dot + 1:]
try:
_class = getattr(import_module(module_name), class_name)
return _class
except (ImportError, AttributeError):
raise ImproperlyConfigured('%s cannot be imported' % model_path)
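# Example (hypothetical, assumes the dotted path is importable):
# load_model_class("django.contrib.auth.models.User") returns the User model class.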
class Team(load_model_class(settings.KRIT_TEAMS_BASE_TEAM_MODEL)):
class Meta(load_model_class(settings.KRIT_TEAMS_BASE_TEAM_MODEL).Meta):
verbose_name = "Team"
verbose_name_plural = "Teams"
class Membership(load_model_class(settings.KRIT_TEAMS_BASE_MEMBERSHIP_MODEL)):
team = models.ForeignKey(Team,
related_name="memberships",
verbose_name="team",
on_delete=models.CASCADE)
class Meta(load_model_class(settings.KRIT_TEAMS_BASE_MEMBERSHIP_MODEL).Meta):
unique_together = [("team", "user", "invitation")]
verbose_name = "Membership"
verbose_name_plural = "Memberships"
| 32.891892
| 81
| 0.691044
|
6b61cf29dd081f5373b969dfc184bdc763f01f03
| 1,279
|
py
|
Python
|
tests/benchmark.py
|
teald/vplanet
|
ab640af7d2bbf80c5ea647d5bf971e2ce2c40631
|
[
"MIT"
] | 92
|
2018-08-02T22:41:23.000Z
|
2022-01-13T20:59:43.000Z
|
tests/benchmark.py
|
teald/vplanet
|
ab640af7d2bbf80c5ea647d5bf971e2ce2c40631
|
[
"MIT"
] | 104
|
2018-08-01T17:28:02.000Z
|
2022-03-31T22:23:13.000Z
|
tests/benchmark.py
|
teald/vplanet
|
ab640af7d2bbf80c5ea647d5bf971e2ce2c40631
|
[
"MIT"
] | 35
|
2018-06-20T19:32:09.000Z
|
2022-01-31T00:58:53.000Z
|
import vplanet
import numpy as np
import os
import pytest
import functools
import inspect
import astropy.units as u
def recursive_getattr(obj, attr, *args):
_getattr = lambda obj, attr: getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
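# e.g. recursive_getattr(output, "log.final.earth.Radius") walks nested attributes (illustrative path).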
class Benchmark:
def test_benchmark(self, vplanet_output, param, value, unit, param_options):
# The value returned by vplanet
output_value = recursive_getattr(vplanet_output, param)
# Are we comparing a specific index of an array?
index = param_options.pop("index", None)
if index is not None:
output_value = output_value[index]
# The expected value
benchmark_value = value * unit
# Check
assert np.allclose(output_value, benchmark_value, **param_options)
def benchmark(args_dict):
args = []
for param, v in args_dict.items():
value = v.pop("value", 0.0)
unit = v.pop("unit", u.Unit(""))
marks = v.pop("marks", None)
if marks is not None:
args.append(pytest.param(param, value, unit, v, marks=marks))
else:
args.append(pytest.param(param, value, unit, v))
return pytest.mark.parametrize("param,value,unit,param_options", args)
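# Illustrative usage sketch (hypothetical parameter names/values, not part of this module):
# the `benchmark` helper builds a pytest parametrize mark that drives Benchmark.test_benchmark.
#
# @benchmark({"log.final.earth.Radius": {"value": 1.0, "unit": u.km, "rtol": 1e-4}})
# class TestExample(Benchmark):
#     pass  # a `vplanet_output` fixture is expected to be provided by the test module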
| 29.744186
| 80
| 0.649726
|
ae54e350b87574e28fe2bfc9ec1fa97e63c65f22
| 2,633
|
py
|
Python
|
information/views.py
|
ajosephau/hivery-coding-challenge
|
41d1d35b14448aecd136bf80df64935e6ad2c4aa
|
[
"MIT"
] | null | null | null |
information/views.py
|
ajosephau/hivery-coding-challenge
|
41d1d35b14448aecd136bf80df64935e6ad2c4aa
|
[
"MIT"
] | 7
|
2022-03-14T12:12:47.000Z
|
2022-03-28T12:17:49.000Z
|
information/views.py
|
ajosephau/django_coding_challenge
|
4912ec9c344ab7e6f2fc6189fb10a5edea9b586d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# fmt: off
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.request import Request
from rest_framework.response import Response
from information.models import Company, Person
from information.serializers import (CompanySerializer,
MutualFriendsDetailSerializer,
PersonWithFoodByTypeSerializer)
# fmt: on
@api_view(["GET"])
def company_by_index(request: Request, index: int) -> Response:
"""
    Given a company, returns all of its employees.
    Returns an empty list if the company doesn't have any employees.
"""
try:
company = Company.objects.get(index=index)
except Company.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
serializer = CompanySerializer(company)
return Response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
def mutual_friends_alive_with_brown_eyes(
request: Request, person_one_index: int, person_two_index: int
) -> Response:
"""
Given 2 people, returns their information (Name, Age, Address, phone) and the list of their
friends in common which have brown eyes and are still alive.
"""
try:
person_one = Person.objects.get(index=person_one_index)
person_two = Person.objects.get(index=person_two_index)
except Person.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
mutual_friends = Person.objects.alive_mutual_friends_brown_eyes(
person_one, person_two
)
serializer = MutualFriendsDetailSerializer(
person_one,
context={
"person_one": person_one,
"person_two": person_two,
"mutual_friends": mutual_friends,
},
)
return Response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
def food_for_person_by_index(request: Request, index: int) -> Response:
"""
Given 1 person, returns a list of fruits and vegetables they like.
"""
try:
person = Person.objects.get(index=index)
except Person.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
serializer = PersonWithFoodByTypeSerializer(person)
return Response(serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
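# Hypothetical URL wiring for these views (not part of this module), e.g. in information/urls.py:
#
# from django.urls import path
# from information import views
#
# urlpatterns = [
#     path("companies/<int:index>/employees/", views.company_by_index),
#     path("people/<int:person_one_index>/friends/<int:person_two_index>/", views.mutual_friends_alive_with_brown_eyes),
#     path("people/<int:index>/food/", views.food_for_person_by_index),
# ]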
| 32.9125
| 95
| 0.673376
|
73409c344403b38b3f4e1a3525333b1a4ac3a95a
| 2,107
|
py
|
Python
|
utils/results_manager.py
|
eshanking/fears
|
8d69af08c5aba9fefdbf962ab568c2ca58276c0d
|
[
"MIT"
] | null | null | null |
utils/results_manager.py
|
eshanking/fears
|
8d69af08c5aba9fefdbf962ab568c2ca58276c0d
|
[
"MIT"
] | null | null | null |
utils/results_manager.py
|
eshanking/fears
|
8d69af08c5aba9fefdbf962ab568c2ca58276c0d
|
[
"MIT"
] | 1
|
2021-11-09T14:42:01.000Z
|
2021-11-09T14:42:01.000Z
|
import os
import pickle
import pandas as pd
from fears.utils import dir_manager
from fears.classes.experiment_class import Experiment
# from fears.classes.experiment_class import Experiment
def get_experiment_results(suffix=None,exp=None):
"""
Parameters
----------
    suffix : str
        Suffix used to locate 'results_<suffix>/experiment_info_<suffix>.p'
        within /fears/results/. Ignored when exp is provided.
    exp : Experiment, optional
        Already-loaded Experiment object; if given, no pickle file is read.
Returns
-------
experiment_folders : list
list of experiment folder paths
exp_info : Experiment class object
Experiment class object used to run the experiment
"""
# exp_info_path = dir_manager.make_resultspath_absolute(exp_info_file)
# results_dir = dir_manager.make_resultspath_absolute(data_folder)
if exp is None:
exp_info_file = 'results_' + suffix + os.sep + 'experiment_info_' + suffix + '.p'
exp_info_path = dir_manager.make_resultspath_absolute(exp_info_file)
exp_info = pickle.load(open(exp_info_path,'rb')) # load experiment info
else:
exp_info = exp
# experiment_folders = sorted(os.listdir(path=results_dir))
# experiment_folders = [x for x in experiment_folders if x != '.DS_Store']
# experiment_folders = [results_dir + os.sep + x
# for x in experiment_folders]
experiment_folders = exp_info.exp_folders
return experiment_folders, exp_info
def get_data(sim_path):
"""
Parameters
----------
sim_path : str
Path of the simulation run to load
Returns
-------
data : numpy array
Data (typically simulation counts)
"""
data_df = pd.read_csv(sim_path)
data = data_df.to_numpy()
return data
def save_fig(fig,savename,bbox_inches='tight'):
# fig.margins(0,0)
savename = dir_manager.make_figurepath_absolute(savename)
fig.savefig(savename,bbox_inches=bbox_inches,
dpi=400,
transparent=True,
facecolor='w',
edgecolor='w')
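# Illustrative usage (hypothetical suffix and file names):
#   folders, exp_info = get_experiment_results(suffix='07272021_0001')
#   counts = get_data(folders[0] + os.sep + 'sim_0.csv')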
| 28.863014
| 89
| 0.652587
|
b57dcfaf5ff893773de92e39746a83425cf1d37b
| 2,682
|
py
|
Python
|
qpesums-netcdf-tools/qpesums_netcdf_convert.py
|
Fondus/Fondus-Python-SDK
|
ce6cfaffe5d978ac728f35b1b4bfcf49f7519576
|
[
"Apache-2.0"
] | null | null | null |
qpesums-netcdf-tools/qpesums_netcdf_convert.py
|
Fondus/Fondus-Python-SDK
|
ce6cfaffe5d978ac728f35b1b4bfcf49f7519576
|
[
"Apache-2.0"
] | null | null | null |
qpesums-netcdf-tools/qpesums_netcdf_convert.py
|
Fondus/Fondus-Python-SDK
|
ce6cfaffe5d978ac728f35b1b4bfcf49f7519576
|
[
"Apache-2.0"
] | 1
|
2021-08-05T06:28:36.000Z
|
2021-08-05T06:28:36.000Z
|
import argparse,datetime,math
import geojson
from netCDF4 import Dataset
def get_range(v,low,high):
dim=v.get_dims()[0].size
d=(v[1]-v[0])/2
i0=None
i1=dim
for i in range(0,dim):
if i0 is None and v[i]+d >= low:
i0=i
if v[i]-d <= high:
i1=i
    if i0 is None: i0 = 0
return [i0,i1]
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="source NC filename")
parser.add_argument("--output", help="output geojson filename")
parser.add_argument("--bbox", help="bounding box to filter grids, format as x1,y1,x2,y2")
args=parser.parse_args()
input_filename=args.input if args.input else "QPESUMS.nc"
print("Input file:", input_filename)
output_filename=args.output if args.output else "QPESUMS.json"
print("Output file:", output_filename)
bbox=list(map(lambda s:float(s),args.bbox.split(','))) if args.bbox else [-180.0,-90.0,180,90.0]
print("Bounding box:", bbox)
qpesums = Dataset(input_filename, "r")
print("NETCDF metadata")
print(qpesums.dimensions)
print(qpesums.variables)
v_time=qpesums.variables['time']
## print(v_time[:])
v_lon=qpesums.variables['x']
v_lat=qpesums.variables['y']
dx=(v_lon[1]-v_lon[0])/2
dy=(v_lat[1]-v_lat[0])/2
lon_range=get_range(v_lon,bbox[0],bbox[2])
lat_range=get_range(v_lat,bbox[1],bbox[3])
v_data=qpesums.variables['precipitation_observed']
## cache all data in memory
time=v_time[:]
lon=v_lon[:]
lat=v_lat[:]
data=v_data[:]
dim_t=qpesums.dimensions['time'].size
features=[]
for x in range(lon_range[0],lon_range[1]):
for y in range(lat_range[0],lat_range[1]):
cx=lon[x]
cy=lat[y]
id='x'+f'{cx:.4f}'+'y'+f'{cy:.4f}'
rect=geojson.Polygon([[(cx-dx,cy-dy),(cx+dx,cy-dy),(cx+dx,cy+dy),(cx-dx,cy+dy),(cx-dx,cy-dy)]])
properties={}
## dump only last hour for now:
## for t in range(0,dim_t):
for t in range(0,1):
ts=datetime.datetime.fromtimestamp(time[dim_t-t-1]*60,datetime.timezone.utc).isoformat()
properties['time_t'+str(t)]=ts
v=float(data[dim_t-t-1,y,x])
if math.isnan(v):
print("Skip no data grid at t" + str(t), id)
else:
properties['value_t'+str(t)]=float(data[dim_t-t-1,y,x])
feature=geojson.Feature(
geometry=rect,
properties=properties,
id=id
)
features.append(feature)
if not feature.is_valid: print(feature.id, feature.is_valid)
qpesums.close()
output=geojson.FeatureCollection(features)
print("Geojson is_valid:", output.is_valid)
file_output=geojson.dumps(output)
with open(output_filename,'w') as f:
f.write(file_output)
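## Example invocation (default file names, hypothetical bounding box):
##   python qpesums_netcdf_convert.py --input QPESUMS.nc --output QPESUMS.json --bbox 120.0,21.5,122.5,25.5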
| 28.231579
| 103
| 0.643922
|
293fe934bec9b803386fab62ca4499aed85568a3
| 397
|
py
|
Python
|
config/api_router.py
|
ruankranz/blog
|
d83adc9035bb71f839e8e1c74a036f99be7f9d18
|
[
"MIT"
] | null | null | null |
config/api_router.py
|
ruankranz/blog
|
d83adc9035bb71f839e8e1c74a036f99be7f9d18
|
[
"MIT"
] | 1
|
2021-05-11T12:43:52.000Z
|
2021-05-11T12:43:52.000Z
|
config/api_router.py
|
ruankranz/blog
|
d83adc9035bb71f839e8e1c74a036f99be7f9d18
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from krankit.users.api.views import UserViewSet
from krankit.blog.api.views import PostViewSet
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
router.register("posts", PostViewSet)
app_name = "api"
urlpatterns = router.urls
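# The registrations above expose the standard list/detail routes for each viewset
# (e.g. /users/ and /posts/, plus detail routes keyed by each viewset's lookup field);
# DefaultRouter additionally serves a browsable API root when DEBUG is enabled.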
| 22.055556
| 62
| 0.788413
|
0a9bf4d555291bd7c0f7ca0922337e9ba0d3a40a
| 9,371
|
py
|
Python
|
csbdeep/scripts/care_predict.py
|
Takuya1031/CSBDeep
|
75877938b329173fb33cc19b2d96a773ed000b62
|
[
"BSD-3-Clause"
] | 1
|
2020-12-10T16:52:16.000Z
|
2020-12-10T16:52:16.000Z
|
csbdeep/scripts/care_predict.py
|
Takuya1031/CSBDeep
|
75877938b329173fb33cc19b2d96a773ed000b62
|
[
"BSD-3-Clause"
] | null | null | null |
csbdeep/scripts/care_predict.py
|
Takuya1031/CSBDeep
|
75877938b329173fb33cc19b2d96a773ed000b62
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function, unicode_literals, absolute_import, division
import argparse
import sys
from pprint import pprint
import numpy as np
from tqdm import tqdm
from csbdeep.io import save_tiff_imagej_compatible
from csbdeep.utils import _raise, axes_check_and_normalize
from csbdeep.utils.six import Path
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--quiet', metavar='', type=str2bool, required=False, const=True, nargs='?', default=False, help="don't show status messages")
parser.add_argument('--gpu-memory-limit', metavar='', type=float, required=False, default=None, help="limit GPU memory to this fraction (0...1)")
data = parser.add_argument_group("input")
data.add_argument('--input-dir', metavar='', type=str, required=False, default=None, help="path to folder with input images")
data.add_argument('--input-pattern', metavar='', type=str, required=False, default='*.tif*', help="glob-style file name pattern of input images")
data.add_argument('--input-axes', metavar='', type=str, required=False, default=None, help="axes string of input images")
data.add_argument('--norm-pmin', metavar='', type=float, required=False, default=2, help="'pmin' for PercentileNormalizer")
data.add_argument('--norm-pmax', metavar='', type=float, required=False, default=99.8, help="'pmax' for PercentileNormalizer")
data.add_argument('--norm-undo', metavar='', type=str2bool, required=False, const=True, nargs='?', default=True, help="'do_after' for PercentileNormalizer")
data.add_argument('--n-tiles', metavar='', type=int, required=False, nargs='+', default=None, help="number of tiles for prediction")
model = parser.add_argument_group("model")
model.add_argument('--model-basedir', metavar='', type=str, required=False, default=None, help="path to folder that contains CARE model")
model.add_argument('--model-name', metavar='', type=str, required=False, default=None, help="name of CARE model")
model.add_argument('--model-weights', metavar='', type=str, required=False, default=None, help="specific name of weights file to load (located in model folder)")
output = parser.add_argument_group("output")
output.add_argument('--output-dir', metavar='', type=str, required=False, default=None, help="path to folder where restored images will be saved")
output.add_argument('--output-name', metavar='', type=str, required=False, default='{model_name}/{file_path}/{file_name}{file_ext}', help="name pattern of restored image (special tokens: {file_path}, {file_name}, {file_ext}, {model_name}, {model_weights})")
output.add_argument('--output-dtype', metavar='', type=str, required=False, default='float32', help="data type of the saved tiff file")
output.add_argument('--imagej-tiff', metavar='', type=str2bool, required=False, const=True, nargs='?', default=True, help="save restored image as ImageJ-compatible TIFF file")
output.add_argument('--dry-run', metavar='', type=str2bool, required=False, const=True, nargs='?', default=False, help="don't save restored images")
return parser, parser.parse_args()
def main():
if not ('__file__' in locals() or '__file__' in globals()):
print('running interactively, exiting.')
sys.exit(0)
# parse arguments
parser, args = parse_args()
args_dict = vars(args)
# exit and show help if no arguments provided at all
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
# check for required arguments manually (because of argparse issue)
required = ('--input-dir','--input-axes', '--norm-pmin', '--norm-pmax', '--model-basedir', '--model-name', '--output-dir')
for r in required:
dest = r[2:].replace('-','_')
if args_dict[dest] is None:
parser.print_usage(file=sys.stderr)
print("%s: error: the following arguments are required: %s" % (parser.prog,r), file=sys.stderr)
sys.exit(1)
# show effective arguments (including defaults)
if not args.quiet:
print('Arguments')
print('---------')
pprint(args_dict)
print()
sys.stdout.flush()
# logging function
log = (lambda *a,**k: None) if args.quiet else tqdm.write
# get list of input files and exit if there are none
file_list = list(Path(args.input_dir).glob(args.input_pattern))
if len(file_list) == 0:
log("No files to process in '%s' with pattern '%s'." % (args.input_dir,args.input_pattern))
sys.exit(0)
    # delay heavy imports until all required arguments have been checked
from tifffile import imread, imsave
import keras.backend as K
from csbdeep.models import CARE
from csbdeep.data import PercentileNormalizer
sys.stdout.flush()
sys.stderr.flush()
# limit gpu memory
if args.gpu_memory_limit is not None:
from csbdeep.utils.tf import limit_gpu_memory
limit_gpu_memory(args.gpu_memory_limit)
# create CARE model and load weights, create normalizer
K.clear_session()
model = CARE(config=None, name=args.model_name, basedir=args.model_basedir)
if args.model_weights is not None:
print("Loading network weights from '%s'." % args.model_weights)
model.load_weights(args.model_weights)
normalizer = PercentileNormalizer(pmin=args.norm_pmin, pmax=args.norm_pmax, do_after=args.norm_undo)
n_tiles = args.n_tiles
if n_tiles is not None and len(n_tiles)==1:
n_tiles = n_tiles[0]
processed = []
# process all files
for file_in in tqdm(file_list, disable=args.quiet or (n_tiles is not None and np.prod(n_tiles)>1)):
# construct output file name
file_out = Path(args.output_dir) / args.output_name.format (
file_path = str(file_in.relative_to(args.input_dir).parent),
file_name = file_in.stem, file_ext = file_in.suffix,
model_name = args.model_name, model_weights = Path(args.model_weights).stem if args.model_weights is not None else None
)
# checks
(file_in.suffix.lower() in ('.tif','.tiff') and
file_out.suffix.lower() in ('.tif','.tiff')) or _raise(ValueError('only tiff files supported.'))
# load and predict restored image
img = imread(str(file_in))
restored = model.predict(img, axes=args.input_axes, normalizer=normalizer, n_tiles=n_tiles)
# restored image could be multi-channel even if input image is not
axes_out = axes_check_and_normalize(args.input_axes)
if restored.ndim > img.ndim:
assert restored.ndim == img.ndim + 1
assert 'C' not in axes_out
axes_out += 'C'
# convert data type (if necessary)
restored = restored.astype(np.dtype(args.output_dtype), copy=False)
# save to disk
if not args.dry_run:
file_out.parent.mkdir(parents=True, exist_ok=True)
if args.imagej_tiff:
save_tiff_imagej_compatible(str(file_out), restored, axes_out)
else:
imsave(str(file_out), restored)
processed.append((file_in,file_out))
# print summary of processed files
if not args.quiet:
sys.stdout.flush()
sys.stderr.flush()
n_processed = len(processed)
len_processed = len(str(n_processed))
log('Finished processing %d %s' % (n_processed, 'files' if n_processed > 1 else 'file'))
log('-' * (26+len_processed if n_processed > 1 else 26))
for i,(file_in,file_out) in enumerate(processed):
len_file = max(len(str(file_in)),len(str(file_out)))
log(('{:>%d}. in : {:>%d}'%(len_processed,len_file)).format(1+i,str(file_in)))
log(('{:>%d} out: {:>%d}'%(len_processed,len_file)).format('',str(file_out)))
if __name__ == '__main__':
main()
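# Example invocation (hypothetical paths and model name; required flags per parse_args above):
#   python care_predict.py --input-dir data/low_snr --input-axes ZYX \
#       --norm-pmin 2 --norm-pmax 99.8 --model-basedir models --model-name my_care_model \
#       --output-dir restored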
| 52.943503
| 295
| 0.585316
|
682d261222ce1db16a4f77133fdfeaadd2f3cce8
| 26,606
|
py
|
Python
|
test/aqua/test_svm_classical.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
test/aqua/test_svm_classical.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
test/aqua/test_svm_classical.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit.aqua import aqua_globals
from qiskit.aqua import run_algorithm
from qiskit.aqua.input import ClassificationInput
from test.aqua.common import QiskitAquaTestCase
class TestSVMClassical(QiskitAquaTestCase):
def setUp(self):
super().setUp()
aqua_globals.random_seed = 10598
pass
def test_classical_binary(self):
training_input = {'A': np.asarray([[0.6560706, 0.17605998],
[0.25776033, 0.47628296],
[0.79687342, 0.26933706],
[0.39016555, -0.08469916],
[0.3994399, 0.13601573],
[0.26752049, -0.03978988],
[0.24026485, 0.01953518],
[0.49490503, 0.17239737],
[0.70171827, 0.5323737],
[0.43221576, 0.42357294],
[0.62864856, 0.45504447],
[0.6259567, 0.30917324],
[0.58272403, 0.20760754],
[0.3938784, 0.17184466],
[0.14154948, 0.06201424],
[0.80202323, 0.40582692],
[0.46779595, 0.39946754],
[0.57660199, 0.21821317],
[0.51044761, 0.03699459],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802],
[0.49946978, -0.48727951],
[-0.30119743, -0.11221681],
[-0.16479252, -0.08640519],
[-0.21808884, -0.56508327],
[-0.14683258, -0.46528508],
[-0.05888195, -0.51474852],
[0.20517435, -0.66839091],
[0.25475584, -0.21239966],
[0.55194854, 0.02789679],
[-0.11542951, -0.54157026],
[0.44625538, -0.49485869],
[-0.14609118, -0.60719757],
[0.18121305, -0.1922198],
[0.19283785, -0.31798925],
[0.29626405, -0.54563098],
[-0.39044304, -0.36527253],
[-0.29432215, -0.43924164],
[-0.40294517, -0.31381308],
[0.49156185, -0.3660534]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732],
[0.48372348, 0.25438544],
[0.08791134, 0.11515506],
[0.45988094, 0.32854319],
[0.53015085, 0.41539212],
[0.5073321, 0.47346751],
[0.71081819, 0.19202569],
[1., 0.51698289],
[0.630973, 0.19898666],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293],
[-0.01065613, -0.33910828],
[-0.17323832, -0.49535592],
[0.14043268, -0.87869109],
[-0.15046837, -0.47340207],
[-0.39600934, -0.21647957],
[-0.394202, -0.44705385],
[0.15243621, -0.36695163],
[0.06195634, -0.23262325],
[0.06183066, -0.53376975]])}
temp = [test_input[k] for k in sorted(test_input)]
total_array = np.concatenate(temp)
params = {
'problem': {'name': 'classification'},
'algorithm': {
'name': 'SVM',
}
}
algo_input = ClassificationInput(training_input, test_input, total_array)
result = run_algorithm(params, algo_input)
self.assertEqual(result['testing_accuracy'], 1.0)
self.assertEqual(result['predicted_classes'],
['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A',
'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B'])
def test_classical_multiclass_one_against_all(self):
training_input = {'A': np.asarray([[0.6560706, 0.17605998],
[0.25776033, 0.47628296],
[0.79687342, 0.26933706],
[0.39016555, -0.08469916],
[0.3994399, 0.13601573],
[0.26752049, -0.03978988],
[0.24026485, 0.01953518],
[0.49490503, 0.17239737],
[0.70171827, 0.5323737],
[0.43221576, 0.42357294],
[0.62864856, 0.45504447],
[0.6259567, 0.30917324],
[0.58272403, 0.20760754],
[0.3938784, 0.17184466],
[0.14154948, 0.06201424],
[0.80202323, 0.40582692],
[0.46779595, 0.39946754],
[0.57660199, 0.21821317],
[0.51044761, 0.03699459],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802],
[0.49946978, -0.48727951],
[-0.30119743, -0.11221681],
[-0.16479252, -0.08640519],
[-0.21808884, -0.56508327],
[-0.14683258, -0.46528508],
[-0.05888195, -0.51474852],
[0.20517435, -0.66839091],
[0.25475584, -0.21239966],
[0.55194854, 0.02789679],
[-0.11542951, -0.54157026],
[0.44625538, -0.49485869],
[-0.14609118, -0.60719757],
[0.18121305, -0.1922198],
[0.19283785, -0.31798925],
[0.29626405, -0.54563098],
[-0.39044304, -0.36527253],
[-0.29432215, -0.43924164],
[-0.40294517, -0.31381308],
[0.49156185, -0.3660534]]),
'C': np.asarray([[-0.68088231, 0.46824423],
[-0.56167659, 0.65270294],
[-0.54323753, 0.67630888],
[-0.57685569, -0.08515631],
[-0.67765364, 0.19654347],
[-0.62129115, 0.22223066],
[-0.78040851, 0.65247848],
[-0.50730279, 0.59898039],
[-0.64275805, 0.63381998],
[-0.72854201, 0.14151325],
[-0.57004437, 0.12344874],
[-0.55215973, 0.74331215],
[-0.60916047, 0.52006917],
[-0.23093745, 1.],
[-0.84025337, 0.5564536],
[-0.66952391, 0.57918859],
[-0.67725082, 0.60439934],
[-1., 0.23715261],
[-0.62933025, 0.19055405],
[-0.82139073, 0.29941512]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732],
[0.48372348, 0.25438544],
[0.08791134, 0.11515506],
[0.45988094, 0.32854319],
[0.53015085, 0.41539212],
[0.5073321, 0.47346751],
[0.71081819, 0.19202569],
[1., 0.51698289],
[0.630973, 0.19898666],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293],
[-0.01065613, -0.33910828],
[-0.17323832, -0.49535592],
[0.14043268, -0.87869109],
[-0.15046837, -0.47340207],
[-0.39600934, -0.21647957],
[-0.394202, -0.44705385],
[0.15243621, -0.36695163],
[0.06195634, -0.23262325],
[0.06183066, -0.53376975]]),
'C': np.asarray([[-0.74561108, 0.27047295],
[-0.69942965, 0.11885162],
[-0.52649891, 0.35265538],
[-0.54345106, 0.13113995],
[-0.57181448, 0.13594725],
[-0.33713329, 0.05095243],
[-0.65741384, 0.477976],
[-0.79986067, 0.41733195],
[-0.73856328, 0.80699537],
[-0.66489165, 0.1181712]])}
temp = [test_input[k] for k in sorted(test_input)]
total_array = np.concatenate(temp)
params = {
'problem': {'name': 'classification'},
'algorithm': {
'name': 'SVM'
},
'multiclass_extension': {'name': 'OneAgainstRest'}
}
algo_input = ClassificationInput(training_input, test_input, total_array)
result = run_algorithm(params, algo_input)
self.assertEqual(result['testing_accuracy'], 1.0)
self.assertEqual(result['predicted_classes'],
['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
'B', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C'])
def test_classical_multiclass_all_pairs(self):
training_input = {'A': np.asarray([[0.6560706, 0.17605998],
[0.25776033, 0.47628296],
[0.79687342, 0.26933706],
[0.39016555, -0.08469916],
[0.3994399, 0.13601573],
[0.26752049, -0.03978988],
[0.24026485, 0.01953518],
[0.49490503, 0.17239737],
[0.70171827, 0.5323737],
[0.43221576, 0.42357294],
[0.62864856, 0.45504447],
[0.6259567, 0.30917324],
[0.58272403, 0.20760754],
[0.3938784, 0.17184466],
[0.14154948, 0.06201424],
[0.80202323, 0.40582692],
[0.46779595, 0.39946754],
[0.57660199, 0.21821317],
[0.51044761, 0.03699459],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802],
[0.49946978, -0.48727951],
[-0.30119743, -0.11221681],
[-0.16479252, -0.08640519],
[-0.21808884, -0.56508327],
[-0.14683258, -0.46528508],
[-0.05888195, -0.51474852],
[0.20517435, -0.66839091],
[0.25475584, -0.21239966],
[0.55194854, 0.02789679],
[-0.11542951, -0.54157026],
[0.44625538, -0.49485869],
[-0.14609118, -0.60719757],
[0.18121305, -0.1922198],
[0.19283785, -0.31798925],
[0.29626405, -0.54563098],
[-0.39044304, -0.36527253],
[-0.29432215, -0.43924164],
[-0.40294517, -0.31381308],
[0.49156185, -0.3660534]]),
'C': np.asarray([[-0.68088231, 0.46824423],
[-0.56167659, 0.65270294],
[-0.54323753, 0.67630888],
[-0.57685569, -0.08515631],
[-0.67765364, 0.19654347],
[-0.62129115, 0.22223066],
[-0.78040851, 0.65247848],
[-0.50730279, 0.59898039],
[-0.64275805, 0.63381998],
[-0.72854201, 0.14151325],
[-0.57004437, 0.12344874],
[-0.55215973, 0.74331215],
[-0.60916047, 0.52006917],
[-0.23093745, 1.],
[-0.84025337, 0.5564536],
[-0.66952391, 0.57918859],
[-0.67725082, 0.60439934],
[-1., 0.23715261],
[-0.62933025, 0.19055405],
[-0.82139073, 0.29941512]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732],
[0.48372348, 0.25438544],
[0.08791134, 0.11515506],
[0.45988094, 0.32854319],
[0.53015085, 0.41539212],
[0.5073321, 0.47346751],
[0.71081819, 0.19202569],
[1., 0.51698289],
[0.630973, 0.19898666],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293],
[-0.01065613, -0.33910828],
[-0.17323832, -0.49535592],
[0.14043268, -0.87869109],
[-0.15046837, -0.47340207],
[-0.39600934, -0.21647957],
[-0.394202, -0.44705385],
[0.15243621, -0.36695163],
[0.06195634, -0.23262325],
[0.06183066, -0.53376975]]),
'C': np.asarray([[-0.74561108, 0.27047295],
[-0.69942965, 0.11885162],
[-0.52649891, 0.35265538],
[-0.54345106, 0.13113995],
[-0.57181448, 0.13594725],
[-0.33713329, 0.05095243],
[-0.65741384, 0.477976],
[-0.79986067, 0.41733195],
[-0.73856328, 0.80699537],
[-0.66489165, 0.1181712]])}
temp = [test_input[k] for k in sorted(test_input)]
total_array = np.concatenate(temp)
params = {
'problem': {'name': 'classification'},
'algorithm': {
'name': 'SVM'
},
'multiclass_extension': {'name': 'AllPairs'}
}
algo_input = ClassificationInput(training_input, test_input, total_array)
result = run_algorithm(params, algo_input)
self.assertEqual(result['testing_accuracy'], 1.0)
self.assertEqual(result['predicted_classes'],
['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B',
'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
'B', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C'])
def test_classical_multiclass_error_correcting_code(self):
training_input = {'A': np.asarray([[0.6560706, 0.17605998],
[0.25776033, 0.47628296],
[0.79687342, 0.26933706],
[0.39016555, -0.08469916],
[0.3994399, 0.13601573],
[0.26752049, -0.03978988],
[0.24026485, 0.01953518],
[0.49490503, 0.17239737],
[0.70171827, 0.5323737],
[0.43221576, 0.42357294],
[0.62864856, 0.45504447],
[0.6259567, 0.30917324],
[0.58272403, 0.20760754],
[0.3938784, 0.17184466],
[0.14154948, 0.06201424],
[0.80202323, 0.40582692],
[0.46779595, 0.39946754],
[0.57660199, 0.21821317],
[0.51044761, 0.03699459],
[0.8690704, 0.70847635]]),
'B': np.asarray([[0.38857596, -0.33775802],
[0.49946978, -0.48727951],
[-0.30119743, -0.11221681],
[-0.16479252, -0.08640519],
[-0.21808884, -0.56508327],
[-0.14683258, -0.46528508],
[-0.05888195, -0.51474852],
[0.20517435, -0.66839091],
[0.25475584, -0.21239966],
[0.55194854, 0.02789679],
[-0.11542951, -0.54157026],
[0.44625538, -0.49485869],
[-0.14609118, -0.60719757],
[0.18121305, -0.1922198],
[0.19283785, -0.31798925],
[0.29626405, -0.54563098],
[-0.39044304, -0.36527253],
[-0.29432215, -0.43924164],
[-0.40294517, -0.31381308],
[0.49156185, -0.3660534]]),
'C': np.asarray([[-0.68088231, 0.46824423],
[-0.56167659, 0.65270294],
[-0.54323753, 0.67630888],
[-0.57685569, -0.08515631],
[-0.67765364, 0.19654347],
[-0.62129115, 0.22223066],
[-0.78040851, 0.65247848],
[-0.50730279, 0.59898039],
[-0.64275805, 0.63381998],
[-0.72854201, 0.14151325],
[-0.57004437, 0.12344874],
[-0.55215973, 0.74331215],
[-0.60916047, 0.52006917],
[-0.23093745, 1.],
[-0.84025337, 0.5564536],
[-0.66952391, 0.57918859],
[-0.67725082, 0.60439934],
[-1., 0.23715261],
[-0.62933025, 0.19055405],
[-0.82139073, 0.29941512]])}
test_input = {'A': np.asarray([[0.57483139, 0.47120732],
[0.48372348, 0.25438544],
[0.08791134, 0.11515506],
[0.45988094, 0.32854319],
[0.53015085, 0.41539212],
[0.5073321, 0.47346751],
[0.71081819, 0.19202569],
[1., 0.51698289],
[0.630973, 0.19898666],
[0.48142649, 0.15931707]]),
'B': np.asarray([[-0.06048935, -0.48345293],
[-0.01065613, -0.33910828],
[-0.17323832, -0.49535592],
[0.14043268, -0.87869109],
[-0.15046837, -0.47340207],
[-0.39600934, -0.21647957],
[-0.394202, -0.44705385],
[0.15243621, -0.36695163],
[0.06195634, -0.23262325],
[0.06183066, -0.53376975]]),
'C': np.asarray([[-0.74561108, 0.27047295],
[-0.69942965, 0.11885162],
[-0.52649891, 0.35265538],
[-0.54345106, 0.13113995],
[-0.57181448, 0.13594725],
[-0.33713329, 0.05095243],
[-0.65741384, 0.477976],
[-0.79986067, 0.41733195],
[-0.73856328, 0.80699537],
[-0.66489165, 0.1181712]])}
temp = [test_input[k] for k in sorted(test_input)]
total_array = np.concatenate(temp)
params = {
'problem': {'name': 'classification'},
'algorithm': {
'name': 'SVM',
},
'multiclass_extension': {'name': 'ErrorCorrectingCode', 'code_size': 5},
}
algo_input = ClassificationInput(training_input, test_input, total_array)
result = run_algorithm(params, algo_input)
self.assertEqual(result['testing_accuracy'], 1.0)
self.assertEqual(result['predicted_classes'],
['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B',
'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
'B', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C'])
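if __name__ == '__main__':
    # Hedged addition so the module can be run directly; the original relies on an external test runner.
    import unittest
    unittest.main()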
| 59.521253
| 120
| 0.31211
|
4f59b98f7077940bdb461e47c637fb543f989369
| 467
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_dressed_commoner_naboo_moncal_female_01.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_dressed_commoner_naboo_moncal_female_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_dressed_commoner_naboo_moncal_female_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_commoner_naboo_moncal_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","moncal_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.470588
| 85
| 0.743041
|
0f11d4e036bc8e12f68d9c251b7c58f8e92cc043
| 7,044
|
py
|
Python
|
wiutils/preprocessors.py
|
PEM-Humboldt/wildlife-insights-utils
|
90f0a3641830cee15ff933433b649d3ccec8c516
|
[
"MIT"
] | null | null | null |
wiutils/preprocessors.py
|
PEM-Humboldt/wildlife-insights-utils
|
90f0a3641830cee15ff933433b649d3ccec8c516
|
[
"MIT"
] | null | null | null |
wiutils/preprocessors.py
|
PEM-Humboldt/wildlife-insights-utils
|
90f0a3641830cee15ff933433b649d3ccec8c516
|
[
"MIT"
] | null | null | null |
"""
Functions to preprocess information before uploading it to WI.
"""
import datetime
import pathlib
from typing import Union
import cv2
import ffmpeg
import pandas as pd
from PIL import Image, ExifTags
def _get_exif_code(tag: str) -> int:
"""
Gets the exif code for a specific tag.
Parameters
----------
tag : str
Tag name to get the exif code for.
Returns
-------
int
Exif code.
"""
for key, value in ExifTags.TAGS.items():
if value == tag:
return key
raise ValueError(f"{tag} is not a valid Exif tag.")
def change_image_timestamp(
image_path: Union[str, pathlib.Path],
output_path: Union[str, pathlib.Path],
timestamp: Union[str, datetime.datetime, pd.Timestamp] = None,
offset: pd.DateOffset = None,
direction: str = None,
) -> None:
"""
Changes an image's associated timestamp metadata for a new timestamp.
This can be a new arbitrary timestamp or a computed new timestamp from
an offset and the original timestamp.
Parameters
----------
image_path : str or pathlib.Path
Relative or absolute path of the image to resample.
output_path : str or pathlib.Path
Relative or absolute path of the output image.
timestamp : str, datetime.datetime or pd.Timestamp
New timestamp to write to the image's metadata.
offset : pd.Offset
Offset to add to or subtract from the original image's timestamp.
This argument only has effect when no timestamp is specified
direction : str
Possible values are:
* 'forward': to add the offset to the original timestamp.
* 'backward': to subtract the offset from the original
timestamp.
This argument only has effect when an offset is specified.
Returns
-------
None
"""
if not isinstance(image_path, pathlib.Path):
image_path = pathlib.Path(image_path)
if not isinstance(output_path, pathlib.Path):
output_path = pathlib.Path(output_path)
image = Image.open(image_path.as_posix())
exif = image.getexif()
if timestamp is not None:
if not isinstance(timestamp, pd.Timestamp):
timestamp = pd.Timestamp(timestamp)
else:
timestamp = exif[_get_exif_code("DateTime")]
timestamp = pd.Timestamp(timestamp.replace(":", "-", 2))
if direction == "forward":
timestamp += offset
elif direction == "backward":
timestamp -= offset
else:
raise ValueError("direction must be one of ['forward', 'backward']")
exif[_get_exif_code("DateTime")] = timestamp.strftime("%Y:%m:%d %H:%M:%S")
exif[_get_exif_code("DateTimeOriginal")] = timestamp.strftime("%Y:%m:%d %H:%M:%S")
image.save(output_path.as_posix(), format=image.format, exif=exif)
def convert_video_to_images(
video_path: Union[str, pathlib.Path],
output_path: Union[str, pathlib.Path],
timestamp: Union[str, datetime.datetime, pd.Timestamp] = None,
image_format: str = "jpeg",
offset: int = None,
) -> None:
"""
Converts a video to images with an associated timestamp.
Parameters
----------
video_path : str or pathlib.Path
Relative or absolute path of the video to convert.
output_path : str or pathlib.Path
Relative or absolute path of the folder to save the images to. If
the folder does not exist, it will be created.
timestamp : str, datetime.datetime or pd.Timestamp
Timestamp of the beginning of the video. If no timestamp is
provided, it will be automatically extracted from the metadata.
image_format : str
Image format of the output images. Possible values are:
- 'jpeg'
- 'png'
offset : int
Offset (in seconds) to convert frames to images. For example, if
offset is 1, the output images will correspond to 1 second-separated
frames of the video. If offset is None, all the frames in the video
will be converted to images.
Returns
-------
None
"""
if not isinstance(video_path, pathlib.Path):
video_path = pathlib.Path(video_path)
if not isinstance(output_path, pathlib.Path):
output_path = pathlib.Path(output_path)
if image_format not in ("jpeg", "png"):
raise ValueError("image_format must be one of ['jpeg', 'png'].")
if image_format == "jpeg":
ext = "jpg"
else:
ext = image_format
if timestamp is not None:
start = pd.Timestamp(timestamp)
else:
info = ffmpeg.probe(video_path.as_posix())
try:
start = info["format"]["tags"]["creation_time"]
except KeyError:
raise Exception(f"{video_path.as_posix()} does not have a creation date.")
start = pd.Timestamp(start)
video = cv2.VideoCapture(video_path.as_posix())
frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
width = len(str(frames))
datetime_code = _get_exif_code("DateTimeOriginal")
output_path.mkdir(parents=True, exist_ok=True)
count = 1
flag, arr = video.read()
while flag:
image = Image.fromarray(cv2.cvtColor(arr, cv2.COLOR_RGB2BGR))
exif = image.getexif()
timestamp = start + pd.Timedelta(milliseconds=video.get(cv2.CAP_PROP_POS_MSEC))
exif[datetime_code] = timestamp.strftime("%Y:%m:%d %H:%M:%S")
name = video_path.stem + "_" + str(count).zfill(width) + f".{ext}"
image.save(output_path.joinpath(name).as_posix(), format=image_format, exif=exif)
if offset:
video.set(cv2.CAP_PROP_POS_MSEC, count * (offset * 1e3))
flag, arr = video.read()
count += 1
def reduce_image_size(
image_path: Union[str, pathlib.Path],
output_path: Union[str, pathlib.Path],
factor: float = 0.9,
method: int = Image.ANTIALIAS,
) -> None:
"""
Reduces image file size by resampling using a given factor.
Parameters
----------
image_path : str or pathlib.Path
Relative or absolute path of the image to resample.
output_path : str or pathlib.Path
Relative or absolute path of the output image.
factor : float
Resampling factor.
method : int
Image resizing method used by PIL. Possible values are:
- PIL.Image.NEAREST (0)
- PIL.Image.ANTIALIAS or PIL.Image.LANCZOS (1)
- PIL.Image.BILINEAR (2)
- PIL.Image.BICUBIC (3)
Returns
-------
None
"""
if not isinstance(image_path, pathlib.Path):
image_path = pathlib.Path(image_path)
if not isinstance(output_path, pathlib.Path):
output_path = pathlib.Path(output_path)
image = Image.open(image_path.as_posix())
exif = image.getexif()
new_width = round(image.width * factor)
new_height = round(image.height * factor)
result = image.resize((new_width, new_height), method)
result.save(output_path.as_posix(), format=image.format, exif=exif)
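# Illustrative usage (hypothetical file names):
#   change_image_timestamp("IMG_0001.JPG", "IMG_0001_fixed.JPG", offset=pd.DateOffset(hours=5), direction="forward")
#   convert_video_to_images("clip.AVI", "frames/", offset=1)
#   reduce_image_size("IMG_0001.JPG", "IMG_0001_small.JPG", factor=0.5)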
| 31.72973
| 89
| 0.638984
|
346ceca58e9695150a073e2140c238f7ab479d9d
| 10,147
|
py
|
Python
|
src/modeling/meta_arch/layers/wrappers.py
|
deeplearning-wisc/stud
|
b667a369e368181ef6e913c32f26e574bead9b56
|
[
"Apache-2.0"
] | 22
|
2022-03-09T03:13:10.000Z
|
2022-03-31T02:45:50.000Z
|
src/modeling/meta_arch/layers/wrappers.py
|
deeplearning-wisc/stud
|
b667a369e368181ef6e913c32f26e574bead9b56
|
[
"Apache-2.0"
] | 1
|
2022-03-22T12:27:38.000Z
|
2022-03-22T22:45:46.000Z
|
src/modeling/meta_arch/layers/wrappers.py
|
deeplearning-wisc/stud
|
b667a369e368181ef6e913c32f26e574bead9b56
|
[
"Apache-2.0"
] | 2
|
2022-03-21T02:32:53.000Z
|
2022-03-22T18:43:52.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Description: https://github.com/sxhxliang/detectron2_backbone
# version: 0.0.1
# Author: Shihua Liang (sxhx.liang@gmail.com)
# FilePath: /detectron2_backbone/detectron2_backbone/layers/wrappers.py
# Create: 2020-05-04 10:28:09
# LastAuthor: Shihua Liang
# lastTime: 2020-05-06 19:43:34
# --------------------------------------------------------
import math
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _single, _pair, _triple, _ntuple
from detectron2.layers.batch_norm import get_norm
TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2])
__all__ = ["_Conv2d", "Conv2d", "SeparableConv2d", "MaxPool2d"]
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class _Conv2d(nn.Conv2d):
def __init__(self,
in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros', image_size=None):
self.padding_mode = padding_mode
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
        # padding format:
# tuple(pad_l, pad_r, pad_t, pad_b) or default
if padding_mode == 'static_same':
p = max(kernel_size[0] - stride[0], 0)
padding = (p // 2, p - p // 2, p // 2, p - p // 2)
elif padding_mode == 'dynamic_same':
padding = _pair(0)
        super(_Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        # nn.Conv2d.__init__ resets self.padding_mode to its own default ('zeros'),
        # so restore the requested mode after the call.
        self.padding_mode = padding_mode
def conv2d_forward(self, input, weight):
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
input = F.pad(input, expanded_padding, mode='circular')
elif self.padding_mode == 'dynamic_same':
            ih, iw = input.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
input = F.pad(input, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
elif self.padding_mode == 'static_same':
input = F.pad(input, self.padding)
        else:  # default padding: self.padding is (pad_h, pad_w); expand it for F.pad
            input = F.pad(input, [self.padding[1], self.padding[1],
                                  self.padding[0], self.padding[0]])
return F.conv2d(input,
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
def forward(self, input):
return self.conv2d_forward(input, self.weight)
def __repr__(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
return self.__class__.__name__ + '(' + s.format(**self.__dict__) + ')'
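# Illustrative usage sketch, not part of the original file: exercises the
# 'static_same' padding mode documented above. The sizes are arbitrary example
# values, not assumptions about how the repository uses this class.
if __name__ == "__main__":
    _conv = _Conv2d(3, 16, kernel_size=3, stride=2, padding_mode='static_same')
    _out = _conv(torch.randn(1, 3, 32, 32))
    assert _out.shape == (1, 16, 16, 16)  # 'same'-style output: ceil(32 / 2) == 16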
class Conv2d(_Conv2d):
"""
A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features.
"""
def __init__(self, *args, **kwargs):
"""
Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
Args:
norm (nn.Module, optional): a normalization layer
activation (callable(Tensor) -> Tensor): a callable activation function
It assumes that norm layer is used before activation.
"""
norm = kwargs.pop("norm", None)
activation = kwargs.pop("activation", None)
super().__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
if x.numel() == 0 and self.training:
# https://github.com/pytorch/pytorch/issues/12013
assert not isinstance(
self.norm, torch.nn.SyncBatchNorm
), "SyncBatchNorm does not support empty inputs!"
if x.numel() == 0 and TORCH_VERSION <= (1, 4):
assert not isinstance(
self.norm, torch.nn.GroupNorm
), "GroupNorm does not support empty inputs in PyTorch <=1.4!"
            # When the input is empty, we want to return an empty tensor with the
            # "correct" shape, so that the following operations will not panic
            # if they check the shape of the tensor.
# This computes the height and width of the output tensor
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // s + 1
for i, p, di, k, s in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
empty = _NewEmptyTensorOp.apply(x, output_shape)
if self.training:
# This is to make DDP happy.
# DDP expects all workers to have gradient w.r.t the same set of parameters.
_dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
return empty + _dummy
else:
return empty
x = super().forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
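# Illustrative usage sketch, not part of the original file: shows the extra
# norm/activation keyword arguments described in the docstring above, using a
# plain BatchNorm2d and F.relu as stand-in choices for this example.
if __name__ == "__main__":
    _wrapped = Conv2d(3, 8, kernel_size=3, padding=1,
                      norm=nn.BatchNorm2d(8), activation=F.relu)
    _y = _wrapped(torch.randn(2, 3, 32, 32))
    assert _y.shape[1] == 8 and bool((_y >= 0).all())  # ReLU output is non-negative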
class SeparableConv2d(nn.Module): # Depth wise separable conv
def __init__(self, in_channels, out_channels, kernel_size,
stride=1, padding=0, dilation=1,
bias=True, padding_mode='zeros', norm=None, eps=1e-05, momentum=0.1, activation=None):
super(SeparableConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.dilation = _pair(dilation)
self.groups = in_channels
self.bias = bias
self.padding_mode = padding_mode
self.depthwise = Conv2d(in_channels, in_channels, kernel_size,
stride, padding, dilation, groups=in_channels, bias=False, padding_mode=padding_mode)
self.pointwise = Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias, padding_mode=padding_mode)
self.padding = self.depthwise.padding
self.norm = None if norm == "" else norm
if self.norm is not None:
self.norm = get_norm(norm, out_channels)
            assert self.norm is not None
self.norm.eps = eps
self.norm.momentum = momentum
self.activation = activation
def forward(self, x):
x = self.depthwise(x)
x = self.pointwise(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
def __repr__(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.groups != 1:
s += ', groups={groups}'
if self.pointwise.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
if self.norm is not None:
s = " " + s + '\n norm=' + self.norm.__repr__()
return self.__class__.__name__ + '(\n ' + s.format(**self.__dict__) + '\n)'
else:
return self.__class__.__name__ + '(' + s.format(**self.__dict__) + ')'
class MaxPool2d(nn.Module):
def __init__(self, kernel_size, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False,
padding_mode='static_same'):
super(MaxPool2d, self).__init__()
self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride) if stride is not None else self.kernel_size
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.return_indices = return_indices
self.ceil_mode = ceil_mode
self.padding_mode = padding_mode
if padding_mode == 'static_same':
p = max(self.kernel_size[0] - self.stride[0], 0)
# tuple(pad_l, pad_r, pad_t, pad_b)
padding = (p // 2, p - p // 2, p // 2, p - p // 2)
self.padding = padding
elif padding_mode == 'dynamic_same':
padding = _pair(0)
self.padding = padding
def forward(self, input):
input = F.pad(input, self.padding)
return F.max_pool2d(input, self.kernel_size, self.stride,
_pair(0), self.dilation, self.ceil_mode,
self.return_indices)
def extra_repr(self):
return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
', dilation={dilation}, ceil_mode={ceil_mode}, padding_mode={padding_mode}'.format(**self.__dict__)
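# Illustrative usage sketch, not part of the original file: the default
# 'static_same' padding mode keeps the ceil(input / stride) output size.
# Sizes are arbitrary example values.
if __name__ == "__main__":
    _pool = MaxPool2d(kernel_size=3, stride=2)
    _pooled = _pool(torch.randn(1, 8, 32, 32))
    assert _pooled.shape == (1, 8, 16, 16)  # ceil(32 / 2) == 16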
| 40.106719
| 117
| 0.56736
|
8b2300dce4a1d13ce89bb104d2af5e77393fa305
| 19,809
|
py
|
Python
|
main.py
|
wzzheng/DCML
|
01a7220bac7ebb1e70416ef663f3ba7cee9e8bf5
|
[
"MIT"
] | 11
|
2021-06-15T08:47:10.000Z
|
2022-03-30T02:31:25.000Z
|
main.py
|
wzzheng/DCML
|
01a7220bac7ebb1e70416ef663f3ba7cee9e8bf5
|
[
"MIT"
] | 2
|
2021-08-09T05:05:21.000Z
|
2021-09-03T13:56:23.000Z
|
main.py
|
wzzheng/DCML
|
01a7220bac7ebb1e70416ef663f3ba7cee9e8bf5
|
[
"MIT"
] | 2
|
2021-06-21T08:59:30.000Z
|
2022-01-01T19:22:04.000Z
|
"""==================================================================================================="""
################### LIBRARIES ###################
### Basic Libraries
import warnings
warnings.filterwarnings("ignore")
import os, sys, numpy as np, argparse, imp, datetime, pandas as pd, copy
import time, pickle as pkl, random, json, collections, itertools as it
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from tqdm import tqdm
### DML-specific Libraries
import parameters as par
import utilities.misc as misc
"""==================================================================================================="""
################### INPUT ARGUMENTS ###################
parser = argparse.ArgumentParser()
parser = par.basic_training_parameters(parser)
parser = par.batch_creation_parameters(parser)
parser = par.batchmining_specific_parameters(parser)
parser = par.loss_specific_parameters(parser)
parser = par.wandb_parameters(parser)
parser = par.diva_parameters(parser)
##### Read in parameters
opt = parser.parse_args()
"""==================================================================================================="""
if opt.dataset=='online_products':
opt.evaluation_metrics = ['e_recall@1', 'e_recall@10', 'e_recall@100', 'nmi', 'f1', 'mAP']
if 'shared' in opt.diva_features and 'selfsimilarity' in opt.diva_features and len(opt.diva_features)==3:
opt.diva_decorrelations = ['selfsimilarity-discriminative', 'shared-discriminative', 'shared-selfsimilarity']
if 'shared' in opt.diva_features and len(opt.diva_features)==4:
opt.diva_decorrelations = ['selfsimilarity-discriminative', 'shared-discriminative', 'intra-discriminative']
if 'dc' in opt.diva_features or 'imrot' in opt.diva_features:
opt.diva_decorrelations = []
if 'all' in opt.evaltypes:
"""==== EVALUATE DIFFERENT EMBEDDING SPACE REWEIGHTINGS ===="""
#Generally, there is a slight benefit in placing higher weights on non-discriminative features during testing.
opt.evaltypes = []
if len(opt.diva_features)==1:
opt.evaltypes = copy.deepcopy(opt.diva_features)
if len(opt.diva_features)==2:
        for comb in it.product(opt.diva_features, opt.diva_features):
comb_name = 'Combined_'+comb[0]+'_'+comb[1]+'-1-1'
comb_name_2 = 'Combined_'+comb[1]+'_'+comb[0]+'-1-1'
if comb_name not in opt.evaltypes and comb_name_2 not in opt.evaltypes and comb[0]!=comb[1]:
opt.evaltypes.append(comb_name)
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'-1-0.5')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'-0.5-1')
if len(opt.diva_features)==3:
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'-1.5-1-1')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'-1-1-1')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'-0.5-1-1')
if len(opt.diva_features)==4:
if opt.dataset!='online_products':
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-0.75-1.25-1.25-1.25')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-0.5-1-1-1')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-0.5-1.5-1.5-1.5')
else:
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-1-1-1-1')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-0.5-1-1-1')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-1-0.5-0.5-0.5')
opt.evaltypes.append('Combined_'+opt.diva_features[0]+'_'+opt.diva_features[1]+'_'+opt.diva_features[2]+'_'+opt.diva_features[3]+'-1.5-1-1-1')
"""==================================================================================================="""
### The following setting is useful when logging to wandb and running multiple seeds per setup:
### By setting the savename to <group_plus_seed>, the savename will instead comprise the group and the seed!
if opt.savename=='group_plus_seed':
if opt.log_online:
opt.savename = opt.group+'_s{}'.format(opt.seed)
else:
opt.savename = ''
### If wandb-logging is turned on, initialize the wandb-run here:
if opt.log_online:
import wandb
_ = os.system('wandb login {}'.format(opt.wandb_key))
os.environ['WANDB_API_KEY'] = opt.wandb_key
wandb.init(project=opt.project, group=opt.group, name=opt.savename, dir=opt.save_path)
wandb.config.update(opt)
"""==================================================================================================="""
### Load Remaining Libraries that need to be loaded after comet_ml
import torch, torch.nn as nn
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
import architectures as archs
import datasampler as dsamplers
import datasets as datasets
import criteria as criteria
import metrics as metrics
import batchminer as bmine
import evaluation as eval
from utilities import misc
from utilities import logger
"""==================================================================================================="""
full_training_start_time = time.time()
"""==================================================================================================="""
opt.source_path += '/'+opt.dataset
opt.save_path += '/'+opt.dataset
#Assert that the construction of the batch makes sense, i.e. the division into class-subclusters.
assert not opt.bs%opt.samples_per_class, 'Batchsize needs to fit number of samples per class for distance sampling and margin/triplet loss!'
opt.pretrained = not opt.not_pretrained
"""==================================================================================================="""
################### GPU SETTINGS ###########################
os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID"
# if not opt.use_data_parallel:
os.environ["CUDA_VISIBLE_DEVICES"]= str(opt.gpu[0])
"""==================================================================================================="""
#################### SEEDS FOR REPROD. #####################
torch.backends.cudnn.deterministic=True; np.random.seed(opt.seed); random.seed(opt.seed)
torch.manual_seed(opt.seed); torch.cuda.manual_seed(opt.seed); torch.cuda.manual_seed_all(opt.seed)
"""==================================================================================================="""
##################### NETWORK SETUP ##################
#NOTE: Networks that can be used: 'bninception, resnet50, resnet101, alexnet...'
#>>>> see import pretrainedmodels; pretrainedmodels.model_names
opt.device = torch.device('cuda')
mfeat_net = 'multifeature_resnet50' if 'resnet' in opt.arch else 'multifeature_bninception'
model = archs.select(mfeat_net, opt)
opt.network_feature_dim = model.feature_dim
print('{} Setup for {} with {} batchmining on {} complete with #weights: {}'.format(opt.loss.upper(), opt.arch.upper(), opt.batch_mining.upper(), opt.dataset.upper(), misc.gimme_params(model)))
if opt.fc_lr<0:
to_optim = [{'params':model.parameters(),'lr':opt.lr,'weight_decay':opt.decay}]
else:
all_but_fc_params = [x[-1] for x in list(filter(lambda x: 'last_linear' not in x[0], model.named_parameters()))]
fc_params = model.model.last_linear.parameters()
to_optim = [{'params':all_but_fc_params,'lr':opt.lr,'weight_decay':opt.decay},
{'params':fc_params,'lr':opt.fc_lr,'weight_decay':opt.decay}]
#####
selfsim_model = archs.select(mfeat_net, opt)
selfsim_model.load_state_dict(model.state_dict())
#####
_ = model.to(opt.device)
_ = selfsim_model.to(opt.device)
"""============================================================================"""
#################### DATALOADER SETUPS ##################
dataloaders = {}
datasets = datasets.select(opt.dataset, opt, opt.source_path)
dataloaders['evaluation'] = torch.utils.data.DataLoader(datasets['evaluation'], num_workers=opt.kernels, batch_size=opt.bs, shuffle=False)
dataloaders['evaluation_train'] = torch.utils.data.DataLoader(datasets['evaluation_train'], num_workers=opt.kernels, batch_size=opt.bs, shuffle=False)
dataloaders['testing'] = torch.utils.data.DataLoader(datasets['testing'], num_workers=opt.kernels, batch_size=opt.bs, shuffle=False)
train_data_sampler = dsamplers.select(opt.data_sampler, opt, datasets['training'].image_dict, datasets['training'].image_list)
datasets['training'].include_aux_augmentations = True
dataloaders['training'] = torch.utils.data.DataLoader(datasets['training'], num_workers=opt.kernels, batch_sampler=train_data_sampler)
opt.n_classes = len(dataloaders['training'].dataset.avail_classes)
"""============================================================================"""
#################### CREATE LOGGING FILES ###############
sub_loggers = ['Train', 'Test', 'Model Grad']
LOG = logger.LOGGER(opt, sub_loggers=sub_loggers, start_new=True, log_online=opt.log_online)
"""============================================================================"""
#################### LOSS SETUP ####################
batchminer = bmine.select(opt.batch_mining, opt)
criterion_dict = {}
for key in opt.diva_features:
if 'discriminative' in key:
criterion_dict[key], to_optim = criteria.select(opt.loss, opt, to_optim, batchminer)
if len(opt.diva_decorrelations):
criterion_dict['separation'], to_optim = criteria.select('adversarial_separation', opt, to_optim, None)
if 'selfsimilarity' in opt.diva_features:
criterion_dict['selfsimilarity'], to_optim = criteria.select(opt.diva_ssl, opt, to_optim, None)
if 'invariantspread' in opt.diva_features:
criterion_dict['invariantspread'], to_optim = criteria.select('invariantspread', opt, to_optim, batchminer)
#############
if 'shared' in opt.diva_features:
if opt.diva_sharing=='standard':
shared_batchminer = bmine.select('shared_neg_distance', opt)
criterion_dict['shared'], to_optim = criteria.select(opt.loss, opt, to_optim, shared_batchminer)
elif opt.diva_sharing=='random':
random_shared_batchminer = bmine.select('random_distance', opt)
criterion_dict['shared'], to_optim = criteria.select(opt.loss, opt, to_optim, random_shared_batchminer)
elif opt.diva_sharing=='full':
full_shared_batchminer = bmine.select('shared_full_distance', opt)
criterion_dict['shared'], to_optim = criteria.select(opt.loss, opt, to_optim, full_shared_batchminer)
else:
raise Exception('Sharing method {} not available!'.format(opt.diva_sharing))
#############
if 'intra' in opt.diva_features:
if opt.diva_intra=='random':
intra_batchminer = bmine.select('intra_random', opt)
else:
raise Exception('Intra-Feature method {} not available!'.format(opt.diva_intra))
criterion_dict['intra'], to_optim = criteria.select(opt.loss, opt, to_optim, intra_batchminer)
#############
if 'dc' in opt.diva_features:
criterion_dict['dc'], to_optim = criteria.select('dc', opt, to_optim, batchminer)
if 'imrot' in opt.diva_features:
criterion_dict['imrot'], to_optim = criteria.select('imrot', opt, to_optim, batchminer)
for key in criterion_dict.keys():
_ = criterion_dict[key].to(opt.device)
if 'selfsimilarity' in criterion_dict:
criterion_dict['selfsimilarity'].create_memory_queue(selfsim_model, dataloaders['training'], opt.device, opt_key='selfsimilarity')
if 'imrot' in criterion_dict:
dataloaders['training'].dataset.predict_rotations = True
"""============================================================================"""
#################### OPTIM SETUP ####################
optimizer = torch.optim.Adam(to_optim)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.tau, gamma=opt.gamma)
"""============================================================================"""
#################### METRIC COMPUTER ####################
metric_computer = metrics.MetricComputer(opt.evaluation_metrics)
"""============================================================================"""
################### SCRIPT MAIN ##########################
print('\n-----\n')
iter_count = 0
for epoch in range(opt.n_epochs):
opt.epoch = epoch
### Scheduling Changes specifically for cosine scheduling
if opt.scheduler!='none': print('Running with learning rates {}...'.format(' | '.join('{}'.format(x) for x in scheduler.get_lr())))
"""======================================="""
if train_data_sampler.requires_storage:
train_data_sampler.precompute_indices()
"""======================================="""
if 'dc' in criterion_dict and epoch%opt.diva_dc_update_f==0:
criterion_dict['dc'].update_pseudo_labels(model, dataloaders['evaluation_train'], opt.device)
"""======================================="""
### Train one epoch
start = time.time()
_ = model.train()
loss_collect = {'train':[], 'separation':[]}
data_iterator = tqdm(dataloaders['training'], desc='Epoch {} Training...'.format(epoch))
for i,(class_labels, input, input_indices, aux_input, imrot_labels) in enumerate(data_iterator):
###################
if 'invariantspread' in criterion_dict:
input = torch.cat([input[:len(input)//2,:], aux_input[:len(input)//2]], dim=0)
features = model(input.to(opt.device))
features, direct_features, supervise_loss = features
###################
if 'selfsimilarity' in criterion_dict:
with torch.no_grad():
### Use shuffleBN to avoid information bleeding making samples interdependent.
forward_shuffle, backward_reorder = criterion_dict['selfsimilarity'].shuffleBN(len(features['selfsimilarity']))
selfsim_key_features = selfsim_model(aux_input[forward_shuffle].to(opt.device))
if isinstance(selfsim_key_features, tuple): selfsim_key_features = selfsim_key_features[0]
selfsim_key_features = selfsim_key_features['selfsimilarity'][backward_reorder]
###################
loss = supervise_loss
for key, feature in features.items():
if 'discriminative' in key:
loss_discr = criterion_dict[key](feature, class_labels)
loss = loss + loss_discr
if 'selfsimilarity' in criterion_dict:
loss_selfsim = criterion_dict['selfsimilarity'](features['selfsimilarity'], selfsim_key_features)
loss = loss + opt.diva_alpha_ssl*loss_selfsim
if 'shared' in features:
loss_shared = criterion_dict['shared'](features['shared'], class_labels)
loss = loss + opt.diva_alpha_shared*loss_shared
if 'intra' in features:
loss_intra = criterion_dict['intra'](features['intra'], class_labels)
loss = loss + opt.diva_alpha_intra*loss_intra
if 'invariantspread' in criterion_dict:
head_1 = features['invariantspread'][:len(input)//2]
head_2 = features['invariantspread'][len(input)//2:]
loss_invsp = criterion_dict['invariantspread'](head_1, head_2)
loss = loss + loss_invsp
if 'dc' in criterion_dict:
loss_dc = criterion_dict['dc'](direct_features, input_indices)
loss = loss + loss_dc
if 'imrot' in criterion_dict:
loss_imrot = criterion_dict['imrot'](direct_features, imrot_labels)
loss = loss + loss_imrot
if 'separation' in criterion_dict:
loss_adv = criterion_dict['separation'](features)
loss = loss + loss_adv
optimizer.zero_grad()
loss.backward()
### Compute Model Gradients and log them!
grads = np.concatenate([p.grad.detach().cpu().numpy().flatten() for p in model.parameters() if p.grad is not None])
grad_l2, grad_max = np.mean(np.sqrt(np.mean(np.square(grads)))), np.mean(np.max(np.abs(grads)))
LOG.progress_saver['Model Grad'].log('Grad L2', grad_l2, group='L2')
LOG.progress_saver['Model Grad'].log('Grad Max', grad_max, group='Max')
if opt.diva_moco_trainable_temp:
LOG.progress_saver['Train'].log('temp', criterion_dict['selfsimilarity'].temperature.cpu().detach().numpy(), group='Temp')
### Update network weights!
optimizer.step()
###
loss_collect['train'].append(loss.item())
if 'separation' in criterion_dict:
loss_collect['separation'].append(loss_adv.item())
if 'selfsimilarity' in criterion_dict:
### Update Key Network
for model_par, key_model_par in zip(model.parameters(), selfsim_model.parameters()):
momentum = criterion_dict['selfsimilarity'].momentum
key_model_par.data.copy_(key_model_par.data*momentum + model_par.data*(1-momentum))
###
criterion_dict['selfsimilarity'].update_memory_queue(selfsim_key_features)
###
iter_count += 1
if i==len(dataloaders['training'])-1: data_iterator.set_description('Epoch (Train) {0}: Mean Loss [{1:.4f}]'.format(epoch, np.mean(loss_collect['train'])))
"""======================================="""
if train_data_sampler.requires_storage and train_data_sampler.update_storage:
train_data_sampler.replace_storage_entries(features.detach().cpu(), input_indices)
result_metrics = {'loss': np.mean(loss_collect['train'])}
if 'separation' in criterion_dict:
result_metrics['sep. loss'] = np.mean(loss_collect['separation'])
####
LOG.progress_saver['Train'].log('epochs', epoch)
for metricname, metricval in result_metrics.items():
LOG.progress_saver['Train'].log(metricname, metricval)
LOG.progress_saver['Train'].log('time', np.round(time.time()-start, 4))
"""======================================="""
### Evaluate -
_ = model.eval()
if opt.dataset in ['cars196', 'cub200', 'online_products']:
test_dataloaders = [dataloaders['testing']]
elif opt.dataset=='in-shop':
test_dataloaders = [dataloaders['testing_query'], dataloaders['testing_gallery']]
eval.evaluate(opt.dataset, LOG, metric_computer, test_dataloaders, model, opt, opt.evaltypes, opt.device)
LOG.update(all=True)
"""======================================="""
### Learning Rate Scheduling Step
if opt.scheduler != 'none':
scheduler.step()
print('\n-----\n')
"""======================================================="""
### CREATE A SUMMARY TEXT FILE
summary_text = ''
full_training_time = time.time()-full_training_start_time
summary_text += 'Training Time: {} min.\n'.format(np.round(full_training_time/60,2))
summary_text += '---------------\n'
for sub_logger in LOG.sub_loggers:
metrics = LOG.graph_writer[sub_logger].ov_title
summary_text += '{} metrics: {}\n'.format(sub_logger.upper(), metrics)
with open(opt.save_path+'/training_summary.txt','w') as summary_file:
summary_file.write(summary_text)
| 45.960557
| 193
| 0.608511
|
89d1c2bec768efcc5ba03f5166353ddd0cdb399d
| 88
|
py
|
Python
|
scikits/__init__.py
|
pierregm/scikits.timeseries-sandbox
|
9937aa30ee393d847607ad27fd42c08aebe97417
|
[
"eGenix"
] | 10
|
2015-07-06T12:19:25.000Z
|
2021-11-12T16:28:21.000Z
|
scikits/__init__.py
|
pierregm/scikits.timeseries
|
05cbb314612ce516160270d59c933c420ef860ea
|
[
"eGenix"
] | 1
|
2018-05-28T15:16:47.000Z
|
2020-03-24T17:10:35.000Z
|
scikits/__init__.py
|
pierregm/scikits.timeseries
|
05cbb314612ce516160270d59c933c420ef860ea
|
[
"eGenix"
] | 6
|
2015-05-01T03:00:43.000Z
|
2020-10-22T21:33:14.000Z
|
# Activate namespace packages.
__import__('pkg_resources').declare_namespace(__name__)
| 22
| 55
| 0.829545
|
b1f9d61539878e8b296163da564cff82a47c936d
| 2,046
|
py
|
Python
|
subsurface/structs/structured_elements.py
|
andieie/subsurface
|
96d3cc4a0ef6eb549d1ea9d32b71a1380a1383e7
|
[
"Apache-2.0"
] | null | null | null |
subsurface/structs/structured_elements.py
|
andieie/subsurface
|
96d3cc4a0ef6eb549d1ea9d32b71a1380a1383e7
|
[
"Apache-2.0"
] | null | null | null |
subsurface/structs/structured_elements.py
|
andieie/subsurface
|
96d3cc4a0ef6eb549d1ea9d32b71a1380a1383e7
|
[
"Apache-2.0"
] | null | null | null |
"""For regularly gridded datasets like rasters and tensor meshes.
"""
import numpy as np
from .base_structures import StructuredData
class OctreeMesh:
"""
TODO: implement as Dom discussed with data frames to track the levels.
"""
def __init__(self, data: StructuredData):
raise NotImplementedError
class StructuredSurface:
def __init__(self, structured_data: StructuredData):
# TODO check structured_data has two coordinates
self.ds = structured_data
# Add pyvista methods of gridded data
class StructuredGrid:
# TODO check structured_data has three coordinates
"""Container for curvilinear mesh grids.
This is analogous to PyVista's StructuredGrid class or discretize's
CurviMesh class.
"""
def __init__(self, structured_data: StructuredData):
self.ds = structured_data
@property
def cartesian_dimensions(self):
return len(self.cartesian_coords_names)
@property
def cartesian_coords_names(self):
coord_names = np.array(['X', 'Y', 'Z', 'x', 'y', 'z'])
return coord_names[np.isin(coord_names, self.ds.data.dims)]
@property
def coord(self):
return self.ds.data.coords
@property
def meshgrid_3d(self):
cart_coord = [self.coord[i] for i in self.cartesian_coords_names]
grid_3d = np.meshgrid(*cart_coord, indexing='ij')
return grid_3d
def meshgrid_2d(self, attribute_name_coord_name: str = None):
"""
Args:
attribute_name_coord_name(str): Name of the xarray.Dataset coord that
will be used for the z direction. This must be 2d
Returns:
"""
grid_2d = np.meshgrid(self.coord['x'], self.coord['y'])
if attribute_name_coord_name is not None:
z_coord = self.ds.data[attribute_name_coord_name].values.T
if z_coord.ndim != 2:
raise AttributeError('The attribute must be a 2D array')
grid_2d.append(z_coord)
return grid_2d
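# Minimal numpy-only sketch, not part of the original module: illustrates the
# meshgrid shapes produced by the properties above. The coordinate arrays are
# arbitrary example values, not real subsurface data.
if __name__ == "__main__":
    _x, _y, _z = np.arange(4), np.arange(3), np.arange(2)
    _gx, _gy, _gz = np.meshgrid(_x, _y, _z, indexing='ij')
    # 'ij' indexing preserves the (x, y, z) axis order, as in meshgrid_3d.
    assert _gx.shape == _gy.shape == _gz.shape == (4, 3, 2)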
| 26.921053
| 81
| 0.65738
|
8d0fe903e3b62c9ada1e1c3e9650c29fbd96c8f0
| 21,836
|
py
|
Python
|
official/transformer/model/beam_search.py
|
KSomi/models
|
cc6c45ca6b701426d35bbbab104ad32a2e80a3cf
|
[
"Apache-2.0"
] | 5
|
2020-04-08T16:12:27.000Z
|
2021-05-14T14:05:06.000Z
|
official/transformer/model/beam_search.py
|
KSomi/models
|
cc6c45ca6b701426d35bbbab104ad32a2e80a3cf
|
[
"Apache-2.0"
] | 2
|
2019-01-09T11:58:51.000Z
|
2019-08-05T02:32:49.000Z
|
official/transformer/model/beam_search.py
|
KSomi/models
|
cc6c45ca6b701426d35bbbab104ad32a2e80a3cf
|
[
"Apache-2.0"
] | 3
|
2020-11-24T06:57:08.000Z
|
2021-08-06T06:40:56.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Beam search to find the translated sequence with the highest probability.
Source implementation from Tensor2Tensor:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py
"""
import tensorflow as tf
from tensorflow.python.util import nest
# Default value for INF
INF = 1. * 1e7
class _StateKeys(object):
"""Keys to dictionary storing the state of the beam search loop."""
# Variable storing the loop index.
CUR_INDEX = "CUR_INDEX"
# Top sequences that are alive for each batch item. Alive sequences are ones
# that have not generated an EOS token. Sequences that reach EOS are marked as
# finished and moved to the FINISHED_SEQ tensor.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]
ALIVE_SEQ = "ALIVE_SEQ"
# Log probabilities of each alive sequence. Shape [batch_size, beam_size]
ALIVE_LOG_PROBS = "ALIVE_LOG_PROBS"
# Dictionary of cached values for each alive sequence. The cache stores
# the encoder output, attention bias, and the decoder attention output from
# the previous iteration.
ALIVE_CACHE = "ALIVE_CACHE"
# Top finished sequences for each batch item.
# Has shape [batch_size, beam_size, CUR_INDEX + 1]. Sequences that are
# shorter than CUR_INDEX + 1 are padded with 0s.
FINISHED_SEQ = "FINISHED_SEQ"
# Scores for each finished sequence. Score = log probability / length norm
# Shape [batch_size, beam_size]
FINISHED_SCORES = "FINISHED_SCORES"
# Flags indicating which sequences in the finished sequences are finished.
# At the beginning, all of the sequences in FINISHED_SEQ are filler values.
# True -> finished sequence, False -> filler. Shape [batch_size, beam_size]
FINISHED_FLAGS = "FINISHED_FLAGS"
class SequenceBeamSearch(object):
"""Implementation of beam search loop."""
def __init__(self, symbols_to_logits_fn, vocab_size, batch_size,
beam_size, alpha, max_decode_length, eos_id):
self.symbols_to_logits_fn = symbols_to_logits_fn
self.vocab_size = vocab_size
self.batch_size = batch_size
self.beam_size = beam_size
self.alpha = alpha
self.max_decode_length = max_decode_length
self.eos_id = eos_id
def search(self, initial_ids, initial_cache):
"""Beam search for sequences with highest scores."""
state, state_shapes = self._create_initial_state(initial_ids, initial_cache)
finished_state = tf.while_loop(
self._continue_search, self._search_step, loop_vars=[state],
shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
finished_state = finished_state[0]
alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
# Account for corner case where there are no finished sequences for a
# particular batch item. In that case, return alive sequences for that batch
# item.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores
def _create_initial_state(self, initial_ids, initial_cache):
"""Return initial state dictionary and its shape invariants.
Args:
initial_ids: initial ids to pass into the symbols_to_logits_fn.
int tensor with shape [batch_size, 1]
initial_cache: dictionary storing values to be passed into the
symbols_to_logits_fn.
Returns:
state and shape invariant dictionaries with keys from _StateKeys
"""
# Current loop index (starts at 0)
cur_index = tf.constant(0)
# Create alive sequence with shape [batch_size, beam_size, 1]
alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)
alive_seq = tf.expand_dims(alive_seq, axis=2)
# Create tensor for storing initial log probabilities.
# Assume initial_ids are prob 1.0
initial_log_probs = tf.constant(
[[0.] + [-float("inf")] * (self.beam_size - 1)])
alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])
# Expand all values stored in the dictionary to the beam size, so that each
# beam has a separate cache.
alive_cache = nest.map_structure(
lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)
# Initialize tensor storing finished sequences with filler values.
finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)
# Set scores of the initial finished seqs to negative infinity.
finished_scores = tf.ones([self.batch_size, self.beam_size]) * -INF
# Initialize finished flags with all False values.
finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)
# Create state dictionary
state = {
_StateKeys.CUR_INDEX: cur_index,
_StateKeys.ALIVE_SEQ: alive_seq,
_StateKeys.ALIVE_LOG_PROBS: alive_log_probs,
_StateKeys.ALIVE_CACHE: alive_cache,
_StateKeys.FINISHED_SEQ: finished_seq,
_StateKeys.FINISHED_SCORES: finished_scores,
_StateKeys.FINISHED_FLAGS: finished_flags
}
# Create state invariants for each value in the state dictionary. Each
# dimension must be a constant or None. A None dimension means either:
# 1) the dimension's value is a tensor that remains the same but may
# depend on the input sequence to the model (e.g. batch size).
# 2) the dimension may have different values on different iterations.
state_shape_invariants = {
_StateKeys.CUR_INDEX: tf.TensorShape([]),
_StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]),
_StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]),
_StateKeys.ALIVE_CACHE: nest.map_structure(
_get_shape_keep_last_dim, alive_cache),
_StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]),
_StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]),
_StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])
}
return state, state_shape_invariants
def _continue_search(self, state):
"""Return whether to continue the search loop.
    The loop should terminate when either
      1) the maximum decode length has been reached, or
      2) the worst score in the finished sequences is better than the best
         score in the alive sequences (i.e. the finished sequences are provably
         unchanging)
Args:
state: A dictionary with the current loop state.
Returns:
Bool tensor with value True if loop should continue, False if loop should
terminate.
"""
i = state[_StateKeys.CUR_INDEX]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
not_at_max_decode_length = tf.less(i, self.max_decode_length)
# Calculate largest length penalty (the larger penalty, the better score).
max_length_norm = _length_normalization(self.alpha, self.max_decode_length)
# Get the best possible scores from alive sequences.
best_alive_scores = alive_log_probs[:, 0] / max_length_norm
# Compute worst score in finished sequences for each batch element
finished_scores *= tf.cast(finished_flags,
tf.float32) # set filler scores to zero
lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)
# If there are no finished sequences in a batch element, then set the lowest
# finished score to -INF for that element.
finished_batches = tf.reduce_any(finished_flags, 1)
lowest_finished_scores += (1.0 -
tf.cast(finished_batches, tf.float32)) * -INF
worst_finished_score_better_than_best_alive_score = tf.reduce_all(
tf.greater(lowest_finished_scores, best_alive_scores)
)
return tf.logical_and(
not_at_max_decode_length,
tf.logical_not(worst_finished_score_better_than_best_alive_score)
)
def _search_step(self, state):
"""Beam search loop body.
Grow alive sequences by a single ID. Sequences that have reached the EOS
token are marked as finished. The alive and finished sequences with the
highest log probabilities and scores are returned.
    A sequence's finished score is calculated by dividing the log probability
by the length normalization factor. Without length normalization, the
search is more likely to return shorter sequences.
Args:
state: A dictionary with the current loop state.
Returns:
new state dictionary.
"""
# Grow alive sequences by one token.
new_seq, new_log_probs, new_cache = self._grow_alive_seq(state)
# Collect top beam_size alive sequences
alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_cache)
# Combine newly finished sequences with existing finished sequences, and
# collect the top k scoring sequences.
finished_state = self._get_new_finished_state(state, new_seq, new_log_probs)
# Increment loop index and create new state dictionary
new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1}
new_state.update(alive_state)
new_state.update(finished_state)
return [new_state]
def _grow_alive_seq(self, state):
"""Grow alive sequences by one token, and collect top 2*beam_size sequences.
2*beam_size sequences are collected because some sequences may have reached
the EOS token. 2*beam_size ensures that at least beam_size sequences are
still alive.
Args:
state: A dictionary with the current loop state.
Returns:
Tuple of
(Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1],
Scores of returned sequences [batch_size, 2 * beam_size],
New alive cache, for each of the 2 * beam_size sequences)
"""
i = state[_StateKeys.CUR_INDEX]
alive_seq = state[_StateKeys.ALIVE_SEQ]
alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]
alive_cache = state[_StateKeys.ALIVE_CACHE]
beams_to_keep = 2 * self.beam_size
# Get logits for the next candidate IDs for the alive sequences. Get the new
# cache values at the same time.
flat_ids = _flatten_beam_dim(alive_seq) # [batch_size * beam_size]
flat_cache = nest.map_structure(_flatten_beam_dim, alive_cache)
flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache)
# Unflatten logits to shape [batch_size, beam_size, vocab_size]
logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size)
new_cache = nest.map_structure(
lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size),
flat_cache)
# Convert logits to normalized log probs
candidate_log_probs = _log_prob_from_logits(logits)
# Calculate new log probabilities if each of the alive sequences were
    # extended by the candidate IDs.
# Shape [batch_size, beam_size, vocab_size]
log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)
# Each batch item has beam_size * vocab_size candidate sequences. For each
# batch item, get the k candidates with the highest log probabilities.
flat_log_probs = tf.reshape(log_probs,
[-1, self.beam_size * self.vocab_size])
topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep)
# Extract the alive sequences that generate the highest log probabilities
# after being extended.
topk_beam_indices = topk_indices // self.vocab_size
topk_seq, new_cache = _gather_beams(
[alive_seq, new_cache], topk_beam_indices, self.batch_size,
beams_to_keep)
# Append the most probable IDs to the topk sequences
topk_ids = topk_indices % self.vocab_size
topk_ids = tf.expand_dims(topk_ids, axis=2)
topk_seq = tf.concat([topk_seq, topk_ids], axis=2)
return topk_seq, topk_log_probs, new_cache
def _get_new_alive_state(self, new_seq, new_log_probs, new_cache):
"""Gather the top k sequences that are still alive.
Args:
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]
new_log_probs: Log probabilities of new sequences
float32 tensor with shape [batch_size, beam_size]
new_cache: Dict of cached values for each sequence.
Returns:
Dictionary with alive keys from _StateKeys:
{Top beam_size sequences that are still alive (don't end with eos_id)
Log probabilities of top alive sequences
Dict cache storing decoder states for top alive sequences}
"""
# To prevent finished sequences from being considered, set log probs to -INF
new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)
new_log_probs += tf.cast(new_finished_flags, tf.float32) * -INF
top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams(
[new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size,
self.beam_size)
return {
_StateKeys.ALIVE_SEQ: top_alive_seq,
_StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs,
_StateKeys.ALIVE_CACHE: top_alive_cache
}
def _get_new_finished_state(self, state, new_seq, new_log_probs):
"""Combine new and old finished sequences, and gather the top k sequences.
Args:
state: A dictionary with the current loop state.
new_seq: New sequences generated by growing the current alive sequences
int32 tensor with shape [batch_size, beam_size, i + 1]
new_log_probs: Log probabilities of new sequences
float32 tensor with shape [batch_size, beam_size]
Returns:
Dictionary with finished keys from _StateKeys:
{Top beam_size finished sequences based on score,
Scores of finished sequences,
Finished flags of finished sequences}
"""
i = state[_StateKeys.CUR_INDEX]
finished_seq = state[_StateKeys.FINISHED_SEQ]
finished_scores = state[_StateKeys.FINISHED_SCORES]
finished_flags = state[_StateKeys.FINISHED_FLAGS]
# First append a column of 0-ids to finished_seq to increment the length.
# New shape of finished_seq: [batch_size, beam_size, i + 1]
finished_seq = tf.concat(
[finished_seq,
tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)], axis=2)
# Calculate new seq scores from log probabilities.
length_norm = _length_normalization(self.alpha, i + 1)
new_scores = new_log_probs / length_norm
# Set the scores of the still-alive seq in new_seq to large negative values.
new_finished_flags = tf.equal(new_seq[:, :, -1], self.eos_id)
new_scores += (1. - tf.cast(new_finished_flags, tf.float32)) * -INF
# Combine sequences, scores, and flags.
finished_seq = tf.concat([finished_seq, new_seq], axis=1)
finished_scores = tf.concat([finished_scores, new_scores], axis=1)
finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)
# Return the finished sequences with the best scores.
top_finished_seq, top_finished_scores, top_finished_flags = (
_gather_topk_beams([finished_seq, finished_scores, finished_flags],
finished_scores, self.batch_size, self.beam_size))
return {
_StateKeys.FINISHED_SEQ: top_finished_seq,
_StateKeys.FINISHED_SCORES: top_finished_scores,
_StateKeys.FINISHED_FLAGS: top_finished_flags
}
def sequence_beam_search(
symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size,
alpha, max_decode_length, eos_id):
"""Search for sequence of subtoken ids with the largest probability.
Args:
symbols_to_logits_fn: A function that takes in ids, index, and cache as
arguments. The passed in arguments will have shape:
ids -> [batch_size * beam_size, index]
index -> [] (scalar)
cache -> nested dictionary of tensors [batch_size * beam_size, ...]
The function must return logits and new cache.
logits -> [batch * beam_size, vocab_size]
new cache -> same shape/structure as inputted cache
initial_ids: Starting ids for each batch item.
int32 tensor with shape [batch_size]
initial_cache: dict containing starting decoder variables information
    vocab_size: int size of the token vocabulary
beam_size: int number of beams
alpha: float defining the strength of length normalization
max_decode_length: maximum length to decoded sequence
eos_id: int id of eos token, used to determine when a sequence has finished
Returns:
Top decoded sequences [batch_size, beam_size, max_decode_length]
sequence scores [batch_size, beam_size]
"""
batch_size = tf.shape(initial_ids)[0]
sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size,
beam_size, alpha, max_decode_length, eos_id)
return sbs.search(initial_ids, initial_cache)
def _log_prob_from_logits(logits):
return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True)
def _length_normalization(alpha, length):
"""Return length normalization factor."""
return tf.pow(((5. + tf.cast(length, tf.float32)) / 6.), alpha)
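# Illustrative worked instance, not part of the original file: with alpha = 0.6
# and length = 10,
#   _length_normalization(0.6, 10) == ((5. + 10.) / 6.) ** 0.6  # ~= 1.73
# Dividing a (negative) log probability by this growing factor keeps longer
# finished sequences competitive with shorter ones.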
def _expand_to_beam_size(tensor, beam_size):
"""Tiles a given tensor by beam_size.
Args:
tensor: tensor to tile [batch_size, ...]
beam_size: How much to tile the tensor by.
Returns:
Tiled tensor [batch_size, beam_size, ...]
"""
tensor = tf.expand_dims(tensor, axis=1)
tile_dims = [1] * tensor.shape.ndims
tile_dims[1] = beam_size
return tf.tile(tensor, tile_dims)
def _shape_list(tensor):
"""Return a list of the tensor's shape, and ensure no None values in list."""
# Get statically known shape (may contain None's for unknown dimensions)
shape = tensor.get_shape().as_list()
# Ensure that the shape values are not None
dynamic_shape = tf.shape(tensor)
for i in range(len(shape)): # pylint: disable=consider-using-enumerate
if shape[i] is None:
shape[i] = dynamic_shape[i]
return shape
def _get_shape_keep_last_dim(tensor):
shape_list = _shape_list(tensor)
  # Only the last dimension of the shape is kept; all other dimensions are set to None.
for i in range(len(shape_list) - 1):
shape_list[i] = None
if isinstance(shape_list[-1], tf.Tensor):
shape_list[-1] = None
return tf.TensorShape(shape_list)
def _flatten_beam_dim(tensor):
"""Reshapes first two dimensions in to single dimension.
Args:
tensor: Tensor to reshape of shape [A, B, ...]
Returns:
Reshaped tensor of shape [A*B, ...]
"""
shape = _shape_list(tensor)
shape[0] *= shape[1]
shape.pop(1) # Remove beam dim
return tf.reshape(tensor, shape)
def _unflatten_beam_dim(tensor, batch_size, beam_size):
"""Reshapes first dimension back to [batch_size, beam_size].
Args:
tensor: Tensor to reshape of shape [batch_size*beam_size, ...]
batch_size: Tensor, original batch size.
beam_size: int, original beam size.
Returns:
Reshaped tensor of shape [batch_size, beam_size, ...]
"""
shape = _shape_list(tensor)
new_shape = [batch_size, beam_size] + shape[1:]
return tf.reshape(tensor, new_shape)
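# Illustrative shape sketch, not part of the original file: a tensor of shape
# [batch_size=2, beam_size=4, length=8] becomes shape [8, 8] after
# _flatten_beam_dim, and _unflatten_beam_dim(t, 2, 4) restores [2, 4, 8].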
def _gather_beams(nested, beam_indices, batch_size, new_beam_size):
"""Gather beams from nested structure of tensors.
Each tensor in nested represents a batch of beams, where beam refers to a
single search state (beam search involves searching through multiple states
in parallel).
This function is used to gather the top beams, specified by
beam_indices, from the nested tensors.
Args:
nested: Nested structure (tensor, list, tuple or dict) containing tensors
with shape [batch_size, beam_size, ...].
beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each
      value in beam_indices must be in [0, beam_size); the values are not
      necessarily unique.
batch_size: int size of batch
new_beam_size: int number of beams to be pulled from the nested tensors.
Returns:
Nested structure containing tensors with shape
[batch_size, new_beam_size, ...]
"""
  # Computes the i'th coordinate that contains the batch index for gather_nd.
  # Batch pos is a tensor like [[0, 0, 0, 0], [1, 1, 1, 1], ...].
batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size
batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])
# Create coordinates to be passed to tf.gather_nd. Stacking creates a tensor
# with shape [batch_size, beam_size, 2], where the last dimension contains
# the (i, j) gathering coordinates.
coordinates = tf.stack([batch_pos, beam_indices], axis=2)
return nest.map_structure(
lambda state: tf.gather_nd(state, coordinates), nested)
def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):
"""Gather top beams from nested structure."""
_, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)
return _gather_beams(nested, topk_indexes, batch_size, beam_size)
| 40.139706
| 90
| 0.72179
|
48daa0b20683a5113d619e0090c5d602915443c3
| 946
|
py
|
Python
|
trac/versioncontrol/web_ui/tests/__init__.py
|
lelit/trac
|
ee8f811a29321f3c0fc8b8235d143e0ffcd6d013
|
[
"BSD-3-Clause"
] | null | null | null |
trac/versioncontrol/web_ui/tests/__init__.py
|
lelit/trac
|
ee8f811a29321f3c0fc8b8235d143e0ffcd6d013
|
[
"BSD-3-Clause"
] | null | null | null |
trac/versioncontrol/web_ui/tests/__init__.py
|
lelit/trac
|
ee8f811a29321f3c0fc8b8235d143e0ffcd6d013
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import unittest
from trac.versioncontrol.web_ui.tests import browser, changeset, log, util, \
wikisyntax
def suite():
suite = unittest.TestSuite()
suite.addTest(browser.suite())
suite.addTest(changeset.suite())
suite.addTest(log.suite())
suite.addTest(util.suite())
suite.addTest(wikisyntax.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 29.5625
| 77
| 0.697674
|
501ac67cf5072479665059bd32045b5781d0c97c
| 2,096
|
py
|
Python
|
back-end/www/tests/topic_tests.py
|
TUD-KInD/COCTEAU
|
8ea4f546397feaaf22b4e9958b509324b5b332aa
|
[
"MIT"
] | null | null | null |
back-end/www/tests/topic_tests.py
|
TUD-KInD/COCTEAU
|
8ea4f546397feaaf22b4e9958b509324b5b332aa
|
[
"MIT"
] | 1
|
2021-11-15T11:40:04.000Z
|
2021-11-15T11:40:04.000Z
|
back-end/www/tests/topic_tests.py
|
TUD-KInD/COCTEAU
|
8ea4f546397feaaf22b4e9958b509324b5b332aa
|
[
"MIT"
] | 2
|
2021-07-14T13:07:22.000Z
|
2022-01-25T23:58:06.000Z
|
from basic_tests import BasicTest
from models.model_operations import topic_operations
from models.model import db
import unittest
class TopicTest(BasicTest):
"""Test case for topics."""
def setUp(self):
db.create_all()
def test_create_topic(self):
title = "this is a topic title"
description = "this is a topic description"
topic = topic_operations.create_topic(
title=title, description=description)
assert topic in db.session
def test_get_topic(self):
title = "this is a topic title"
description = "this is a topic description"
topic = topic_operations.create_topic(
title=title, description=description)
topic_id = topic.id
retrieved_topic = topic_operations.get_topic_by_id(topic_id)
assert retrieved_topic.title == title and retrieved_topic.description == description
def test_update_topic(self):
title = "this is a topic title"
description = "this is a topic description"
topic = topic_operations.create_topic(
title=title, description=description)
topic_id = topic.id
new_title = "new title"
new_description = "new description"
topic_operations.update_topic(title=new_title, topic_id=topic_id)
retrieved_topic = topic_operations.get_topic_by_id(topic_id)
assert retrieved_topic.title == new_title
topic_operations.update_topic(
description=new_description, topic_id=topic_id)
retrieved_topic = topic_operations.get_topic_by_id(topic_id)
assert retrieved_topic.description == new_description
def test_remove_topic(self):
title = "this is a topic title"
description = "this is a topic description"
topic = topic_operations.create_topic(
title=title, description=description)
assert topic in db.session
topic_id = topic.id
topic_operations.remove_topic(topic_id)
assert topic not in db.session
if __name__ == "__main__":
unittest.main()
| 27.946667
| 92
| 0.679866
|
07c1ac0cf29bec030e400a556c0d395e587f2805
| 1,270
|
py
|
Python
|
app/coreo/tests/test_models.py
|
rathore287/recipe-api
|
2ee88b6774522ecacf20861dd2752198f71dfc9c
|
[
"MIT"
] | null | null | null |
app/coreo/tests/test_models.py
|
rathore287/recipe-api
|
2ee88b6774522ecacf20861dd2752198f71dfc9c
|
[
"MIT"
] | null | null | null |
app/coreo/tests/test_models.py
|
rathore287/recipe-api
|
2ee88b6774522ecacf20861dd2752198f71dfc9c
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_email_successful(self):
""" Test for creating user with email successfully"""
email= "harshraj.rathore@gmail.com"
password = "password@123"
user = get_user_model().objects.create_user(
email = email,
password = password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_user_email_normalized(self):
""" Normalizes the email"""
email = "harshraj.rathore@GMAIL.com"
user = get_user_model().objects.create_user(email, "test123")
self.assertEqual(user.email, email.lower())
def test_invalid_email(self):
""" Tests if user email address is valid."""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, "test123")
def test_creating_superuser(self):
""" Creates superuser admin"""
user = get_user_model().objects.create_superuser(
"harshraj.rathore@gmail.com",
"password@123"
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 33.421053
| 69
| 0.646457
|
6455c11567e13944bec1c55fcd4e992efb81c67c
| 154
|
py
|
Python
|
api/tools/learn/jialuo.py
|
kiddestiny/Darks
|
4727004a0e42728ded9bed015b37990a2f34a782
|
[
"MIT"
] | null | null | null |
api/tools/learn/jialuo.py
|
kiddestiny/Darks
|
4727004a0e42728ded9bed015b37990a2f34a782
|
[
"MIT"
] | null | null | null |
api/tools/learn/jialuo.py
|
kiddestiny/Darks
|
4727004a0e42728ded9bed015b37990a2f34a782
|
[
"MIT"
] | null | null | null |
speed_open = [0,5.2,11.4,18.4,26.2,35.2,45.6,57.8,72,89.2,110.2,136.4,170]
speed_close = [0,1,8.2,16.6,26.6,37.6,51.4,68.2,89.2,116,152]
# speed_all =
| 25.666667
| 74
| 0.616883
|
7ca7c672651b1cd0421991cd75e7d7f44dbf87e2
| 659
|
py
|
Python
|
ots/manage.py
|
hasibarrafiul/Travel-Guide-WebApp
|
f160ce1c4ef95b6e08abcaebf2c702bfdecacd6f
|
[
"MIT"
] | 3
|
2021-12-01T15:56:42.000Z
|
2021-12-23T15:49:48.000Z
|
ots/manage.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | 8
|
2021-11-11T10:43:35.000Z
|
2022-01-08T23:27:33.000Z
|
ots/manage.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | 3
|
2021-11-20T18:35:59.000Z
|
2022-02-15T13:35:47.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ots.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.652174
| 73
| 0.676783
|
2661a66bc1d87bc059e66bc7599803f6fb927c55
| 3,095
|
py
|
Python
|
tests/test_examples.py
|
Willtor/deepsparse
|
e994c2ca479d675cdfc16958502d92952dfe3aee
|
[
"Apache-2.0"
] | 460
|
2021-02-04T17:51:41.000Z
|
2022-03-31T14:50:16.000Z
|
tests/test_examples.py
|
markurtz/deepsparse
|
5a5557f4bf9026545116b22b36dcb7d506e8a070
|
[
"Apache-2.0"
] | 69
|
2021-02-09T04:35:05.000Z
|
2022-03-31T04:15:16.000Z
|
tests/test_examples.py
|
markurtz/deepsparse
|
5a5557f4bf9026545116b22b36dcb7d506e8a070
|
[
"Apache-2.0"
] | 36
|
2021-02-04T22:20:38.000Z
|
2022-03-18T02:32:15.000Z
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from unittest.mock import patch
import pytest
from sparsezoo.models.classification import mobilenet_v1
from sparsezoo.objects import Model
SRC_DIRS = [
os.path.join(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../examples"),
dirname,
)
for dirname in [
"benchmark",
"classification",
"detection",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import check_correctness
import classification
import detection
import run_benchmark
@pytest.mark.parametrize(
"model, batch_size",
(
[
pytest.param(
mobilenet_v1,
b,
)
for b in [1, 8, 64]
]
),
)
def test_check_correctness(model: Model, batch_size: int):
m = model()
testargs = f"""
check_correctness.py
{m.onnx_file.downloaded_path()}
--batch_size {batch_size}
""".split()
with patch.object(sys, "argv", testargs):
check_correctness.main()
@pytest.mark.parametrize(
"model, batch_size",
(
[
pytest.param(
mobilenet_v1,
b,
)
for b in [1, 8, 64]
]
),
)
def test_run_benchmark(model: Model, batch_size: int):
m = model()
testargs = f"""
run_benchmark.py
{m.onnx_file.downloaded_path()}
--batch_size {batch_size}
""".split()
with patch.object(sys, "argv", testargs):
run_benchmark.main()
@pytest.mark.parametrize(
"model_name, batch_size",
(
[
pytest.param(
"mobilenet_v1",
b,
)
for b in [1, 8, 64]
]
),
)
def test_classification(model_name: str, batch_size: int):
testargs = f"""
classification.py
{model_name}
--batch_size {batch_size}
""".split()
with patch.object(sys, "argv", testargs):
classification.main()
@pytest.mark.parametrize(
"model_name, batch_size",
(
[
pytest.param(
"yolo_v3",
b,
)
for b in [1, 8, 64]
]
),
)
def test_detection(model_name: str, batch_size: int):
testargs = f"""
detection.py
{model_name}
--batch_size {batch_size}
""".split()
with patch.object(sys, "argv", testargs):
detection.main()
| 22.591241
| 80
| 0.575121
|
e0c43c1ee93a6b0c27411c3a3e9caf11b7067ebe
| 2,655
|
py
|
Python
|
cogs/react.py
|
SQCS-TW/SQCS_bot
|
9851bea5a71bbe0606e28686aa4a14c078453f91
|
[
"MIT"
] | null | null | null |
cogs/react.py
|
SQCS-TW/SQCS_bot
|
9851bea5a71bbe0606e28686aa4a14c078453f91
|
[
"MIT"
] | null | null | null |
cogs/react.py
|
SQCS-TW/SQCS_bot
|
9851bea5a71bbe0606e28686aa4a14c078453f91
|
[
"MIT"
] | null | null | null |
from core.classes import Cog_Extension, JsonApi
from discord.ext import commands
import core.functions as func
import time
from core.setup import rsp, fluctlight_client
import asyncio
class React(Cog_Extension):
@commands.Cog.listener()
async def on_member_join(self, member):
nts = JsonApi().get_json('NT')["id_list"]
if member.id in nts:
return
if member.bot:
return
time_status = await func.get_time_title(func.now_time_info('hour'))
msg = '\n'.join(rsp["join"]["opening"][time_status]) + '\n'
msg += '\n'.join(rsp["join"]["opening"]["main"])
await member.send(msg)
await asyncio.sleep(60)
msg = '\n'.join(rsp["join"]["hackmd_read"])
await member.send(msg)
def check(message):
return message.channel == member.dm_channel and message.author == member
try:
deep_freeze_status = (await self.bot.wait_for('message', check=check, timeout=60.0)).content
if deep_freeze_status == 'y':
msg = '\n'.join(rsp["join"]["df_1"])
deep_freeze_status = 1
elif deep_freeze_status == 'n':
msg = '\n'.join(rsp["join"]["df_0"])
deep_freeze_status = 0
else:
msg = '\n'.join(rsp["join"]["invalid_syntax"])
deep_freeze_status = 0
except asyncio.TimeoutError:
msg = '\n'.join(rsp["join"]["time_out"])
deep_freeze_status = 0
        # prepend an extra newline since the previous '\n'.join(...) block does not end with one
msg += '\n' + '\n'.join(rsp["join"]["contact_method"])
await member.send(msg)
# create personal fluctlight data
start_time = time.time()
fluctlight_cursor = fluctlight_client["light-cube-info"]
member_fluctlight = {
"_id": member.id,
"score": 0,
"du": 0,
"oc_auth": 0,
"sc_auth": 0,
"lvl_ind": 0,
"mdu": 0,
"odu": 0,
"odu_time": time.time(),
"contrib": 0,
"week_active": 0,
"deep_freeze": deep_freeze_status
}
try:
fluctlight_cursor.insert_one(member_fluctlight)
        except Exception:
fluctlight_cursor.delete_one({"_id": member.id})
fluctlight_cursor.insert_one(member_fluctlight)
end_time = time.time()
msg = '\n'.join(rsp["join"]["fl_create_finish"])
await member.send(msg)
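        # Translation of the message below: "By the way, it took me {elapsed} (sec) to set up your profile ><!"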
await member.send(f'順帶一提,我用了 {round(end_time - start_time, 2)} (sec) 建立你的檔案><!')
def setup(bot):
bot.add_cog(React(bot))
| 29.831461
| 104
| 0.551412
|
6491938a2d4e1f3060c808fd9057a5a714753448
| 4,270
|
py
|
Python
|
py/sophus/quaternion.py
|
brentyi/Sophus
|
d6a9b043d42f4b322c63cc72f058afa70e324fd2
|
[
"MIT"
] | 2,111
|
2019-01-29T07:01:32.000Z
|
2022-03-29T06:48:14.000Z
|
py/sophus/quaternion.py
|
brentyi/Sophus
|
d6a9b043d42f4b322c63cc72f058afa70e324fd2
|
[
"MIT"
] | 262
|
2015-01-03T19:00:20.000Z
|
2022-03-30T20:33:47.000Z
|
py/sophus/quaternion.py
|
brentyi/Sophus
|
d6a9b043d42f4b322c63cc72f058afa70e324fd2
|
[
"MIT"
] | 502
|
2015-01-14T00:38:41.000Z
|
2022-03-31T16:09:46.000Z
|
""" run with: python3 -m sophus.quaternion """
import sophus
import sympy
import sys
import unittest
class Quaternion:
""" Quaternion class """
def __init__(self, real, vec):
""" Quaternion consists of a real scalar, and an imaginary 3-vector """
assert isinstance(vec, sympy.Matrix)
assert vec.shape == (3, 1), vec.shape
self.real = real
self.vec = vec
def __mul__(self, right):
""" quaternion multiplication """
return Quaternion(self[3] * right[3] - self.vec.dot(right.vec),
self[3] * right.vec + right[3] * self.vec +
self.vec.cross(right.vec))
def __add__(self, right):
""" quaternion multiplication """
return Quaternion(self[3] + right[3], self.vec + right.vec)
def __neg__(self):
return Quaternion(-self[3], -self.vec)
def __truediv__(self, scalar):
""" scalar division """
return Quaternion(self.real / scalar, self.vec / scalar)
def __repr__(self):
return "( " + repr(self[3]) + " + " + repr(self.vec) + "i )"
def __getitem__(self, key):
""" We use the following convention [vec0, vec1, vec2, real] """
assert (key >= 0 and key < 4)
if key == 3:
return self.real
else:
return self.vec[key]
def squared_norm(self):
""" squared norm when considering the quaternion as 4-tuple """
return sophus.squared_norm(self.vec) + self.real**2
def conj(self):
""" quaternion conjugate """
return Quaternion(self.real, -self.vec)
def inv(self):
""" quaternion inverse """
return self.conj() / self.squared_norm()
@staticmethod
def identity():
return Quaternion(1, sophus.Vector3(0, 0, 0))
@staticmethod
def zero():
return Quaternion(0, sophus.Vector3(0, 0, 0))
def subs(self, x, y):
return Quaternion(self.real.subs(x, y), self.vec.subs(x, y))
def simplify(self):
v = sympy.simplify(self.vec)
return Quaternion(sympy.simplify(self.real),
sophus.Vector3(v[0], v[1], v[2]))
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.real == other.real and self.vec == other.vec
return False
@staticmethod
def Da_a_mul_b(a, b):
""" derivatice of quaternion muliplication wrt left multiplier a """
v0 = b.vec[0]
v1 = b.vec[1]
v2 = b.vec[2]
y = b.real
return sympy.Matrix([[y, v2, -v1, v0],
[-v2, y, v0, v1],
[v1, -v0, y, v2],
[-v0, -v1, -v2, y]])
@staticmethod
def Db_a_mul_b(a, b):
""" derivatice of quaternion muliplication wrt right multiplicand b """
u0 = a.vec[0]
u1 = a.vec[1]
u2 = a.vec[2]
x = a.real
return sympy.Matrix([[x, -u2, u1, u0],
[u2, x, -u0, u1],
[-u1, u0, x, u2],
[-u0, -u1, -u2, x]])
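# Clarifying note (added): with the [vec0, vec1, vec2, real] ordering used by
# __getitem__, entry (r, c) of the matrices above is the partial derivative
# d (a * b)[r] / d a[c] (Da_a_mul_b) or d (a * b)[r] / d b[c] (Db_a_mul_b);
# TestQuaternion.test_derivatives below verifies this symbolically.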
class TestQuaternion(unittest.TestCase):
def setUp(self):
x, u0, u1, u2 = sympy.symbols('x u0 u1 u2', real=True)
y, v0, v1, v2 = sympy.symbols('y v0 v1 v2', real=True)
u = sophus.Vector3(u0, u1, u2)
v = sophus.Vector3(v0, v1, v2)
self.a = Quaternion(x, u)
self.b = Quaternion(y, v)
def test_muliplications(self):
product = self.a * self.a.inv()
self.assertEqual(product.simplify(),
Quaternion.identity())
product = self.a.inv() * self.a
self.assertEqual(product.simplify(),
Quaternion.identity())
def test_derivatives(self):
d = sympy.Matrix(4, 4, lambda r, c: sympy.diff(
(self.a * self.b)[r], self.a[c]))
self.assertEqual(d,
Quaternion.Da_a_mul_b(self.a, self.b))
d = sympy.Matrix(4, 4, lambda r, c: sympy.diff(
(self.a * self.b)[r], self.b[c]))
self.assertEqual(d,
Quaternion.Db_a_mul_b(self.a, self.b))
if __name__ == '__main__':
unittest.main()
| 31.397059
| 79
| 0.527166
|
f8fe78cd180117568bb7ee08ca3a79e0d0e0c3fd
| 1,983
|
py
|
Python
|
Chess3/MoveImpl.py
|
LLCoolDave/Chess3
|
076740e5d67b6aba0ae077c8e7a3bc24690803f2
|
[
"MIT"
] | null | null | null |
Chess3/MoveImpl.py
|
LLCoolDave/Chess3
|
076740e5d67b6aba0ae077c8e7a3bc24690803f2
|
[
"MIT"
] | null | null | null |
Chess3/MoveImpl.py
|
LLCoolDave/Chess3
|
076740e5d67b6aba0ae077c8e7a3bc24690803f2
|
[
"MIT"
] | null | null | null |
class Coordinate(object):
def __init__(self, row, column):
if 0 <= row < 8 and 0 <= column < 8:
self.row = row
self.column = column
else:
raise ArithmeticError('Coordinate invalid')
def __eq__(self, other):
return self.row == other.row and self.column == other.column
def __hash__(self):
return hash((self.row, self.column))
def __repr__(self):
return '%s%s' % (chr(ord('A') + self.column), self.row + 1)
@classmethod
def from_string(cls, repr):
column = ord(repr[0]) - ord('A')
row = int(repr[1]) - 1
return cls(row, column)
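    # Illustrative examples (not part of the original file): the string form
    # maps columns A-H to 0-7 and rows 1-8 to 0-7, so
    #   Coordinate.from_string('A1') == Coordinate(0, 0)
    #   repr(Coordinate.from_string('E2')) == 'E2'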
class MoveAction(object):
def __init__(self, origin, target):
self.origin = origin
self.target = target
def __eq__(self, other):
return self.origin == other.origin and self.target == other.target
def __hash__(self):
return hash((self.origin, self.target))
def __repr__(self):
return '%s -> %s' % (self.origin, self.target)
class Move(object):
def __init__(self, name, origin, target, movements, captures, end_turn=True):
self.name = name
self.origin = origin
self.target = target
self.movements = movements
self.captures = captures
self.end_turn = end_turn
def __eq__(self, other):
        # for Moves, the order of movements and captures actually matters, as they may have side effects, as does the name
return self.name == other.name and self.origin == other.origin and self.movements == other.movements \
and self.captures == other.captures and self.end_turn == other.end_turn
def __hash__(self):
return hash((self.name, self.origin, tuple(self.movements), tuple(self.captures), self.end_turn))
def __repr__(self):
return 'Move "%s" of %s: Movements %s capturing %s. Ends turn: %s' % (self.name, self.origin, self.movements, self.captures, self.end_turn)
| 31.983871
| 147
| 0.617751
|
e83aa5c71a74b8b347670dc8f39df8d9de34145e
| 14,497
|
bzl
|
Python
|
third_party/clif.bzl
|
fabbondanza/deepvariant
|
c0820eb6b185c612555a80febf89d761c383fafd
|
[
"BSD-3-Clause"
] | 1
|
2020-04-30T07:07:55.000Z
|
2020-04-30T07:07:55.000Z
|
third_party/clif.bzl
|
fabbondanza/deepvariant
|
c0820eb6b185c612555a80febf89d761c383fafd
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/clif.bzl
|
fabbondanza/deepvariant
|
c0820eb6b185c612555a80febf89d761c383fafd
|
[
"BSD-3-Clause"
] | null | null | null |
"""Provide bazel rules for OSS CLIF."""
# Label for our OSS CLIF binary pyclif.
CLIF_PYCLIF = "@clif//:pyclif"
# Label for our OSS CLIF protobuf compiler.
CLIF_PROTO = "@clif//:proto"
# Label for our OSS CLIF C++ runtime headers and sources.
CLIF_CPP_RUNTIME = "@clif//:cpp_runtime"
# The CLIF generated code only compiles with C++11.
EXTRA_CC_FLAGS = ["-std=c++11"]
_PROTO_LIBRARY_SUFFIX = "_pyclif"
PYCLIF_PYEXT_SUFFIX = ".so"
PYCLIF_CC_LIB_SUFFIX = "_cclib"
PYCLIF_WRAP_SUFFIX = "_clif_wrap"
def _clif_wrap_cc_impl(ctx):
"""Executes CLIF cmdline tool to produce C++ python model from a CLIF spec."""
if len(ctx.files.srcs) != 1:
fail("Exactly one CLIF source file label must be specified.", "srcs")
clif_spec_file = ctx.files.srcs[0]
# Inputs is a set of all of the things we depend on, not inputs to the CLIF
# program itself.
inputs = depset([clif_spec_file])
for dep in ctx.attr.deps:
inputs += dep.cc.transitive_headers
inputs += ctx.files._cliflib
inputs += ctx.files.clif_deps
inputs += ctx.files.toolchain_deps
# Compute the set of include directories for CLIF so it can find header files
# used in the CLIF specification. These are the repo roots for all of our
# inputs (aka deps) plus all of the quote and system includes for our C++
# deps.
include_dirs = depset(_get_repository_roots(ctx, inputs))
for dep in ctx.attr.deps:
include_dirs += dep.cc.quote_include_directories
include_dirs += dep.cc.system_include_directories
# Construct our arguments for CLIF.
args = [
"--modname",
ctx.attr.package_name + "." + ctx.attr.module_name,
"-c",
ctx.outputs.cc_out.path,
"-g",
ctx.outputs.h_out.path,
"-i",
ctx.outputs.ccinit_out.path,
"--prepend",
"clif/python/types.h",
]
include_args = ["-I" + i for i in include_dirs.to_list()]
# Add these includes to CLIF itself.
args += include_args
# Add these includes to those passed through by CLIF to its C++ matcher.
args += ["-f" + " ".join(include_args + EXTRA_CC_FLAGS)]
# The last argument is the actual CLIF specification file.
args += [clif_spec_file.path]
outputs = [ctx.outputs.cc_out, ctx.outputs.h_out, ctx.outputs.ccinit_out]
ctx.actions.run(
executable = ctx.executable._clif,
arguments = args,
inputs = inputs.to_list(),
outputs = outputs,
mnemonic = "CLIF",
progress_message = "CLIF wrapping " + clif_spec_file.path,
)
_clif_wrap_cc = rule(
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
),
"deps": attr.label_list(
allow_files = True,
providers = ["cc"],
),
"toolchain_deps": attr.label_list(
allow_files = True,
),
# For rule "//foo/python:bar_clif" this should be "bar".
"module_name": attr.string(mandatory = True),
# For rule "//foo/python:bar_clif" this should be "foo/python".
"package_name": attr.string(mandatory = True),
"clif_deps": attr.label_list(allow_files = True),
# Hidden attribute: the Label for our PYCLIF binary itself.
"_clif": attr.label(
default = Label(CLIF_PYCLIF),
executable = True,
cfg = "host",
),
# Hidden attribute: The label to the C++ CLIF header files.
"_cliflib": attr.label(
default = Label(CLIF_CPP_RUNTIME),
allow_files = True,
),
},
output_to_genfiles = True,
outputs = {
"cc_out": "%{module_name}.cc",
"h_out": "%{module_name}.h",
"ccinit_out": "%{module_name}_init.cc",
},
implementation = _clif_wrap_cc_impl,
)
def _get_repository_roots(ctx, files):
"""Returns abnormal root directories under which files reside.
When running a ctx.action, source files within the main repository are all
relative to the current directory; however, files that are generated or exist
in remote repositories will have their root directory be a subdirectory,
e.g. bazel-out/local-fastbuild/genfiles/external/jpeg_archive. This function
returns the set of these devious directories, ranked and sorted by popularity
in order to hopefully minimize the number of I/O system calls within the
compiler, because includes have quadratic complexity.
Args:
ctx: context
files: list of paths
Returns:
list of directories
"""
ctx = ctx # unused
result = {}
for f in files:
root = f.root.path
if root:
if root not in result:
result[root] = 0
result[root] -= 1
work = f.owner.workspace_root
if work:
if root:
root += "/"
root += work
if root:
if root not in result:
result[root] = 0
result[root] -= 1
return [k for v, k in sorted([(v, k) for k, v in result.items()])]
def _clif_to_lib(label, extension):
"""Gets a C++/python/etc library corresponding to a CLIF library rule.
Args:
label: string. The name of a clif_rule. If the name is of the
        form <target>_pyclif we will strip off the `_pyclif` ending.
      extension: string. The suffix appended to the base library name.
Returns:
<target>_extension.
"""
if label.endswith(_PROTO_LIBRARY_SUFFIX):
basename = label[:-len(_PROTO_LIBRARY_SUFFIX)]
else:
basename = label
return basename + extension
def pyclifs_to_pyproto_libs(labels):
"""Gets the py protobuf label for each of pyclif label as a list."""
return [_clif_to_lib(name, "_py_pb2") for name in labels]
def pyclifs_to_ccproto_libs(labels):
"""Gets the cc protobuf label for each of pyclif label as a list."""
return [_clif_to_lib(name, "_cc_pb2") for name in labels]
def clif_deps_to_cclibs(labels):
"""Gets the cc_library name for each of label as a list."""
return [_clif_to_lib(name, PYCLIF_CC_LIB_SUFFIX) for name in labels]
def _symlink_impl(ctx):
"""Creates a symbolic link between src and out."""
out = ctx.outputs.out
src = ctx.attr.src.files.to_list()[0]
cmd = "ln -f -r -s %s %s" % (src.path, out.path)
ctx.actions.run_shell(
inputs = [src],
outputs = [out],
command = cmd,
)
symlink = rule(
implementation = _symlink_impl,
attrs = {
"src": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
"out": attr.output(mandatory = True),
},
)
def py_clif_cc(
name,
srcs,
clif_deps = [],
pyclif_deps = [],
deps = [],
copts = [],
py_deps = [],
**kwargs):
"""Defines a CLIF wrapper rule making C++ libraries accessible to Python.
Here are two example working py_clif_cc rules:
py_clif_cc(
name = "proto_cpp",
srcs = ["proto_cpp.clif"],
pyclif_deps = ["//oss_clif:oss_pyclif"],
deps = ["//oss_clif:proto_cpp_lib"],
)
py_clif_cc(
name = "pyclif_dep",
srcs = ["pyclif_dep.clif"],
deps = ["//oss_clif:pyclif_dep_lib"],
)
Args:
name: The name of the rule. This name becomes a suitable target for Python
libraries to access the C++ code.
srcs: A list that must contain a single file named <name>.clif containing
our CLIF specification.
clif_deps: A list of other CLIF rules included by this one.
pyclif_deps: A potentially empty list of pyclif_proto_library rules
deps: A list of C++ dependencies.
copts: List of copts to provide to our native.cc_library when building our
python extension module.
    py_deps: List of dependencies to provide to the native.py_library
created by this rule.
**kwargs: kwargs passed to py_library rule created by this rule.
"""
pyext_so = name + PYCLIF_PYEXT_SUFFIX
cc_library_name = name + PYCLIF_CC_LIB_SUFFIX
extended_cc_deps = deps + [CLIF_CPP_RUNTIME] + pyclif_deps
# Here's a rough outline of how we build our pyclif library:
#
# Suppose we have a module named 'foo'.
#
    # _clif_wrap_cc runs pyclif to produce foo.cc, foo.h, and foo_init.cc, which
    # together form the C++ python extension module.
#
# native.cc_library is a normal C++ library with those sources, effectively
# our "python module" as a bazel C++ library allowing other rules to depend
# on that C++ code.
#
# native.cc_binary depends on foo's cc_library to create a shared python
# extension module (.so) which python will load via its dlopen mechanism.
# This .so library is also used by the _clif_wrap_cc rule to get include paths
# when building CLIF specs depending on other clif specs.
#
# native.py_library named `name` which provides a python bazel target that
# loads the cc_binary, as data, producing a py extension module. This also
# allows client python code to depend on this module.
_clif_wrap_cc(
name = name + PYCLIF_WRAP_SUFFIX,
srcs = srcs,
deps = extended_cc_deps + clif_deps_to_cclibs(clif_deps),
clif_deps = clif_deps_to_cclibs(clif_deps),
toolchain_deps = ["@bazel_tools//tools/cpp:current_cc_toolchain"],
module_name = name,
# Turns //foo/bar:baz_pyclif into foo.bar to create our fully-qualified
# python package name.
package_name = native.package_name().replace("/", "."),
)
native.cc_library(
name = cc_library_name,
hdrs = [
name + ".h",
],
srcs = [
name + ".cc",
name + "_init.cc",
],
copts = copts + EXTRA_CC_FLAGS,
deps = extended_cc_deps + clif_deps_to_cclibs(clif_deps),
)
# To prevent ODR violations, all of the extensions must live in one
# extension module. And to be compatible with existing protobuf
# generated code, that module must be _message.so.
symlink(
name = name + "_symlink",
out = pyext_so,
src = "@protobuf_archive//:python/google/protobuf/pyext/_message.so",
)
# We create our python module which is just a thin wrapper around our real
# python module pyext_so (producing name.so for python to load). This
# rule allows python code to depend on this module, even through its written
# in C++.
native.py_library(
name = name,
srcs = [],
srcs_version = "PY2AND3",
deps = pyclifs_to_pyproto_libs(pyclif_deps) + clif_deps + py_deps,
data = [pyext_so],
**kwargs
)
# Copied from: devtools/clif/python/clif_build_rule.bzl with heavy
# modifications.
def _clif_proto_parser_rule_impl(ctx):
"""Implementation of _run_clif_proto_parser_rule."""
proto_file = ctx.files.src[0]
args = [
"-c",
ctx.outputs.cc.path,
"-h",
ctx.outputs.hdr.path,
"--strip_dir=%s" % ctx.configuration.genfiles_dir.path,
"--source_dir='.'",
"%s" % proto_file.path,
]
inputs = []
for d in ctx.attr.deps:
if "proto" in dir(d):
inputs += list(d.proto.transitive_sources)
ctx.actions.run(
mnemonic = "ClifProtoLibraryGeneration",
arguments = args,
executable = ctx.executable.parser,
inputs = inputs,
outputs = [ctx.outputs.hdr, ctx.outputs.cc],
)
_run_clif_proto_parser_rule = rule(
attrs = {
"src": attr.label(allow_files = [".proto"]),
"hdr": attr.output(),
"cc": attr.output(),
"deps": attr.label_list(),
"parser": attr.label(
executable = True,
default = Label(CLIF_PROTO),
cfg = "host",
),
},
output_to_genfiles = True,
implementation = _clif_proto_parser_rule_impl,
)
def pyclif_proto_library(
name,
proto_lib,
proto_srcfile = "",
deps = [],
visibility = None,
compatible_with = None,
testonly = None):
"""Generate C++ CLIF extension for using a proto and dependent py_proto_lib.
Args:
name: generated cc_library (name.h) to use in py_clif_cc clif_deps
proto_lib: name of a proto_library rule
proto_srcfile: the proto name if it does not match proto_lib rule name
deps: passed to cc_library
visibility: passed to all generated "files": name.h name.a name_pb2.py
compatible_with: compatibility list
testonly: available for test rules only flag (default from package)
"""
if not name.endswith(_PROTO_LIBRARY_SUFFIX):
fail("The name of the 'pyclif_proto_library' target should be of the " +
"form '<PROTO_FILE>%s' where the proto " % _PROTO_LIBRARY_SUFFIX +
"file being wrapped has the name '<PROTO_FILE>.proto'.")
if proto_srcfile:
required_name = proto_srcfile[:-len(".proto")] + _PROTO_LIBRARY_SUFFIX
if name != required_name:
fail("The name of the 'pyclif_proto_library' target should be " +
"'%s' as it is wrapping %s." % (required_name, proto_srcfile))
hdr_file = name + ".h"
cpp_file = name + ".cc"
clifrule = name + "_clif_rule"
src = name[:-len(_PROTO_LIBRARY_SUFFIX)] + ".proto"
_run_clif_proto_parser_rule(
name = clifrule,
src = src,
hdr = hdr_file,
cc = cpp_file,
deps = deps + [proto_lib],
testonly = testonly,
)
# In OSS world, we cannot provide proto_lib as a direct dependency to our
# cc_library as it doesn't provide a cc file:
# in deps attribute of cc_library rule //oss_clif:oss_pyclif: proto_library
# rule '//oss_clif:oss_proto' is misplaced here (expected cc_inc_library,
# cc_library, objc_library, experimental_objc_library or cc_proto_library).
# So we need to synthesize our protobuf cc library name from our name as
# pyclif_name = proto_pyclif
# cc_proto_lib = proto_cc_pb2
native.cc_library(
name = name,
srcs = [cpp_file],
hdrs = [hdr_file],
deps = deps + [CLIF_CPP_RUNTIME] + pyclifs_to_ccproto_libs([name]),
visibility = visibility,
compatible_with = compatible_with,
testonly = testonly,
copts = EXTRA_CC_FLAGS,
)
| 34.191038
| 82
| 0.62544
|
49aa6f68a0966f75619ce1157e5cf5adc6bcb9bd
| 11,048
|
py
|
Python
|
homeassistant/components/elkm1/alarm_control_panel.py
|
UrbanDave/core
|
054ea77b45cbcb58db842b49f0629d493073cc1a
|
[
"Apache-2.0"
] | 3
|
2019-10-02T04:40:26.000Z
|
2020-02-16T13:19:08.000Z
|
homeassistant/components/elkm1/alarm_control_panel.py
|
UrbanDave/core
|
054ea77b45cbcb58db842b49f0629d493073cc1a
|
[
"Apache-2.0"
] | 25
|
2021-10-02T10:01:14.000Z
|
2022-03-31T06:11:49.000Z
|
homeassistant/components/elkm1/alarm_control_panel.py
|
UrbanDave/core
|
054ea77b45cbcb58db842b49f0629d493073cc1a
|
[
"Apache-2.0"
] | 1
|
2021-12-10T10:33:28.000Z
|
2021-12-10T10:33:28.000Z
|
"""Each ElkM1 area will be created as a separate alarm_control_panel."""
from __future__ import annotations
from typing import Any
from elkm1_lib.areas import Area
from elkm1_lib.const import AlarmState, ArmedStatus, ArmLevel, ArmUpState
from elkm1_lib.elements import Element
from elkm1_lib.elk import Elk
from elkm1_lib.keypads import Keypad
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
ATTR_CHANGED_BY,
AlarmControlPanelEntity,
AlarmControlPanelEntityFeature,
CodeFormat,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from . import ElkAttachedEntity, ElkEntity, create_elk_entities
from .const import (
ATTR_CHANGED_BY_ID,
ATTR_CHANGED_BY_KEYPAD,
ATTR_CHANGED_BY_TIME,
DOMAIN,
ELK_USER_CODE_SERVICE_SCHEMA,
)
DISPLAY_MESSAGE_SERVICE_SCHEMA = {
vol.Optional("clear", default=2): vol.All(vol.Coerce(int), vol.In([0, 1, 2])),
vol.Optional("beep", default=False): cv.boolean,
vol.Optional("timeout", default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=65535)
),
vol.Optional("line1", default=""): cv.string,
vol.Optional("line2", default=""): cv.string,
}
SERVICE_ALARM_DISPLAY_MESSAGE = "alarm_display_message"
SERVICE_ALARM_ARM_VACATION = "alarm_arm_vacation"
SERVICE_ALARM_ARM_HOME_INSTANT = "alarm_arm_home_instant"
SERVICE_ALARM_ARM_NIGHT_INSTANT = "alarm_arm_night_instant"
SERVICE_ALARM_BYPASS = "alarm_bypass"
SERVICE_ALARM_CLEAR_BYPASS = "alarm_clear_bypass"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the ElkM1 alarm platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
elk = elk_data["elk"]
entities: list[ElkEntity] = []
create_elk_entities(elk_data, elk.areas, "area", ElkArea, entities)
async_add_entities(entities, True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_ALARM_ARM_VACATION,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_vacation",
)
platform.async_register_entity_service(
SERVICE_ALARM_ARM_HOME_INSTANT,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_home_instant",
)
platform.async_register_entity_service(
SERVICE_ALARM_ARM_NIGHT_INSTANT,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_alarm_arm_night_instant",
)
platform.async_register_entity_service(
SERVICE_ALARM_DISPLAY_MESSAGE,
DISPLAY_MESSAGE_SERVICE_SCHEMA,
"async_display_message",
)
platform.async_register_entity_service(
SERVICE_ALARM_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_bypass",
)
platform.async_register_entity_service(
SERVICE_ALARM_CLEAR_BYPASS,
ELK_USER_CODE_SERVICE_SCHEMA,
"async_clear_bypass",
)
class ElkArea(ElkAttachedEntity, AlarmControlPanelEntity, RestoreEntity):
"""Representation of an Area / Partition within the ElkM1 alarm panel."""
_attr_supported_features = (
AlarmControlPanelEntityFeature.ARM_HOME
| AlarmControlPanelEntityFeature.ARM_AWAY
| AlarmControlPanelEntityFeature.ARM_NIGHT
)
_element: Area
def __init__(self, element: Element, elk: Elk, elk_data: dict[str, Any]) -> None:
"""Initialize Area as Alarm Control Panel."""
super().__init__(element, elk, elk_data)
self._elk = elk
self._changed_by_keypad: str | None = None
self._changed_by_time: str | None = None
self._changed_by_id: int | None = None
self._changed_by: str | None = None
self._state: str | None = None
async def async_added_to_hass(self) -> None:
"""Register callback for ElkM1 changes."""
await super().async_added_to_hass()
if len(self._elk.areas.elements) == 1:
for keypad in self._elk.keypads:
keypad.add_callback(self._watch_keypad)
self._element.add_callback(self._watch_area)
# We do not get changed_by back from resync.
if not (last_state := await self.async_get_last_state()):
return
if ATTR_CHANGED_BY_KEYPAD in last_state.attributes:
self._changed_by_keypad = last_state.attributes[ATTR_CHANGED_BY_KEYPAD]
if ATTR_CHANGED_BY_TIME in last_state.attributes:
self._changed_by_time = last_state.attributes[ATTR_CHANGED_BY_TIME]
if ATTR_CHANGED_BY_ID in last_state.attributes:
self._changed_by_id = last_state.attributes[ATTR_CHANGED_BY_ID]
if ATTR_CHANGED_BY in last_state.attributes:
self._changed_by = last_state.attributes[ATTR_CHANGED_BY]
def _watch_keypad(self, keypad: Element, changeset: dict[str, Any]) -> None:
assert isinstance(keypad, Keypad)
if keypad.area != self._element.index:
return
if changeset.get("last_user") is not None:
self._changed_by_keypad = keypad.name
self._changed_by_time = keypad.last_user_time.isoformat()
self._changed_by_id = keypad.last_user + 1
self._changed_by = self._elk.users.username(keypad.last_user)
self.async_write_ha_state()
def _watch_area(self, area: Element, changeset: dict[str, Any]) -> None:
if not (last_log := changeset.get("last_log")):
return
# user_number only set for arm/disarm logs
if (user_number := last_log.get("user_number")) is None:
return
self._changed_by_keypad = None
self._changed_by_id = user_number
self._changed_by = self._elk.users.username(user_number - 1)
self._changed_by_time = last_log["timestamp"]
self.async_write_ha_state()
@property
def code_format(self) -> CodeFormat | None:
"""Return the alarm code format."""
return CodeFormat.NUMBER
@property
def state(self) -> str | None:
"""Return the state of the element."""
return self._state
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Attributes of the area."""
attrs = self.initial_attrs()
elmt = self._element
attrs["is_exit"] = elmt.is_exit
attrs["timer1"] = elmt.timer1
attrs["timer2"] = elmt.timer2
if elmt.armed_status is not None:
attrs["armed_status"] = ArmedStatus(elmt.armed_status).name.lower()
if elmt.arm_up_state is not None:
attrs["arm_up_state"] = ArmUpState(elmt.arm_up_state).name.lower()
if elmt.alarm_state is not None:
attrs["alarm_state"] = AlarmState(elmt.alarm_state).name.lower()
attrs[ATTR_CHANGED_BY_KEYPAD] = self._changed_by_keypad
attrs[ATTR_CHANGED_BY_TIME] = self._changed_by_time
attrs[ATTR_CHANGED_BY_ID] = self._changed_by_id
return attrs
@property
def changed_by(self) -> str | None:
"""Last change triggered by."""
return self._changed_by
def _element_changed(self, element: Element, changeset: dict[str, Any]) -> None:
elk_state_to_hass_state = {
ArmedStatus.DISARMED.value: STATE_ALARM_DISARMED,
ArmedStatus.ARMED_AWAY.value: STATE_ALARM_ARMED_AWAY,
ArmedStatus.ARMED_STAY.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_STAY_INSTANT.value: STATE_ALARM_ARMED_HOME,
ArmedStatus.ARMED_TO_NIGHT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_NIGHT_INSTANT.value: STATE_ALARM_ARMED_NIGHT,
ArmedStatus.ARMED_TO_VACATION.value: STATE_ALARM_ARMED_AWAY,
}
if self._element.alarm_state is None:
self._state = None
elif self._element.alarm_state >= AlarmState.FIRE_ALARM.value:
# Area is in alarm state
self._state = STATE_ALARM_TRIGGERED
elif self._entry_exit_timer_is_running():
self._state = (
STATE_ALARM_ARMING if self._element.is_exit else STATE_ALARM_PENDING
)
elif self._element.armed_status is not None:
self._state = elk_state_to_hass_state[self._element.armed_status]
else:
self._state = None
def _entry_exit_timer_is_running(self) -> bool:
return self._element.timer1 > 0 or self._element.timer2 > 0
async def async_alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
if code is not None:
self._element.disarm(int(code))
async def async_alarm_arm_home(self, code: str | None = None) -> None:
"""Send arm home command."""
if code is not None:
self._element.arm(ArmLevel.ARMED_STAY.value, int(code))
async def async_alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm away command."""
if code is not None:
self._element.arm(ArmLevel.ARMED_AWAY.value, int(code))
async def async_alarm_arm_night(self, code: str | None = None) -> None:
"""Send arm night command."""
if code is not None:
self._element.arm(ArmLevel.ARMED_NIGHT.value, int(code))
async def async_alarm_arm_home_instant(self, code: str | None = None) -> None:
"""Send arm stay instant command."""
if code is not None:
self._element.arm(ArmLevel.ARMED_STAY_INSTANT.value, int(code))
async def async_alarm_arm_night_instant(self, code: str | None = None) -> None:
"""Send arm night instant command."""
if code is not None:
self._element.arm(ArmLevel.ARMED_NIGHT_INSTANT.value, int(code))
async def async_alarm_arm_vacation(self, code: str | None = None) -> None:
"""Send arm vacation command."""
if code is not None:
self._element.arm(ArmLevel.ARMED_VACATION.value, int(code))
async def async_display_message(
self, clear: int, beep: bool, timeout: int, line1: str, line2: str
) -> None:
"""Display a message on all keypads for the area."""
self._element.display_message(clear, beep, timeout, line1, line2)
async def async_bypass(self, code: str | None = None) -> None:
"""Bypass all zones in area."""
if code is not None:
self._element.bypass(int(code))
async def async_clear_bypass(self, code: str | None = None) -> None:
"""Clear bypass for all zones in area."""
if code is not None:
self._element.clear_bypass(int(code))
| 38.901408
| 85
| 0.68483
|
debfcf781a6bd11d14f27ca6dfaceca12a814f02
| 351
|
py
|
Python
|
dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeReadPropertyPythonTest.shouldCheckNumberValue.py
|
ingorichtsmeier/camunda-spin
|
f6f929cb4b49f5be3c06fcecf03008fec9fe25c1
|
[
"Apache-2.0"
] | 27
|
2015-02-15T22:01:39.000Z
|
2022-03-02T05:41:29.000Z
|
dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeReadPropertyPythonTest.shouldCheckNumberValue.py
|
ingorichtsmeier/camunda-spin
|
f6f929cb4b49f5be3c06fcecf03008fec9fe25c1
|
[
"Apache-2.0"
] | 101
|
2015-06-05T06:53:56.000Z
|
2022-02-28T19:32:44.000Z
|
dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeReadPropertyPythonTest.shouldCheckNumberValue.py
|
ingorichtsmeier/camunda-spin
|
f6f929cb4b49f5be3c06fcecf03008fec9fe25c1
|
[
"Apache-2.0"
] | 25
|
2015-05-26T21:28:42.000Z
|
2021-07-06T10:04:01.000Z
|
node = S(input, "application/json")
property1 = node.prop("order")
property2 = node.prop("id")
property3 = node.prop("customers")
property4 = node.prop("orderDetails")
property5 = node.prop("active")
value1 = property1.isNumber()
value2 = property2.isNumber()
value3 = property3.isNumber()
value4 = property4.isNumber()
value5 = property5.isNumber()
| 27
| 37
| 0.737892
|
6ee81ce2714243b7e9f5f46c7d63f5c591991d1f
| 2,699
|
py
|
Python
|
djaesy/urls.py
|
alan-fernandes/django-djaesy
|
bc95d65af8ad08a8bbe7275dbccabec581f4f2ac
|
[
"MIT"
] | null | null | null |
djaesy/urls.py
|
alan-fernandes/django-djaesy
|
bc95d65af8ad08a8bbe7275dbccabec581f4f2ac
|
[
"MIT"
] | null | null | null |
djaesy/urls.py
|
alan-fernandes/django-djaesy
|
bc95d65af8ad08a8bbe7275dbccabec581f4f2ac
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls import i18n
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LogoutView
from django.urls import path, include, re_path
from djaesy.base_views import MapListView, BaseView
from djaesy.login_views import Login, ResetPassword, ResetPasswordDone, ResetPasswordConfirm, \
ResetPasswordComplete
from djaesy.utils import load_view
from djaesy.views import UserList, UserCreate, UserUpdate, UserChangePassword, UserChangePasswordDone, \
RoleList, RoleCreate, RoleUpdate, SetLanguage, DjaesyTabViewWrapper
user_list_view = getattr(settings, 'DJAESY_USER_LIST_VIEW', UserList)
user_create_view = getattr(settings, 'DJAESY_USER_CREATE_VIEW', UserCreate)
user_update_view = getattr(settings, 'DJAESY_USER_UPDATE_VIEW', UserUpdate)
if isinstance(user_list_view, str):
user_list_view = load_view(user_list_view)
if isinstance(user_create_view, str):
user_create_view = load_view(user_create_view)
if isinstance(user_update_view, str):
user_update_view = load_view(user_update_view)
urlpatterns = [
path('', login_required(BaseView.as_view()), name='djaesy_main'),
re_path('app/(?P<path>.*)$', DjaesyTabViewWrapper.as_view(), name='djaesy_tabview'),
path('map/test', MapListView.as_view(), name='map_test'),
path('login/', Login.as_view(), name='login'),
path('logout/', LogoutView.as_view(), name='logout'),
path('login/reset-password', ResetPassword.as_view(), name='login_reset_password'),
path('login/reset-password/success', ResetPasswordDone.as_view(), name='password_reset_done'),
path('login/reset-password-confirm/<uidb64>/<token>/', ResetPasswordConfirm.as_view(), name='password_reset_confirm'),
path('login/reset-password-complete', ResetPasswordComplete.as_view(), name='password_reset_complete'),
path('user/list', login_required(user_list_view.as_view()), name='user_list'),
path('user/create', login_required(user_create_view.as_view()), name='user_create'),
path('user/update/<pk>', login_required(user_update_view.as_view()), name='user_update'),
path('user/change-password', login_required(UserChangePassword.as_view()), name='user_change_password'),
path('user/change-password/success', login_required(UserChangePasswordDone.as_view()), name='user_change_password_done'),
path('user/role/list', login_required(RoleList.as_view()), name='user_role_list'),
path('user/role/create', login_required(RoleCreate.as_view()), name='user_role_create'),
path('user/role/update/<pk>', login_required(RoleUpdate.as_view()), name='user_role_update'),
path('i18n/', include('django.conf.urls.i18n')),
]
| 51.903846
| 125
| 0.767692
|
5a5cc8ca210a1566ac6b88aaf0e5470559d8b696
| 2,511
|
py
|
Python
|
python/v1/bidders/endpoints/get_endpoints.py
|
Manny27nyc/authorized-buyers-rtb-api-samples
|
8074695ddf773223d6c22ed2e1914f96b2c43049
|
[
"Apache-2.0"
] | 4
|
2021-01-21T04:17:50.000Z
|
2021-12-26T19:34:18.000Z
|
python/v1/bidders/endpoints/get_endpoints.py
|
Manny27nyc/authorized-buyers-rtb-api-samples
|
8074695ddf773223d6c22ed2e1914f96b2c43049
|
[
"Apache-2.0"
] | 2
|
2021-03-04T09:19:23.000Z
|
2021-06-02T21:20:02.000Z
|
python/v1/bidders/endpoints/get_endpoints.py
|
Manny27nyc/authorized-buyers-rtb-api-samples
|
8074695ddf773223d6c22ed2e1914f96b2c43049
|
[
"Apache-2.0"
] | 5
|
2020-12-24T10:53:04.000Z
|
2021-12-08T08:27:49.000Z
|
#!/usr/bin/python
#
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets a single endpoint for the specified bidder and endpoint IDs."""
import argparse
import os
import pprint
import sys
sys.path.insert(0, os.path.abspath('../../..'))
from googleapiclient.errors import HttpError
import util
_ENDPOINTS_NAME_TEMPLATE = 'bidders/%s/endpoints/%s'
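# e.g. _ENDPOINTS_NAME_TEMPLATE % ('12345678', '987654321') yields
# 'bidders/12345678/endpoints/987654321' (both IDs here are hypothetical).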
DEFAULT_BIDDER_RESOURCE_ID = 'ENTER_BIDDER_RESOURCE_ID_HERE'
DEFAULT_ENDPOINT_RESOURCE_ID = 'ENTER_ENDPOINT_RESOURCE_ID_HERE'
def main(realtimebidding, account_id, endpoint_id):
print(f'Get endpoint with ID "{endpoint_id}" for bidder account with ID '
f'"{account_id}":')
try:
# Construct and execute the request.
response = realtimebidding.bidders().endpoints().get(
name=_ENDPOINTS_NAME_TEMPLATE % (account_id, endpoint_id)).execute()
except HttpError as e:
print(e)
sys.exit(1)
pprint.pprint(response)
if __name__ == '__main__':
try:
service = util.GetService(version='v1')
except IOError as ex:
print(f'Unable to create realtimebidding service - {ex}')
print('Did you specify the key file in util.py?')
sys.exit(1)
parser = argparse.ArgumentParser(
description=('Get an endpoint for the given bidder and endpoint IDs.'))
# Required fields.
parser.add_argument(
'-a', '--account_id', default=DEFAULT_BIDDER_RESOURCE_ID,
help=('The resource ID of the bidders resource under which the endpoint '
'exists. This will be used to construct the name used as a path '
'parameter for the endpoints.get request.'))
parser.add_argument(
'-e', '--endpoint_id', default=DEFAULT_ENDPOINT_RESOURCE_ID,
help=('The resource ID of the endpoints resource that is being '
'retrieved. This will be used to construct the name used as a '
'path parameter for the endpoints.get request.'))
args = parser.parse_args()
main(service, args.account_id, args.endpoint_id)
| 32.192308
| 79
| 0.719235
|
e4b766f9e5c9245ff768f1757a9d30b298ac576c
| 1,889
|
py
|
Python
|
source/engine/engine.py
|
Borrk/DeepLearning-Engine
|
54f6cdb8a76e76d9f439f8562652f545e4dbc02e
|
[
"MIT"
] | null | null | null |
source/engine/engine.py
|
Borrk/DeepLearning-Engine
|
54f6cdb8a76e76d9f439f8562652f545e4dbc02e
|
[
"MIT"
] | null | null | null |
source/engine/engine.py
|
Borrk/DeepLearning-Engine
|
54f6cdb8a76e76d9f439f8562652f545e4dbc02e
|
[
"MIT"
] | null | null | null |
import json
import os
import glob
import importlib
from utils.step_loader import *
class ai_engine(object):
""" the engine """
global_cache={}
steps = []
def __init__(self ):
self.global_cache['error_type'] = None
self.global_cache['error_msg'] = ''
def load_steps( self, config_file ):
print("Loading steps...")
# clear steps
self.dispose()
        with open(config_file, "r") as file:
            engine_cfg = json.load(file)
options = engine_cfg['step_options']
# initialize and config steps
for step_cfg in engine_cfg['steps']:
if step_cfg['active'] == True:
step = load_step( step_cfg['module'], step_cfg['name'] )
# get step options
step_options_name = step_cfg['options']
step_options = options[step_options_name]
# initialize step
step.__init__( step, output_channel=self.global_cache, name= step_cfg['name'] )
step.IParseConfig(step, config_json= step_options )
self.steps.append( step )
print( "step initialized: " + step.name )
def run(self):
print("Running engine...")
if len(self.steps) < 1:
raise Exception( "No steps created")
# run step by step
for step in self.steps:
step.IRun(step)
if self.global_cache['error_type'] == 'fatal':
raise Exception( self.global_cache['error_msg'] )
self.global_cache['error_type'] = None
self.global_cache['error_msg'] = ''
def run_from_file(self, config_file):
self.load_steps( config_file)
self.run()
def dispose(self):
print("Disposing steps...")
for step in self.steps:
step.IDispose(step)
self.steps.clear()
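# Illustrative config sketch (not shipped with this module) showing the keys
# that load_steps() reads; the module and option names below are hypothetical:
#
# {
#   "step_options": {"train_opts": {"epochs": 10}},
#   "steps": [
#     {"active": true, "name": "train_step", "module": "steps.train",
#      "options": "train_opts"}
#   ]
# }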
| 26.605634
| 95
| 0.56432
|
f9d2488f4b156f7eecf300cdf6ccdd9e28a7edd0
| 520
|
py
|
Python
|
svm/pad_na.py
|
kundajelab/bias_correction
|
521678ea8739473f793b0ce85e22e622d13df6fe
|
[
"MIT"
] | null | null | null |
svm/pad_na.py
|
kundajelab/bias_correction
|
521678ea8739473f793b0ce85e22e622d13df6fe
|
[
"MIT"
] | null | null | null |
svm/pad_na.py
|
kundajelab/bias_correction
|
521678ea8739473f793b0ce85e22e622d13df6fe
|
[
"MIT"
] | null | null | null |
import sys
task=sys.argv[1]
fold=sys.argv[2]
out_prefix="/srv/scratch/annashch/5_cell_lines_bias_correction/svm"
data=open(out_prefix+"/"+task+"/"+"svm_predictions_svmtrainset_genometestset"+"/"+"labels."+fold+".bed",'r').read().strip().split('\n')
outf=open(out_prefix+"/"+task+"/"+"svm_predictions_svmtrainset_genometestset"+"/"+"labels."+fold+'.filled.bed','w')
for line in data:
tokens=line.split('\t')
if tokens[-1]!="":
outf.write(line+'\n')
else:
outf.write(line+'NA\n')
outf.close()
| 37.142857
| 135
| 0.669231
|
5bcaf36c6748ebd5d7fa17147f8fde6a519cc662
| 6,069
|
py
|
Python
|
debandshit/debanders.py
|
LightArrowsEXE/debandshit
|
960c83828a60ee6c1fa28ad6c834eba50275aefc
|
[
"MIT"
] | 6
|
2020-05-02T06:31:21.000Z
|
2020-11-02T14:19:10.000Z
|
debandshit/debanders.py
|
LightArrowsEXE/vs-debandshit
|
960c83828a60ee6c1fa28ad6c834eba50275aefc
|
[
"MIT"
] | 1
|
2020-05-02T21:13:31.000Z
|
2020-05-02T21:13:31.000Z
|
debandshit/debanders.py
|
LightArrowsEXE/vs-debandshit
|
960c83828a60ee6c1fa28ad6c834eba50275aefc
|
[
"MIT"
] | 1
|
2020-05-02T20:22:31.000Z
|
2020-05-02T20:22:31.000Z
|
"""
Various functions used for debanding.
This used to be the `debandshit` module written by Z4ST1N,
with some functions that were rarely (if ever) used removed because I can't reasonably maintain them.
"""
from typing import Any, Dict, List, Optional, Union
import vapoursynth as vs
from vsutil import depth
from .f3kdb import SAMPLEMODE, F3kdb, SampleMode
from .placebo import Placebo
core = vs.core
__all__ = ['dumb3kdb', 'f3kbilateral', 'f3kpf', 'lfdeband', 'placebo_deband']
def dumb3kdb(clip: vs.VideoNode, radius: int = 16,
threshold: Union[int, List[int]] = 30, grain: Union[int, List[int]] = 0,
sample_mode: Union[SAMPLEMODE, SampleMode] = 2, use_neo: bool = False, **kwargs: Any) -> vs.VideoNode:
"""Small convenience function for calling F3kdb().deband()."""
return F3kdb(radius, threshold, grain, sample_mode, use_neo, **kwargs).deband(clip)
def f3kbilateral(clip: vs.VideoNode, radius: int = 16,
threshold: Union[int, List[int]] = 65, grain: Union[int, List[int]] = 0,
f3kdb_args: Optional[Dict[str, Any]] = None,
limflt_args: Optional[Dict[str, Any]] = None) -> vs.VideoNode:
"""
f3kbilateral: f3kdb multistage bilateral-esque filter from debandshit.
This function is more of a last resort for extreme banding.
    Recommended values are ~40-60 for y and c strengths.
Dependencies:
* mvsfunc
:param clip: Input clip
:param radius: Same as F3kdb constructor.
:param threshold: Same as F3kdb constructor.
:param grain: Same as F3kdb constructor.
    It is applied after mvsfunc.LimitFilter
    and calls another instance of F3kdb if != 0.
    :param f3kdb_args: Same as the F3kdb constructor kwargs.
    :param limflt_args: Arguments passed to mvsfunc.LimitFilter.
:return: Debanded clip
"""
try:
from mvsfunc import LimitFilter
except ModuleNotFoundError as mod_err:
raise ModuleNotFoundError("f3kbilateral: missing dependency 'mvsfunc'") from mod_err
if clip.format is None:
raise ValueError("f3kbilateral: 'Variable-format clips not supported'")
bits = clip.format.bits_per_sample
f3_args: Dict[str, Any] = dict()
if f3kdb_args is not None:
f3_args |= f3kdb_args
lf_args: Dict[str, Any] = dict(thr=0.6, elast=3.0, thrc=None)
if limflt_args is not None:
lf_args |= limflt_args
rad1 = round(radius * 4 / 3)
rad2 = round(radius * 2 / 3)
rad3 = round(radius / 3)
db1 = F3kdb(rad1, threshold, 0, **f3_args)
db2 = F3kdb(rad2, threshold, 0, **f3_args)
db3 = F3kdb(rad3, threshold, 0, **f3_args)
# Edit the thr of first f3kdb object
db1.thy, db1.thcb, db1.thcr = [max(1, th // 2) for th in (db1.thy, db1.thcb, db1.thcr)]
clip = depth(clip, 16)
flt1 = db1.deband(clip)
flt2 = db2.deband(flt1)
flt3 = db3.deband(flt2)
limit = LimitFilter(flt3, flt2, ref=clip, **lf_args)
if grain:
grained = F3kdb(grain=grain, **f3_args).grain(limit)
else:
grained = limit
return depth(grained, bits)
def f3kpf(clip: vs.VideoNode, radius: int = 16,
threshold: Union[int, List[int]] = 30, grain: Union[int, List[int]] = 0,
f3kdb_args: Optional[Dict[str, Any]] = None,
limflt_args: Optional[Dict[str, Any]] = None) -> vs.VideoNode:
"""
f3kdb with a simple prefilter by mawen1250 - https://www.nmm-hd.org/newbbs/viewtopic.php?f=7&t=1495#p12163.
Since the prefilter is a straight gaussian+average blur, f3kdb's effect becomes very strong, very fast.
Functions more or less like gradfun3 without the detail mask.
Dependencies:
* mvsfunc
:param clip: Input clip
:param radius: Banding detection range
:param threshold: Banding detection thresholds for multiple planes
:param f3kdb_args: Arguments passed to F3kdb constructor
:param limflt_args: Arguments passed to mvsfunc.LimitFilter
:return: Debanded clip
"""
try:
from mvsfunc import LimitFilter
except ModuleNotFoundError as mod_err:
raise ModuleNotFoundError("f3kpf: missing dependency 'mvsfunc'") from mod_err
if clip.format is None:
raise ValueError("f3kpf: 'Variable-format clips not supported'")
f3_args: Dict[str, Any] = dict()
if f3kdb_args is not None:
f3_args |= f3kdb_args
lf_args: Dict[str, Any] = dict(thr=0.3, elast=2.5, thrc=None)
if limflt_args is not None:
lf_args |= limflt_args
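    # Prefilter: a 3x3 gaussian-style blur on all planes followed by a 3x3 box blur on the luma plane.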
blur = core.std.Convolution(clip, [1, 2, 1, 2, 4, 2, 1, 2, 1]).std.Convolution([1] * 9, planes=0)
diff = core.std.MakeDiff(clip, blur)
deband = F3kdb(radius, threshold, grain, **f3_args).deband(blur)
deband = LimitFilter(deband, blur, **lf_args)
return core.std.MergeDiff(deband, diff)
def lfdeband(clip: vs.VideoNode) -> vs.VideoNode:
"""
A simple debander ported from AviSynth.
:param clip: Input clip
:return: Debanded clip
"""
if clip.format is None:
raise ValueError("lfdeband: 'Variable-format clips not supported'")
bits = clip.format.bits_per_sample
wss, hss = 1 << clip.format.subsampling_w, 1 << clip.format.subsampling_h
w, h = clip.width, clip.height
dw, dh = round(w / 2), round(h / 2)
clip = depth(clip, 16)
dsc = core.resize.Spline64(clip, dw-dw % wss, dh-dh % hss)
d3kdb = F3kdb(radius=30, threshold=80, grain=0).deband(clip)
ddif = core.std.MakeDiff(d3kdb, dsc)
dif = core.resize.Spline64(ddif, w, h)
out = core.std.MergeDiff(clip, dif)
return depth(out, bits)
def placebo_deband(clip: vs.VideoNode, radius: float = 16.0, threshold: Union[float, List[float]] = 4.0,
iterations: int = 1, grain: Union[float, List[float]] = 6.0, **kwargs: Any) -> vs.VideoNode:
"""Small convenience function for calling Placebo().deband()."""
return Placebo(radius, threshold, iterations, grain, **kwargs).deband(clip)
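# Illustrative usage sketch (not part of the original module); assumes this
# package is importable as `debandshit` and `src` is an existing VideoNode:
#
#   import debandshit as dbs
#   mild = dbs.dumb3kdb(src, radius=16, threshold=30)
#   strong = dbs.f3kbilateral(src, threshold=[50, 40, 40])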
| 34.87931
| 115
| 0.647718
|
32d24f0d41af43ecbaaadc4e3497f56ee6a4ef7c
| 416
|
py
|
Python
|
testproject/core/urls.py
|
ravillarreal/django-channels-jwt-auth-middleware
|
b53fc009ce7dd867eca436f18e0ad03c0f0c80be
|
[
"MIT"
] | 9
|
2021-01-21T16:26:48.000Z
|
2021-09-20T01:47:58.000Z
|
testproject/core/urls.py
|
ravillarreal/django-channels-jwt-auth-middleware
|
b53fc009ce7dd867eca436f18e0ad03c0f0c80be
|
[
"MIT"
] | null | null | null |
testproject/core/urls.py
|
ravillarreal/django-channels-jwt-auth-middleware
|
b53fc009ce7dd867eca436f18e0ad03c0f0c80be
|
[
"MIT"
] | 5
|
2021-04-11T09:40:44.000Z
|
2021-06-09T08:05:48.000Z
|
from django.contrib import admin
from django.urls import path, include
def api_url(base_url=''):
api_base_url = 'api/v1'
if not base_url:
return api_base_url + '/'
return f'{api_base_url}/{base_url}/'
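# e.g. api_url() -> 'api/v1/' and api_url('auth') -> 'api/v1/auth/'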
urlpatterns = [
path('admin/', admin.site.urls),
path(api_url('auth'), include('dj_rest_auth.urls')),
path(api_url('auth/register'), include('dj_rest_auth.registration.urls')),
]
| 24.470588
| 78
| 0.675481
|
7cbeffff05e02994c97ccb03528afb299a45528d
| 36,600
|
py
|
Python
|
pynetdicom/events.py
|
Jesse-Back/pynetdicom
|
68a12acdbdfb05a2f3c8fd93f25429550cce0347
|
[
"MIT"
] | 274
|
2019-01-04T01:45:13.000Z
|
2022-03-29T13:59:46.000Z
|
pynetdicom/events.py
|
Jesse-Back/pynetdicom
|
68a12acdbdfb05a2f3c8fd93f25429550cce0347
|
[
"MIT"
] | 386
|
2018-12-28T10:46:44.000Z
|
2022-03-26T00:57:10.000Z
|
pynetdicom/events.py
|
Jesse-Back/pynetdicom
|
68a12acdbdfb05a2f3c8fd93f25429550cce0347
|
[
"MIT"
] | 100
|
2019-01-18T13:43:42.000Z
|
2022-03-23T13:35:39.000Z
|
"""Module used to support events and event handling, not to be confused with
the state machine events.
"""
from datetime import datetime
import inspect
import logging
from pathlib import Path
import sys
from typing import (
Union, Callable, Any, Tuple, List, NamedTuple, Optional, TYPE_CHECKING,
Dict, cast, Iterator
)
from pydicom.dataset import Dataset, FileMetaDataset
from pydicom.filereader import dcmread
from pydicom.tag import BaseTag
from pydicom.uid import UID
from pynetdicom.dsutils import decode, create_file_meta
if TYPE_CHECKING: # pragma: no cover
from pynetdicom.association import Association
from pynetdicom.dimse_messages import (
C_ECHO_RQ, C_FIND_RQ, C_GET_RQ, C_MOVE_RQ, C_STORE_RQ,
N_ACTION_RQ, N_CREATE_RQ, N_DELETE_RQ, N_EVENT_REPORT_RQ, N_GET_RQ,
N_SET_RQ, DIMSEMessage
)
from pynetdicom.pdu import _PDUType
from pynetdicom.pdu_primitives import (
SOPClassCommonExtendedNegotiation
)
from pynetdicom.presentation import PresentationContextTuple
_RequestType = Union[
C_ECHO_RQ, C_FIND_RQ, C_GET_RQ, C_MOVE_RQ, C_STORE_RQ,
N_ACTION_RQ, N_CREATE_RQ, N_DELETE_RQ, N_EVENT_REPORT_RQ, N_GET_RQ,
N_SET_RQ
]
LOGGER = logging.getLogger('pynetdicom.events')
EventType = Union["NotificationEvent", "InterventionEvent"]
EventHandlerType = Union[
Tuple[EventType, Callable],
Tuple[EventType, Callable, List[Any]]
]
_BasicReturnType = Union[Dataset, int]
_DatasetReturnType = Tuple[_BasicReturnType, Optional[Dataset]]
_IteratorType = Iterator[Tuple[_BasicReturnType, Optional[Dataset]]]
# Notification events
# No returns/yields needed, can have multiple handlers per event
class NotificationEvent(NamedTuple):
"""Representation of a notification event.
.. versionadded:: 1.3
Possible notification events are:
* :class:`EVT_ABORTED`
* :class:`EVT_ACCEPTED`
* :class:`EVT_ACSE_RECV`
* :class:`EVT_ACSE_SENT`
* :class:`EVT_CONN_CLOSE`
* :class:`EVT_CONN_OPEN`
* :class:`EVT_DATA_RECV`
* :class:`EVT_DATA_SENT`
* :class:`EVT_DIMSE_RECV`
* :class:`EVT_DIMSE_SENT`
* :class:`EVT_ESTABLISHED`
* :class:`EVT_FSM_TRANSITION`
* :class:`EVT_PDU_RECV`
* :class:`EVT_PDU_SENT`
* :class:`EVT_REJECTED`
* :class:`EVT_RELEASED`
* :class:`EVT_REQUESTED`
"""
name: str
description: str
is_intervention: bool = False
is_notification: bool = True
# pylint: disable=line-too-long
EVT_ABORTED = NotificationEvent("EVT_ABORTED", "Association aborted")
EVT_ACCEPTED = NotificationEvent("EVT_ACCEPTED", "Association request accepted") # noqa
EVT_ACSE_RECV = NotificationEvent("EVT_ACSE_RECV", "ACSE primitive received from DUL") # noqa
EVT_ACSE_SENT = NotificationEvent("EVT_ACSE_SENT", "ACSE primitive sent to DUL") # noqa
EVT_CONN_CLOSE = NotificationEvent("EVT_CONN_CLOSE", "Connection closed")
EVT_CONN_OPEN = NotificationEvent("EVT_CONN_OPEN", "Connection opened")
EVT_DATA_RECV = NotificationEvent("EVT_DATA_RECV", "PDU data received from remote") # noqa
EVT_DATA_SENT = NotificationEvent("EVT_DATA_SENT", "PDU data sent to remote")
EVT_DIMSE_RECV = NotificationEvent("EVT_DIMSE_RECV", "Complete DIMSE message received and decoded") # noqa
EVT_DIMSE_SENT = NotificationEvent("EVT_DIMSE_SENT", "DIMSE message encoded and P-DATA primitives sent to DUL") # noqa
EVT_ESTABLISHED = NotificationEvent("EVT_ESTABLISHED", "Association established") # noqa
EVT_FSM_TRANSITION = NotificationEvent("EVT_FSM_TRANSITION", "State machine about to transition") # noqa
EVT_PDU_RECV = NotificationEvent("EVT_PDU_RECV", "PDU received and decoded")
EVT_PDU_SENT = NotificationEvent("EVT_PDU_SENT", "PDU encoded and sent")
EVT_REJECTED = NotificationEvent("EVT_REJECTED", "Association request rejected") # noqa
EVT_RELEASED = NotificationEvent("EVT_RELEASED", "Association released")
EVT_REQUESTED = NotificationEvent("EVT_REQUESTED", "Association requested")
# Intervention events
# Returns/yields needed if bound, can only have one handler per event
class InterventionEvent(NamedTuple):
"""Representation of an intervention event.
.. versionadded:: 1.3
Possible intervention events are:
* :class:`EVT_ASYNC_OPS`
* :class:`EVT_SOP_COMMON`
* :class:`EVT_SOP_EXTENDED`
* :class:`EVT_USER_ID`
* :class:`EVT_C_ECHO`
* :class:`EVT_C_FIND`
* :class:`EVT_C_GET`
* :class:`EVT_C_MOVE`
* :class:`EVT_C_STORE`
* :class:`EVT_N_ACTION`
* :class:`EVT_N_CREATE`
* :class:`EVT_N_DELETE`
* :class:`EVT_N_EVENT_REPORT`
* :class:`EVT_N_GET`
* :class:`EVT_N_SET`
"""
name: str
description: str
is_intervention: bool = True
is_notification: bool = False
EVT_ASYNC_OPS = InterventionEvent("EVT_ASYNC_OPS", "Asynchronous operations negotiation requested") # noqa
EVT_SOP_COMMON = InterventionEvent("EVT_SOP_COMMON", "SOP class common extended negotiation requested") # noqa
EVT_SOP_EXTENDED = InterventionEvent("EVT_SOP_EXTENDED", "SOP class extended negotiation requested") # noqa
EVT_USER_ID = InterventionEvent("EVT_USER_ID", "User identity negotiation requested") # noqa
EVT_C_ECHO = InterventionEvent("EVT_C_ECHO", "C-ECHO request received")
EVT_C_FIND = InterventionEvent("EVT_C_FIND", "C-FIND request received")
EVT_C_GET = InterventionEvent("EVT_C_GET", "C-GET request received")
EVT_C_MOVE = InterventionEvent("EVT_C_MOVE", "C-MOVE request received")
EVT_C_STORE = InterventionEvent("EVT_C_STORE", "C-STORE request received")
EVT_N_ACTION = InterventionEvent("EVT_N_ACTION", "N-ACTION request received")
EVT_N_CREATE = InterventionEvent("EVT_N_CREATE", "N-CREATE request received")
EVT_N_DELETE = InterventionEvent("EVT_N_DELETE", "N-DELETE request received")
EVT_N_EVENT_REPORT = InterventionEvent("EVT_N_EVENT_REPORT", "N-EVENT-REPORT request received") # noqa
EVT_N_GET = InterventionEvent("EVT_N_GET", "N-GET request received")
EVT_N_SET = InterventionEvent("EVT_N_SET", "N-SET request received")
# pylint: enable=line-too-long
_INTERVENTION_EVENTS = [
ii[1] for ii in inspect.getmembers(
sys.modules[__name__], lambda x: isinstance(x, InterventionEvent)
)
]
_NOTIFICATION_EVENTS = [
ii[1] for ii in inspect.getmembers(
sys.modules[__name__], lambda x: isinstance(x, NotificationEvent)
)
]
_HandlerBase = Tuple[Callable, Optional[List[Any]]]
_NotificationHandlerAttr = List[_HandlerBase]
_InterventionHandlerAttr = _HandlerBase
HandlerArgType = Union[_NotificationHandlerAttr, _InterventionHandlerAttr]
_HandlerAttr = Dict[EventType, HandlerArgType]
def _add_handler(
event: EventType, handlers_attr: _HandlerAttr, handler_arg: _HandlerBase
) -> None:
"""Add a handler to an object's handler recording attribute.
Parameters
----------
event : NotificationEvent or InterventionEvent
The event the handler should be bound to.
handlers_attr : dict
The object attribute of {event: Union[
[(handler, Optional[args])],
(handler, Optional[args])
]} used to record bindings.
handler_arg : Tuple[Callable, Optional[List[Any]]]
The handler and optional arguments to be bound.
"""
if isinstance(event, NotificationEvent):
if event not in handlers_attr:
handlers_attr[event] = []
if handler_arg not in handlers_attr[event]:
h = cast(_NotificationHandlerAttr, handlers_attr[event])
h.append(handler_arg)
elif isinstance(event, InterventionEvent):
# Intervention events - only one handler allowed
handlers_attr[event] = handler_arg
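# Illustrative sketch (not part of the library) of the shapes _add_handler produces in
# the recording attribute; ``on_data``, ``log_data`` and ``handle_echo`` are hypothetical
# handlers used only for this example:
#     {EVT_DATA_RECV: [(on_data, None), (log_data, ["conn"])]}   # notification: list of (handler, args)
#     {EVT_C_ECHO: (handle_echo, None)}                          # intervention: single (handler, args)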
def _remove_handler(
event: EventType, handlers_attr: _HandlerAttr, handler: Callable
) -> None:
"""Remove a handler from an object's handler recording attribute.
Parameters
----------
event : NotificationEvent or InterventionEvent
The event the handler should be unbound from.
handlers_attr : dict
The object attribute of
{
event: Union[
List[(handler, Optional[args])],
(handler, Optional[args])
]
} used to record bindings.
    handler : Callable
        The handler to be unbound.
"""
if event not in handlers_attr:
return
if isinstance(event, NotificationEvent):
handlers_list = cast(_NotificationHandlerAttr, handlers_attr[event])
handlers_attr[event] = [h for h in handlers_list if h[0] != handler]
if not handlers_attr[event]:
del handlers_attr[event]
elif isinstance(event, InterventionEvent):
# Unbind and replace with default
if handler in handlers_attr[event]:
handlers_attr[event] = (get_default_handler(event), None)
def get_default_handler(event: InterventionEvent) -> Callable[["Event"], Any]:
"""Return the default handler for an intervention `event`.
.. versionadded:: 1.3
"""
handlers = {
EVT_ASYNC_OPS: _async_ops_handler,
EVT_SOP_COMMON: _sop_common_handler,
EVT_SOP_EXTENDED: _sop_extended_handler,
EVT_USER_ID: _user_identity_handler,
EVT_C_ECHO: _c_echo_handler,
EVT_C_FIND: _c_find_handler,
EVT_C_GET: _c_get_handler,
EVT_C_MOVE: _c_move_handler,
EVT_C_STORE: _c_store_handler,
EVT_N_ACTION: _n_action_handler,
EVT_N_CREATE: _n_create_handler,
EVT_N_DELETE: _n_delete_handler,
EVT_N_EVENT_REPORT: _n_event_report_handler,
EVT_N_GET: _n_get_handler,
EVT_N_SET: _n_set_handler,
}
return handlers[event]
def trigger(
assoc: "Association",
event: EventType,
attrs: Optional[Dict[str, Any]] = None
) -> Optional[Any]:
"""Trigger an `event` and call any bound handler(s).
.. versionadded:: 1.3
Notification events can be bound to multiple handlers, intervention events
can only be bound to a single handler.
**Special Attributes**
If `attrs` contains:
* `_is_cancelled` key then :attr:`Event.is_cancelled` will be hooked into
the value's callable function.
* a C-FIND, C-GET or C-MOVE `request` key then :attr:`Event.identifier`
will return the decoded *Identifier* parameter value.
* a C-STORE `request` key then :attr:`Event.dataset` will return the
decoded *Data Set* parameter value.
* an N-ACTION `request` key then :attr:`Event.action_information` will
return the decoded *Action Information* parameter value.
* an N-CREATE `request` key then :attr:`Event.attribute_list` will return
the decoded *Attribute List* parameter value.
* an N-EVENT-REPORT `request` key then :attr:`Event.event_information` will
return the decoded *Event Information* parameter value.
* an N-SET `request` key then :attr:`Event.modification_list` will return
the decoded *Modification List* parameter value.
Parameters
----------
assoc : assoc.Association
The association in which the event occurred.
event : events.NotificationEvent or events.InterventionEvent
The event to trigger.
attrs : dict, optional
The attributes to set in the :class:`Event` instance that is passed to
the event's corresponding handler functions as
``{attribute name : value}``, default ``{}``.
Raises
------
Exception
If an exception occurs in an intervention event handler then the
exception will be raised. If an exception occurs in a notification
handler then the exception will be caught and logged instead.
"""
# Get the handler(s) bound to the event
# notification events: returns a list of 2-tuple (callable, args)
# intervention events: returns a 2-tuple of (callable, args)
# or (None, None)
handlers = assoc.get_handlers(event)
# Empty list or (None, None)
if not handlers or handlers[0] is None:
return None
evt = Event(assoc, event, attrs or {})
try:
# Intervention event - only single handler allowed
if isinstance(event, InterventionEvent):
handlers = cast(_InterventionHandlerAttr, handlers)
if handlers[1] is not None:
return handlers[0](evt, *handlers[1])
return handlers[0](evt)
# Notification event - multiple handlers are allowed
handlers = cast(_NotificationHandlerAttr, handlers)
for func, args in handlers:
if args:
func(evt, *args)
else:
func(evt)
except Exception as exc:
# Intervention exceptions get raised
if isinstance(event, InterventionEvent):
raise
# Capture exceptions for notification events
LOGGER.error(
f"Exception raised in user's 'evt.{event.name}' "
f"event handler '{func.__name__}'"
)
LOGGER.exception(exc)
return None
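# Usage sketch (illustrative only, not part of this module): it assumes an established
# ``Association`` instance ``assoc`` whose ``bind()`` and ``get_handlers()`` follow the
# recording pattern documented above; ``on_accepted``, ``request`` and ``context`` are
# hypothetical names used only for this example.
#
#     def on_accepted(event):
#         print(f"{event.event.name} occurred at {event.timestamp}")
#
#     assoc.bind(EVT_ACCEPTED, on_accepted)   # notification: multiple handlers may be bound
#     trigger(assoc, EVT_ACCEPTED)            # every bound handler is called; exceptions are logged
#
#     # Intervention events take a single handler; its return value is passed back to
#     # the caller and any exception it raises is re-raised by trigger().
#     status = trigger(assoc, EVT_C_ECHO, {"request": request, "context": context})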
class Event:
"""Representation of an event.
.. versionadded:: 1.3
.. warning::
Some of :class:`Event`'s attributes are set dynamically when an event is
triggered and are available only for a specific event. For example, the
``Event.context`` attribute is only available for events such as
``evt.EVT_C_ECHO``, ``evt.EVT_C_STORE``, etc. See the
:ref:`handler documentation<api_events>` for a list of what attributes
are available for a given event.
Attributes
----------
assoc : association.Association
The association in which the event occurred.
timestamp : datetime.datetime
The date/time the event was created. Will be slightly before or after
the actual event that this object represents.
"""
def __init__(
self,
assoc: "Association",
event: EventType,
attrs: Optional[Dict[str, Any]] = None
) -> None:
"""Create a new Event.
Parameters
----------
assoc : association.Association
The association in which the event occurred.
event : events.NotificationEvent or events.InterventionEvent
The representation of the event that occurred.
attrs : dict, optional
The ``{attribute : value}`` pairs to use to set the
:class:`Event`'s attributes.
"""
self.assoc = assoc
self._event = event
self.timestamp = datetime.now()
# Only decode a dataset when necessary
self._hash: Optional[int] = None
self._decoded: Optional[Dataset] = None
# Define type hints for dynamic attributes
self.request: "_RequestType"
self._is_cancelled: Callable[[int], bool]
self.context: "PresentationContextTuple"
self.current_state: str
self.fsm_event: str
self.next_state: str
self.action: str
self.data: bytes
self.message: DIMSEMessage
self.pdu: _PDUType
attrs = attrs or {}
for kk, vv in attrs.items():
if hasattr(self, kk):
raise AttributeError(
f"'Event' object already has an attribute '{kk}'"
)
setattr(self, kk, vv)
@property
def action_information(self) -> Dataset:
"""Return an N-ACTION request's `Action Information` as a *pydicom*
:class:`~pydicom.dataset.Dataset`.
Because *pydicom* defers data parsing during decoding until an element
is actually required the returned :class:`~pydicom.dataset.Dataset`
may raise an exception when any element is first accessed. It's
therefore important that proper error handling be part of any handler
that uses the returned :class:`~pydicom.dataset.Dataset`.
Returns
-------
pydicom.dataset.Dataset
The decoded *Action Information* dataset.
Raises
------
AttributeError
If the corresponding event is not an N-ACTION request.
"""
msg = (
"The corresponding event is not an N-ACTION request and has no "
"'Action Information' parameter"
)
return self._get_dataset("ActionInformation", msg)
@property
def action_type(self) -> int:
"""Return an N-ACTION request's `Action Type ID` as an :class:`int`.
.. versionadded:: 1.4
Returns
-------
int
The request's (0000,1008) *Action Type ID* value.
Raises
------
AttributeError
If the corresponding event is not an N-ACTION request.
"""
try:
req = cast("N_ACTION_RQ", self.request)
return cast(int, req.ActionTypeID)
except AttributeError:
raise AttributeError(
"The corresponding event is not an N-ACTION request and has "
"no 'Action Type ID' parameter"
)
@property
def attribute_identifiers(self) -> List[BaseTag]:
"""Return an N-GET request's `Attribute Identifier List` as a
:class:`list` of *pydicom* :class:`~pydicom.tag.BaseTag`.
Returns
-------
list of pydicom.tag.BaseTag
            The (0000,1005) *Attribute Identifier List* tags; may be an empty
            list if no *Attribute Identifier List* was included in the N-GET
            request.
Raises
------
AttributeError
If the corresponding event is not an N-GET request.
"""
try:
req = cast("N_GET_RQ", self.request)
attr_list = req.AttributeIdentifierList
if attr_list is None:
return []
return cast(List[BaseTag], attr_list)
except AttributeError:
pass
raise AttributeError(
"The corresponding event is not an N-GET request and has no "
"'Attribute Identifier List' parameter"
)
@property
def attribute_list(self) -> Dataset:
"""Return an N-CREATE request's `Attribute List` as a *pydicom*
:class:`~pydicom.dataset.Dataset`.
Because *pydicom* defers data parsing during decoding until an element
is actually required the returned :class:`~pydicom.dataset.Dataset`
may raise an exception when any element is first accessed. It's
therefore important that proper error handling be part of any handler
that uses the returned :class:`~pydicom.dataset.Dataset`.
Returns
-------
pydicom.dataset.Dataset
The decoded *Attribute List* dataset.
Raises
------
AttributeError
If the corresponding event is not an N-CREATE request.
"""
msg = (
"The corresponding event is not an N-CREATE request and has no "
"'Attribute List' parameter"
)
return self._get_dataset("AttributeList", msg)
@property
def dataset(self) -> Dataset:
"""Return a C-STORE request's `Data Set` as a *pydicom*
:class:`~pydicom.dataset.Dataset`.
Because *pydicom* defers data parsing during decoding until an element
is actually required the returned :class:`~pydicom.dataset.Dataset`
may raise an exception when any element is first accessed. It's
therefore important that proper error handling be part of any handler
that uses the returned :class:`~pydicom.dataset.Dataset`.
Returns
-------
pydicom.dataset.Dataset
The decoded *Data Set* dataset.
Raises
------
AttributeError
If the corresponding event is not a C-STORE request.
"""
msg = (
"The corresponding event is not a C-STORE request and has no "
"'Data Set' parameter"
)
try:
return dcmread(self.dataset_path)
except (TypeError, AttributeError):
pass
return self._get_dataset("DataSet", msg)
@property
def dataset_path(self) -> Path:
"""Return the path to the dataset when
:attr:`~pynetdicom._config.STORE_RECV_CHUNKED_DATASET` is ``True``.
.. versionadded:: 2.0
Returns
-------
pathlib.Path
The path to the dataset.
"""
try:
req = cast("C_STORE_RQ", self.request)
dataset_path = req._dataset_path
except AttributeError:
msg = (
"The corresponding event is either not a C-STORE request or "
"'STORE_RECV_CHUNKED_DATASET' is not True."
)
raise AttributeError(msg)
return cast(Path, dataset_path)
@property
def event(self) -> EventType:
"""Return the corresponding event.
.. versionadded:: 1.4
Returns
-------
events.InterventionEvent or events.NotificationEvent
The corresponding event as a
:func:`namedtuple<collections.namedtuple>`.
"""
return self._event
@property
def event_information(self) -> Dataset:
"""Return an N-EVENT-REPORT request's `Event Information` as a
*pydicom* :class:`~pydicom.dataset.Dataset`.
Because *pydicom* defers data parsing during decoding until an element
is actually required the returned :class:`~pydicom.dataset.Dataset`
may raise an exception when any element is first accessed. It's
therefore important that proper error handling be part of any handler
that uses the returned :class:`~pydicom.dataset.Dataset`.
Returns
-------
pydicom.dataset.Dataset
The decoded *Event Information* dataset.
Raises
------
AttributeError
If the corresponding event is not an N-EVENT-REPORT request.
"""
msg = (
"The corresponding event is not an N-EVENT-REPORT request and has "
"no 'Event Information' parameter"
)
return self._get_dataset("EventInformation", msg)
@property
def event_type(self) -> int:
"""Return an N-EVENT-REPORT request's `Event Type ID` as an
:class:`int`.
.. versionadded:: 1.4
Returns
-------
int
The request's (0000,1002) *Event Type ID* value.
Raises
------
AttributeError
If the corresponding event is not an N-EVENT-REPORT request.
"""
try:
req = cast("N_EVENT_REPORT_RQ", self.request)
return cast(int, req.EventTypeID)
except AttributeError:
raise AttributeError(
"The corresponding event is not an N-EVENT-REPORT request "
"and has no 'Event Type ID' parameter"
)
@property
def file_meta(self) -> FileMetaDataset:
r"""Return a *pydicom* :class:`~pydicom.dataset.Dataset` with the
:dcm:`File Meta Information<part10/chapter_7.html#sect_7.1>` for a
C-STORE request's `Data Set`.
Contains the following File Meta Information elements:
* (0002,0000) *File Meta Information Group Length* - set as ``0``, will
be updated with the correct value during write
* (0002,0001) *File Meta Information Version* - set as ``0x0001``
* (0002,0002) *Media Storage SOP Class UID* - set from the request's
*Affected SOP Class UID*
* (0002,0003) *Media Storage SOP Instance UID* - set from the request's
*Affected SOP Instance UID*
* (0002,0010) *Transfer Syntax UID* - set from the presentation context
used to transfer the *Data Set*
* (0002,0012) *Implementation Class UID* - set using
:attr:`~pynetdicom.PYNETDICOM_IMPLEMENTATION_UID`
* (0002,0013) *Implementation Version Name* - set using
:attr:`~pynetdicom.PYNETDICOM_IMPLEMENTATION_VERSION`
Examples
--------
Add the File Meta Information to the decoded *Data Set* and save it to
the :dcm:`DICOM File Format<part10/chapter_7.html>`.
.. code-block:: python
>>> ds = event.dataset
>>> ds.file_meta = event.file_meta
>>> ds.save_as('example.dcm', write_like_original=False)
Encode the File Meta Information in a new file and append the encoded
*Data Set* to it. This skips having to decode/re-encode the *Data Set*
as in the previous example.
.. code-block:: python
>>> from pydicom.filewriter import write_file_meta_info
>>> with open('example.dcm', 'wb') as f:
... f.write(b'\x00' * 128)
... f.write(b'DICM')
... write_file_meta_info(f, event.file_meta)
... f.write(event.request.DataSet.getvalue())
Returns
-------
pydicom.dataset.Dataset
The File Meta Information suitable for use with the decoded C-STORE
request's *Data Set*.
Raises
------
AttributeError
If the corresponding event is not a C-STORE request.
"""
if not hasattr(self.request, 'DataSet'):
raise AttributeError(
"The corresponding event is not a C-STORE request"
)
# A C-STORE request must have AffectedSOPClassUID and
# AffectedSOPInstanceUID
return create_file_meta(
sop_class_uid=self.request.AffectedSOPClassUID,
sop_instance_uid=self.request.AffectedSOPInstanceUID,
transfer_syntax=self.context.transfer_syntax,
)
def _get_dataset(self, attr: str, exc_msg: str) -> Dataset:
"""Return DIMSE dataset-like parameter as a *pydicom* Dataset.
Parameters
----------
attr : str
The name of the DIMSE primitive's dataset-like parameter, one of
'DataSet', 'Identifier', 'AttributeList', 'ModificationList',
'EventInformation', 'ActionInformation'.
exc_msg : str
The exception message to use if the request primitive has no
dataset-like parameter.
Returns
-------
pydicom.dataset.Dataset
The decoded dataset-like parameter.
Raises
------
AttributeError
If the corresponding event is not due to one of the DIMSE requests
with a dataset-like parameter.
"""
try:
bytestream = getattr(self.request, attr)
# If no change in encoded data then return stored decode
if self._hash == hash(bytestream):
return cast(Dataset, self._decoded)
# Some dataset-like parameters are optional
if bytestream and bytestream.getvalue() != b'':
# Dataset-like parameter has been used
t_syntax = self.context.transfer_syntax
ds = decode(
bytestream,
t_syntax.is_implicit_VR,
t_syntax.is_little_endian,
t_syntax.is_deflated
)
ds.is_little_endian = t_syntax.is_little_endian
ds.is_implicit_VR = t_syntax.is_implicit_VR
                # Store the decoded dataset in case it's accessed again
self._decoded = ds
else:
# Dataset-like parameter hasn't been used
self._decoded = Dataset()
self._hash = hash(bytestream)
return self._decoded
except AttributeError as exc:
pass
raise AttributeError(exc_msg)
@property
def identifier(self) -> Dataset:
"""Return a C-FIND, C-GET or C-MOVE request's `Identifier` as a
*pydicom* :class:`~pydicom.dataset.Dataset`.
Because *pydicom* defers data parsing during decoding until an element
is actually required the returned :class:`~pydicom.dataset.Dataset`
may raise an exception when any element is first accessed. It's
therefore important that proper error handling be part of any handler
that uses the returned :class:`~pydicom.dataset.Dataset`.
Returns
-------
pydicom.dataset.Dataset
The decoded *Identifier* dataset.
Raises
------
AttributeError
If the corresponding event is not a C-FIND, C-GET or C-MOVE
request.
"""
msg = (
"The corresponding event is not a C-FIND, C-GET or C-MOVE request "
"and has no 'Identifier' parameter"
)
return self._get_dataset("Identifier", msg)
@property
def is_cancelled(self) -> bool:
"""Return ``True`` if a C-CANCEL request has been received.
Returns
-------
bool
If this event corresponds to a C-FIND, C-GET or C-MOVE request
being received by a Service Class then returns ``True`` if a
C-CANCEL request with a *Message ID Being Responded To* parameter
value corresponding to the *Message ID* of the service request has
been received. If no such C-CANCEL request has been received or if
the event is not a C-FIND, C-GET or C-MOVE request then returns
``False``.
"""
try:
return self._is_cancelled(self.request.MessageID)
except AttributeError:
pass
return False
@property
def message_id(self) -> int:
"""Return a DIMSE service request's `Message ID` as :class:`int`.
.. versionadded:: 1.5
Returns
-------
int
The request's (0000,0110) *Message ID* value.
Raises
------
AttributeError
If the corresponding event is not one of the DIMSE service
requests.
"""
try:
return cast(int, self.request.MessageID)
except AttributeError:
raise AttributeError(
"The corresponding event is not a DIMSE service request and "
"has no 'Message ID' parameter"
)
@property
def modification_list(self) -> Dataset:
"""Return an N-SET request's `Modification List` as a *pydicom*
:class:`~pydicom.dataset.Dataset`.
Because *pydicom* defers data parsing during decoding until an element
is actually required the returned :class:`~pydicom.dataset.Dataset`
may raise an exception when any element is first accessed. It's
therefore important that proper error handling be part of any handler
that uses the returned :class:`~pydicom.dataset.Dataset`.
Returns
-------
pydicom.dataset.Dataset
The decoded *Modification List* dataset.
Raises
------
AttributeError
If the corresponding event is not an N-SET request.
"""
msg = (
"The corresponding event is not an N-SET request and has no "
"'Modification List' parameter"
)
return self._get_dataset("ModificationList", msg)
@property
def move_destination(self) -> bytes:
"""Return a C-MOVE request's `Move Destination` as :class:`bytes`.
.. versionadded:: 1.4
Returns
-------
bytes
The request's (0000,0600) *Move Destination* value as length 16
bytes (including trailing spaces as padding if required).
Raises
------
AttributeError
If the corresponding event is not a C-MOVE request.
"""
try:
return cast(bytes, self.request.MoveDestination)
except AttributeError:
raise AttributeError(
"The corresponding event is not a C-MOVE request and has no "
"'Move Destination' parameter"
)
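# Illustrative handler sketch (not part of the library): it shows the defensive error
# handling recommended by the dataset-like properties above and assumes it has been
# bound to EVT_C_STORE elsewhere; the output path 'out.dcm' and the failure status
# 0xC211 are arbitrary choices for illustration only.
def _example_store_handler(event: Event) -> int:
    """Hypothetical C-STORE handler using ``Event.dataset`` and ``Event.file_meta``."""
    try:
        # Decoding is deferred by pydicom, so element access may raise here
        ds = event.dataset
        ds.file_meta = event.file_meta
        ds.save_as("out.dcm", write_like_original=False)
    except Exception:
        return 0xC211  # failure status chosen for illustration
    return 0x0000  # success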
# Default extended negotiation event handlers
def _async_ops_handler(event: Event) -> Tuple[int, int]:
"""Default handler for when an Asynchronous Operations Window Negotiation
    item is included in the association request.
See _handlers.doc_handle_async for detailed documentation.
"""
raise NotImplementedError(
"No handler has been bound to 'evt.EVT_ASYNC_OPS', so no "
"Asynchronous Operations Window Negotiation response will be "
"sent"
)
def _sop_common_handler(
event: Event
) -> Dict[UID, "SOPClassCommonExtendedNegotiation"]:
"""Default handler for when one or more SOP Class Common Extended
Negotiation items are included in the association request.
See _handlers.doc_handle_sop_common for detailed documentation.
"""
return {}
def _sop_extended_handler(event: Event) -> Dict[UID, bytes]:
"""Default handler for when one or more SOP Class Extended Negotiation
items are included in the association request.
See _handlers.doc_handler_sop_extended for detailed documentation.
"""
return {}
def _user_identity_handler(event: Event) -> Tuple[bool, Optional[bytes]]:
    """Default handler for when a user identity negotiation item is included
with the association request.
See _handlers.doc_handler_userid for detailed documentation.
"""
raise NotImplementedError(
"No handler has been bound to 'evt.EVT_USER_ID', so the User Identity "
"Negotiation will be ignored and the association accepted (unless "
"rejected for another reason)"
)
# Default service class request handlers
def _c_echo_handler(event: Event) -> _BasicReturnType:
"""Default handler for when a C-ECHO request is received.
See _handlers.doc_handle_echo for detailed documentation.
"""
return 0x0000
def _c_find_handler(event: Event) -> _IteratorType:
"""Default handler for when a C-FIND request is received.
See _handlers.doc_handle_find for detailed documentation.
"""
raise NotImplementedError("No handler has been bound to 'evt.EVT_C_FIND'")
def _c_get_handler(event: Event) -> _IteratorType:
"""Default handler for when a C-GET request is received.
See _handlers.doc_handle_c_get for detailed documentation.
"""
raise NotImplementedError("No handler has been bound to 'evt.EVT_C_GET'")
def _c_move_handler(event: Event) -> _IteratorType:
"""Default handler for when a C-MOVE request is received.
See _handlers.doc_handle_move for detailed documentation.
"""
raise NotImplementedError("No handler has been bound to 'evt.EVT_C_MOVE'")
def _c_store_handler(event: Event) -> _BasicReturnType:
"""Default handler for when a C-STORE request is received.
See _handlers.doc_handle_store for detailed documentation.
"""
raise NotImplementedError("No handler has been bound to 'evt.EVT_C_STORE'")
def _n_action_handler(event: Event) -> _DatasetReturnType:
"""Default handler for when an N-ACTION request is received.
See _handlers.doc_handle_action for detailed documentation.
"""
raise NotImplementedError(
"No handler has been bound to 'evt.EVT_N_ACTION'"
)
def _n_create_handler(event: Event) -> _DatasetReturnType:
"""Default handler for when an N-CREATE request is received.
See _handlers.doc_handle_create for detailed documentation.
"""
raise NotImplementedError(
"No handler has been bound to 'evt.EVT_N_CREATE'"
)
def _n_delete_handler(event: Event) -> _BasicReturnType:
"""Default handler for when an N-DELETE request is received.
See _handlers.doc_handle_delete for detailed documentation.
"""
raise NotImplementedError(
"No handler has been bound to 'evt.EVT_N_DELETE'"
)
def _n_event_report_handler(event: Event) -> _DatasetReturnType:
"""Default handler for when an N-EVENT-REPORT request is received.
See _handlers.doc_handle_event_report for detailed documentation.
"""
raise NotImplementedError(
"No handler has been bound to 'evt.EVT_N_EVENT_REPORT'"
)
def _n_get_handler(event: Event) -> _DatasetReturnType:
"""Default handler for when an N-GET request is received.
See _handlers.doc_handle_n_get for detailed documentation.
"""
raise NotImplementedError("No handler has been bound to 'evt.EVT_N_GET'")
def _n_set_handler(event: Event) -> _DatasetReturnType:
"""Default handler for when an N-SET request is received.
See _handlers.doc_handle_set for detailed documentation.
"""
raise NotImplementedError("No handler has been bound to 'evt.EVT_N_SET'")
| 35.057471
| 119
| 0.64388
|
8b055b861f87f803831b724a06760f33c718778f
| 114
|
py
|
Python
|
folderly/localize.py
|
beesperester/python-folderly
|
fb0b81fb350544ab31e803381f9711d5ed7835cc
|
[
"MIT"
] | null | null | null |
folderly/localize.py
|
beesperester/python-folderly
|
fb0b81fb350544ab31e803381f9711d5ed7835cc
|
[
"MIT"
] | null | null | null |
folderly/localize.py
|
beesperester/python-folderly
|
fb0b81fb350544ab31e803381f9711d5ed7835cc
|
[
"MIT"
] | null | null | null |
""" Localize module. """
TEXT_FOLLOWING_PATHS_WILL_BE_ADDED = "The following paths will be added to the package."
| 38
| 88
| 0.77193
|
aba26f17f32afaca979ad265ed3fda20e3d62a08
| 63
|
py
|
Python
|
run.py
|
Wrytarian/DocSurfer
|
150eb99def20b483fa79528428c429e6fdfd1325
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
Wrytarian/DocSurfer
|
150eb99def20b483fa79528428c429e6fdfd1325
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
Wrytarian/DocSurfer
|
150eb99def20b483fa79528428c429e6fdfd1325
|
[
"Apache-2.0"
] | null | null | null |
from src import app
if __name__ == "__main__":
app.run()
| 10.5
| 26
| 0.634921
|
cd944da7366718c2e9981578451387c3f20ebecd
| 7,415
|
py
|
Python
|
test/test_scatter_gather_ops.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | 2
|
2022-02-14T13:56:03.000Z
|
2022-02-14T13:56:05.000Z
|
test/test_scatter_gather_ops.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | 1
|
2022-02-03T12:43:23.000Z
|
2022-02-03T12:47:53.000Z
|
test/test_scatter_gather_ops.py
|
steffenerickson/pytorch
|
0b656c4c69ce77ecd9aace486e471917e4660746
|
[
"Intel"
] | 2
|
2019-07-23T14:37:31.000Z
|
2019-07-23T14:47:13.000Z
|
# -*- coding: utf-8 -*-
# Owner(s): ["module: scatter & gather ops"]
import random
import torch
from torch.testing import make_tensor
from torch.testing._internal.common_utils import \
(run_tests, TestCase,)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes, dtypesIfCUDA,
toleranceOverride, tol)
# Protects against includes accidentally setting the default dtype
assert torch.get_default_dtype() is torch.float32
# Note: test_scatter_gather_ops.py
# This test file tests scatter and gather operations,
# like torch.scatter and torch.gather.
class TestScatterGather(TestCase):
# Fills an index tensor with valid indices
def _fill_indices(self, idx, dim, dim_size, elems_per_row, m, n, o):
for i in range(1 if dim == 0 else m):
for j in range(1 if dim == 1 else n):
for k in range(1 if dim == 2 else o):
ii = [i, j, k]
ii[dim] = slice(0, idx.size(dim) + 1)
idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row]
@dtypes(torch.float32, torch.complex64)
def test_gather(self, device, dtype):
m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
elems_per_row = random.randint(1, 10)
dim = random.randrange(3)
src = make_tensor((m, n, o), device=device, dtype=dtype)
idx_size = [m, n, o]
idx_size[dim] = elems_per_row
idx = make_tensor(idx_size, device=device, dtype=torch.long)
self._fill_indices(idx, dim, src.size(dim), elems_per_row, m, n, o)
actual = torch.gather(src, dim, idx)
expected = torch.zeros(idx_size, device=device, dtype=dtype)
for i in range(idx_size[0]):
for j in range(idx_size[1]):
for k in range(idx_size[2]):
ii = [i, j, k]
ii[dim] = idx[i, j, k]
expected[i, j, k] = src[tuple(ii)]
self.assertEqual(actual, expected, atol=0, rtol=0)
# Guarded because torch.max isn't defined for complex types
if not dtype.is_complex:
src = make_tensor((3, 4, 5), device=device, dtype=dtype)
expected, idx = src.max(2, True)
actual = torch.gather(src, 2, idx)
self.assertEqual(actual, expected, atol=0, rtol=0)
@dtypes(torch.bool)
def test_gather_bool(self, device, dtype):
src = torch.tensor(((False, True), (True, True)), device=device, dtype=dtype)
idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long)
actual = torch.gather(src, 1, idx)
expected = torch.tensor(((False, False), (True, True)), device=device, dtype=dtype)
self.assertEqual(actual, expected, atol=0, rtol=0)
def _test_scatter_base(self, fn, *, device, dtype, is_scalar, reduction):
m, n, o = random.randint(10, 20), random.randint(10, 20), random.randint(10, 20)
elems_per_row = random.randint(1, 10)
dim = random.randrange(3)
idx_size = [m, n, o]
idx_size[dim] = elems_per_row
idx = torch.empty(tuple(idx_size), device=device, dtype=torch.long)
self._fill_indices(idx, dim, ([m, n, o])[dim], elems_per_row, m, n, o)
if is_scalar:
src = random.random()
else:
src_size = [random.randint(1, 5) + s for s in idx_size]
src = make_tensor(tuple(src_size), device=device, dtype=dtype)
base = make_tensor((m, n, o), device=device, dtype=dtype)
if reduction is not None:
actual = fn(base.clone(), dim, idx, src, reduce=reduction)
else:
actual = fn(base.clone(), dim, idx, src)
expected = base.clone()
for i in range(idx_size[0]):
for j in range(idx_size[1]):
for k in range(idx_size[2]):
ii = [i, j, k]
ii[dim] = idx[i, j, k]
if fn is torch.Tensor.scatter_add_:
expected[tuple(ii)] += src[i, j, k]
else:
# method may be 'scatter_' or 'scatter'
# both might have a reduction argument
value = src if is_scalar else src[i, j, k]
if reduction == "add":
expected[tuple(ii)] += value
elif reduction == "multiply":
expected[tuple(ii)] *= value
else:
expected[tuple(ii)] = value
self.assertEqual(actual, expected, atol=0, rtol=0)
# Tests empty index
dst = make_tensor((2, 2), device=device, dtype=dtype)
idx = torch.tensor((), device=device, dtype=torch.long)
src = make_tensor((2, 2), device=device, dtype=dtype)
if reduction is not None:
actual = fn(dst, 0, idx, src, reduce=reduction)
else:
actual = fn(dst, 0, idx, src)
self.assertEqual(actual, dst, atol=0, rtol=0)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter_(self, device, dtype):
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=False, reduction=None)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter__scalar(self, device, dtype):
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=True, reduction=None)
# FIXME: RuntimeError: "cuda_scatter_gather_base_kernel_reduce_multiply" not implemented for 'ComplexFloat'
@toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0)})
@dtypesIfCUDA(torch.float16, torch.float32)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter__reductions(self, device, dtype):
for reduction in ("add", "multiply"):
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=False, reduction=reduction)
self._test_scatter_base(torch.Tensor.scatter_, device=device, dtype=dtype,
is_scalar=True, reduction=reduction)
@dtypes(torch.float16, torch.float32, torch.complex64)
def test_scatter_add_(self, device, dtype):
self._test_scatter_base(torch.Tensor.scatter_add_, device=device, dtype=dtype,
is_scalar=False, reduction=None)
@dtypes(torch.float32)
def test_scatter_add_mult_index_base(self, device, dtype):
m, n = 30, 40
idx = torch.zeros(m, n, device=device, dtype=torch.long)
src = torch.ones(m, n, device=device, dtype=dtype)
res0 = torch.zeros(m, n, device=device, dtype=dtype).scatter_add_(0, idx, src)
res1 = torch.zeros(m, n, device=device, dtype=dtype).scatter_add_(1, idx, src)
self.assertEqual(res0[0, :], m * torch.ones(n, device=device, dtype=dtype), atol=0, rtol=0)
self.assertEqual(res1[:, 0], n * torch.ones(m, device=device, dtype=dtype), atol=0, rtol=0)
# Generic Device Test Framework instantiation, see
# https://github.com/pytorch/pytorch/wiki/Running-and-writing-tests
# for details.
instantiate_device_type_tests(TestScatterGather, globals())
if __name__ == '__main__':
run_tests()
| 43.87574
| 111
| 0.598651
|
f6979295cb99476524582924383fb4efff8a2732
| 2,553
|
py
|
Python
|
core/dbt/task/deps.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | 1
|
2020-11-18T21:25:53.000Z
|
2020-11-18T21:25:53.000Z
|
core/dbt/task/deps.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | 50
|
2021-11-02T06:20:50.000Z
|
2022-03-31T06:23:16.000Z
|
core/dbt/task/deps.py
|
joellabes/dbt
|
1060035838650a30e86989cbf2693db7720ff002
|
[
"Apache-2.0"
] | 1
|
2021-02-01T17:54:24.000Z
|
2021-02-01T17:54:24.000Z
|
import dbt.utils
import dbt.deprecations
import dbt.exceptions
import dbt.tracking
from dbt.config import UnsetProfileConfig
from dbt.config.renderer import DbtProjectYamlRenderer
from dbt.context.target import generate_target_context
from dbt.deps.base import downloads_directory
from dbt.deps.resolver import resolve_packages
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.clients import system
from dbt.task.base import BaseTask, move_to_nearest_project_dir
class DepsTask(BaseTask):
ConfigType = UnsetProfileConfig
def __init__(self, args, config: UnsetProfileConfig):
super().__init__(args=args, config=config)
def track_package_install(
self, package_name: str, source_type: str, version: str
) -> None:
# Hub packages do not need to be hashed, as they are public
# Use the string 'local' for local package versions
if source_type == 'local':
package_name = dbt.utils.md5(package_name)
version = 'local'
elif source_type != 'hub':
package_name = dbt.utils.md5(package_name)
version = dbt.utils.md5(version)
dbt.tracking.track_package_install(
self.config,
self.config.args,
{
"name": package_name,
"source": source_type,
"version": version
}
)
def run(self):
system.make_directory(self.config.modules_path)
packages = self.config.packages.packages
if not packages:
logger.info('Warning: No packages were found in packages.yml')
return
with downloads_directory():
final_deps = resolve_packages(packages, self.config)
renderer = DbtProjectYamlRenderer(generate_target_context(
self.config, self.config.cli_vars
))
for package in final_deps:
logger.info('Installing {}', package)
package.install(self.config, renderer)
logger.info(' Installed from {}\n',
package.nice_version_name())
self.track_package_install(
package_name=package.name,
source_type=package.source_type(),
version=package.get_version())
@classmethod
def from_args(cls, args):
# deps needs to move to the project directory, as it does put files
# into the modules directory
move_to_nearest_project_dir(args)
return super().from_args(args)
| 33.592105
| 75
| 0.631414
|
b7c8dadccc4c73a9db593b8f6745709e72ed05ab
| 790
|
py
|
Python
|
fractal.py
|
nayanshah/python
|
250d5dfe7d48a15d53288d7a9f371ff7c66de57c
|
[
"MIT"
] | null | null | null |
fractal.py
|
nayanshah/python
|
250d5dfe7d48a15d53288d7a9f371ff7c66de57c
|
[
"MIT"
] | null | null | null |
fractal.py
|
nayanshah/python
|
250d5dfe7d48a15d53288d7a9f371ff7c66de57c
|
[
"MIT"
] | 1
|
2020-05-21T15:13:36.000Z
|
2020-05-21T15:13:36.000Z
|
from turtle import *
# Fractals
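# draw_fractal is a small L-system renderer: the string `initial_state` is rewritten
# `level` times (replacing `target` with `replacement` and `target2` with
# `replacement2`), then the final string is interpreted with turtle graphics
# ('F' = move forward, '+' = turn right, '-' = turn left by `angle` degrees).
# The sample call below (rules X -> X+YF+, Y -> -FX-Y at 90 degrees) appears to
# trace the dragon curve.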
def draw_fractal(length, angle, level, initial_state, target, replacement, target2, replacement2):
state = initial_state
for counter in range(level):
state2 = ''
for character in state:
if character == target:
state2 += replacement
elif character == target2:
state2 += replacement2
else:
state2 += character
state = state2
# draw
for character in state:
if character == 'F':
forward(length)
elif character == '+':
right(angle)
elif character == '-':
left(angle)
if __name__ == '__main__':
draw_fractal(5, 90, 10, 'FX', 'X', 'X+YF+', 'Y', '-FX-Y')
| 25.483871
| 98
| 0.517722
|
f87524caf1ad69b26d0535377722f22372e8dc14
| 746
|
py
|
Python
|
bot/conversation/convers_main.py
|
IgV52/bot
|
0c825d558f498269b65413b5772d8cf2a2318b9e
|
[
"MIT"
] | null | null | null |
bot/conversation/convers_main.py
|
IgV52/bot
|
0c825d558f498269b65413b5772d8cf2a2318b9e
|
[
"MIT"
] | null | null | null |
bot/conversation/convers_main.py
|
IgV52/bot
|
0c825d558f498269b65413b5772d8cf2a2318b9e
|
[
"MIT"
] | null | null | null |
from telegram.ext import CommandHandler, MessageHandler, Filters, ConversationHandler
from bot.conversation.talk.conv_talk import conv_talk
from bot.conversation.poll.conv_poll import conv_poll
from bot.conversation.conv_handler import start, dialogue_dontknow, reg
from bot.conversation.select import selection_handlers
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
'reg': [MessageHandler(Filters.text, reg)],
'main': [MessageHandler(Filters.regex('^(Начало)$'), start)],
'select': selection_handlers
},
fallbacks=[MessageHandler(Filters.video | Filters.photo | Filters.document
| Filters.location, dialogue_dontknow)])
| 49.733333
| 85
| 0.727882
|
e16ee5996de34d3091c933b380742a6c7bcc05af
| 3,682
|
py
|
Python
|
script/gen_requirements_all.py
|
entatix/home-assistant
|
0f8d599c829a49c8e35d5d6dc08588810fe1fbe8
|
[
"MIT"
] | 4
|
2017-03-22T21:16:45.000Z
|
2021-06-11T05:08:14.000Z
|
script/gen_requirements_all.py
|
entatix/home-assistant
|
0f8d599c829a49c8e35d5d6dc08588810fe1fbe8
|
[
"MIT"
] | null | null | null |
script/gen_requirements_all.py
|
entatix/home-assistant
|
0f8d599c829a49c8e35d5d6dc08588810fe1fbe8
|
[
"MIT"
] | 4
|
2016-11-27T01:59:49.000Z
|
2018-03-11T07:17:25.000Z
|
#!/usr/bin/env python3
"""Generate an updated requirements_all.txt."""
import importlib
import os
import pkgutil
import re
import sys
COMMENT_REQUIREMENTS = (
'RPi.GPIO',
'rpi-rf',
'Adafruit_Python_DHT',
'fritzconnection',
'pybluez',
'bluepy',
'python-lirc',
'gattlib',
'pyuserinput',
'evdev',
'pycups',
)
IGNORE_PACKAGES = (
'homeassistant.components.recorder.models',
)
def explore_module(package, explore_children):
"""Explore the modules."""
module = importlib.import_module(package)
found = []
if not hasattr(module, '__path__'):
return found
for _, name, _ in pkgutil.iter_modules(module.__path__, package + '.'):
found.append(name)
if explore_children:
found.extend(explore_module(name, False))
return found
def core_requirements():
"""Gather core requirements out of setup.py."""
with open('setup.py') as inp:
reqs_raw = re.search(
r'REQUIRES = \[(.*?)\]', inp.read(), re.S).group(1)
return re.findall(r"'(.*?)'", reqs_raw)
def comment_requirement(req):
"""Some requirements don't install on all systems."""
return any(ign in req for ign in COMMENT_REQUIREMENTS)
def gather_modules():
"""Collect the information and construct the output."""
reqs = {}
errors = []
output = []
for package in sorted(explore_module('homeassistant.components', True) +
explore_module('homeassistant.scripts', True)):
try:
module = importlib.import_module(package)
except ImportError:
if package not in IGNORE_PACKAGES:
errors.append(package)
continue
if not getattr(module, 'REQUIREMENTS', None):
continue
for req in module.REQUIREMENTS:
reqs.setdefault(req, []).append(package)
for key in reqs:
reqs[key] = sorted(reqs[key],
key=lambda name: (len(name.split('.')), name))
if errors:
print("******* ERROR")
print("Errors while importing: ", ', '.join(errors))
print("Make sure you import 3rd party libraries inside methods.")
return None
output.append('# Home Assistant core')
output.append('\n')
output.append('\n'.join(core_requirements()))
output.append('\n')
for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
for req in sorted(requirements,
key=lambda name: (len(name.split('.')), name)):
output.append('\n# {}'.format(req))
if comment_requirement(pkg):
output.append('\n# {}\n'.format(pkg))
else:
output.append('\n{}\n'.format(pkg))
return ''.join(output)
def write_file(data):
"""Write the modules to the requirements_all.txt."""
with open('requirements_all.txt', 'w+') as req_file:
req_file.write(data)
def validate_file(data):
"""Validate if requirements_all.txt is up to date."""
with open('requirements_all.txt', 'r') as req_file:
return data == ''.join(req_file)
def main():
"""Main section of the script."""
if not os.path.isfile('requirements_all.txt'):
print('Run this from HA root dir')
return
data = gather_modules()
if data is None:
sys.exit(1)
if sys.argv[-1] == 'validate':
if validate_file(data):
sys.exit(0)
print("******* ERROR")
print("requirements_all.txt is not up to date")
print("Please run script/gen_requirements_all.py")
sys.exit(1)
write_file(data)
if __name__ == '__main__':
main()
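# Usage sketch (comments only; the exact invocations are inferred from the messages
# above, so treat them as assumptions). Run from the Home Assistant root directory:
#
#     python3 script/gen_requirements_all.py            # regenerate requirements_all.txt
#     python3 script/gen_requirements_all.py validate   # exit non-zero if it is out of date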
| 25.748252
| 76
| 0.595872
|
2e67d6c4c8e179ffc6ce5098a2f8634ace278be9
| 2,342
|
py
|
Python
|
mac/google-cloud-sdk/lib/surface/compute/routes/delete.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | null | null | null |
mac/google-cloud-sdk/lib/surface/compute/routes/delete.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 4
|
2020-07-21T12:51:46.000Z
|
2022-01-22T10:29:25.000Z
|
mac/google-cloud-sdk/lib/surface/compute/routes/delete.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 1
|
2020-07-25T18:17:57.000Z
|
2020-07-25T18:17:57.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for deleting routes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.routes import flags
class Delete(base.DeleteCommand):
r"""Delete routes.
*{command}* deletes one or more Google Compute Engine routes.
## EXAMPLES
To delete a route with the name 'route-name', run:
$ {command} route-name
To delete two routes with the names 'route-name1' and 'route-name2',
run:
$ {command} route-name1 route-name2
"""
ROUTE_ARG = None
@staticmethod
def Args(parser):
Delete.ROUTE_ARG = flags.RouteArgument(plural=True)
Delete.ROUTE_ARG.AddArgument(parser, operation_type='delete')
parser.display_info.AddCacheUpdater(completers.RoutesCompleter)
def Run(self, args):
"""Issues requests necessary to delete Routes."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
route_refs = Delete.ROUTE_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(client))
utils.PromptForDeletion(route_refs)
requests = []
for route_ref in route_refs:
requests.append((client.apitools_client.routes, 'Delete',
client.messages.ComputeRoutesDeleteRequest(
**route_ref.AsDict())))
return client.MakeRequests(requests)
| 31.648649
| 74
| 0.741674
|
ddbc7ac3c77aece4aa680c141cfbfcf4ad82812b
| 43,468
|
py
|
Python
|
Improving Deep Neural Networks/2-Optimization_methods.py
|
Vishal-V/deeplearning.ai
|
0ff5d4b18c488491634be910de722f798c07de89
|
[
"MIT"
] | 92
|
2018-09-19T15:58:30.000Z
|
2022-03-05T05:19:40.000Z
|
Improving Deep Neural Networks/2-Optimization_methods.py
|
Vishal-V/deeplearning.ai
|
0ff5d4b18c488491634be910de722f798c07de89
|
[
"MIT"
] | 4
|
2018-09-10T15:08:24.000Z
|
2021-02-02T21:51:03.000Z
|
Improving Deep Neural Networks/Optimization_methods.py
|
Vishal-V/100-Days-of-ML-Code
|
677d8d6a19ae63d3aa2ddd74e9ce8ae7a06b71df
|
[
"MIT"
] | 59
|
2018-11-29T20:07:15.000Z
|
2022-03-31T12:34:57.000Z
|
# coding: utf-8
# # Optimization Methods
#
# Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
#
# Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
# <img src="images/cost.jpg" style="width:650px;height:300px;">
# <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
#
# **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
#
# To get started, run the following code to import the libraries you will need.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# ## 1 - Gradient Descent
#
# A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
#
# **Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
#
# where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[2]:
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l+1)]
### END CODE HERE ###
return parameters
# In[3]:
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td > **W1** </td>
# <td > [[ 1.63535156 -0.62320365 -0.53718766]
# [-1.07799357 0.85639907 -2.29470142]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.74604067]
# [-0.75184921]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.32171798 -0.25467393 1.46902454]
# [-2.05617317 -0.31554548 -0.3756023 ]
# [ 1.1404819 -1.09976462 -0.1612551 ]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.88020257]
# [ 0.02561572]
# [ 0.57539477]] </td>
# </tr>
# </table>
#
# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
#
# - **(Batch) Gradient Descent**:
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# # Forward propagation
# a, caches = forward_propagation(X, parameters)
# # Compute cost.
# cost = compute_cost(a, Y)
# # Backward propagation.
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
#
# ```
#
# - **Stochastic Gradient Descent**:
#
# ```python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# for j in range(0, m):
# # Forward propagation
# a, caches = forward_propagation(X[:,j], parameters)
# # Compute cost
# cost = compute_cost(a, Y[:,j])
# # Backward propagation
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
# ```
#
# In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
#
# <img src="images/kiank_sgd.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
#
# **Note** also that implementing SGD requires 3 for-loops in total:
# 1. Over the number of iterations
# 2. Over the $m$ training examples
# 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
#
# In practice, you'll often get faster results if you use neither the whole training set nor only one training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples; a sketch of this loop is shown after the figure below.
#
# <img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
#
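# **Mini-Batch Gradient Descent** (a sketch of the loop structure; it assumes the
# `random_mini_batches` helper implemented in the next section):
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
#     minibatches = random_mini_batches(X, Y, mini_batch_size)
#     for minibatch in minibatches:
#         (minibatch_X, minibatch_Y) = minibatch
#         # Forward propagation
#         a, caches = forward_propagation(minibatch_X, parameters)
#         # Compute cost
#         cost = compute_cost(a, minibatch_Y)
#         # Backward propagation
#         grads = backward_propagation(a, caches, parameters)
#         # Update parameters.
#         parameters = update_parameters(parameters, grads)
# ```
#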
# <font color='blue'>
# **What you should remember**:
# - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
# - You have to tune a learning rate hyperparameter $\alpha$.
# - With a well-tuned mini-batch size, it usually outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
# ## 2 - Mini-Batch Gradient descent
#
# Let's learn how to build mini-batches from the training set (X, Y).
#
# There are two steps:
# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y, so that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
#
# <img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
#
# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
#
# <img src="images/kiank_partition.png" style="width:550px;height:300px;">
#
# **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
# ```python
# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
# second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
# ...
# ```
#
# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be $m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$.
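# For example, with $m = 148$ and `mini_batch_size = 64` there are $\lfloor 148/64 \rfloor = 2$ full mini-batches and a final mini-batch of $148 - 2 \times 64 = 20$ examples, which matches the shapes printed by the sanity check below.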
# In[4]:
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
        number = num_complete_minibatches * mini_batch_size
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, number : number + (m - num_complete_minibatches * mini_batch_size)]
mini_batch_Y = shuffled_Y[:, number : number + (m - num_complete_minibatches * mini_batch_size)]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
# In[5]:
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td > **shape of the 1st mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 2nd mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 3rd mini_batch_X** </td>
# <td > (12288, 20) </td>
# </tr>
# <tr>
# <td > **shape of the 1st mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 2nd mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 3rd mini_batch_Y** </td>
# <td > (1, 20) </td>
# </tr>
# <tr>
# <td > **mini batch sanity check** </td>
# <td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - Shuffling and Partitioning are the two steps required to build mini-batches
# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
# ## 3 - Momentum
#
# Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
#
# Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
#
# <img src="images/opt_momentum.png" style="width:400px;height:250px;">
# <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
#
#
# **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
# for $l =1,...,L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# ```
# **Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
# In[6]:
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
W = parameters["W" + str(l+1)]
b = parameters["b" + str(l+1)]
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = np.zeros((W.shape[0], W.shape[1]))
v["db" + str(l+1)] = np.zeros((b.shape[0], b.shape[1]))
### END CODE HERE ###
return v
# In[7]:
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
# </table>
#
# **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
#
# $$ \begin{cases}
# v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
# W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
# \end{cases}\tag{3}$$
#
# $$\begin{cases}
# v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
# b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
# \end{cases}\tag{4}$$
#
# where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
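#
# For intuition, here is a tiny scalar walk-through of a single momentum step (the numbers are made up for illustration and are not part of the graded code):
# ```python
# beta, alpha = 0.9, 0.01
# v, dW, W = 0.0, 1.0, 2.0            # velocity starts at zero
# v = beta * v + (1 - beta) * dW      # v = 0.1 -> early steps are damped
# W = W - alpha * v                   # W = 2.0 - 0.001 = 1.999
# ```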
# In[8]:
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
v -- python dictionary containing your updated velocities
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l+1)] = np.multiply(beta, v["dW" + str(l+1)]) + np.multiply((1 - beta), grads["dW" + str(l+1)])
v["db" + str(l+1)] = np.multiply(beta, v["db" + str(l+1)]) + np.multiply((1 - beta), grads["db" + str(l+1)])
# update parameters
parameters["W" + str(l+1)] = np.subtract(parameters["W" + str(l+1)], learning_rate * v["dW" + str(l+1)])
parameters["b" + str(l+1)] = np.subtract(parameters["b" + str(l+1)], learning_rate * v["db" + str(l+1)])
### END CODE HERE ###
return parameters, v
# In[9]:
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# **Expected Output**:
#
# <table style="width:90%">
# <tr>
# <td > **W1** </td>
# <td > [[ 1.62544598 -0.61290114 -0.52907334]
# [-1.07347112 0.86450677 -2.30085497]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.74493465]
# [-0.76027113]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.31930698 -0.24990073 1.4627996 ]
# [-2.05974396 -0.32173003 -0.38320915]
# [ 1.13444069 -1.0998786 -0.1713109 ]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.87809283]
# [ 0.04055394]
# [ 0.58207317]] </td>
# </tr>
#
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[-0.01228902]
# [-0.09357694]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]]</td>
# </tr>
# </table>
#
#
# **Note** that:
# - The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
# - If $\beta = 0$, then this just becomes standard gradient descent without momentum.
#
# **How do you choose $\beta$?**
#
# - The larger the momentum $\beta$ is, the smoother the update, because more of the past gradients are taken into account. But if $\beta$ is too big, it can also smooth out the updates too much.
# - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
# - Tuning the optimal $\beta$ for your model may require trying several values to see what works best in terms of reducing the value of the cost function $J$.
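#
# A useful rule of thumb (an approximation, not part of the assignment): an exponentially weighted average with parameter $\beta$ effectively averages over roughly $\frac{1}{1-\beta}$ recent gradients.
# ```python
# for beta in (0.8, 0.9, 0.98):
#     print(beta, "-> averages over roughly", round(1 / (1 - beta)), "past gradients")
# # 0.8 -> 5, 0.9 -> 10, 0.98 -> 50
# ```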
# <font color='blue'>
# **What you should remember**:
# - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
# - You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
# ## 4 - Adam
#
# Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
#
# **How does Adam work?**
# 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
# 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
# 3. It updates parameters in a direction based on combining information from "1" and "2".
#
# The update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
# v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
# s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
# \end{cases}$$
# where:
# - t counts the number of steps Adam has taken
# - L is the number of layers
# - $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
# - $\alpha$ is the learning rate
# - $\varepsilon$ is a very small number to avoid dividing by zero
#
# As usual, we will store all parameters in the `parameters` dictionary
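#
# To see why the bias-correction terms matter, here is a toy calculation (illustrative numbers only, not graded code): at $t = 1$ the raw moving average is strongly biased toward its zero initialization, and dividing by $1 - \beta_1^t$ undoes that.
# ```python
# beta1, g, t = 0.9, 1.0, 1
# v = beta1 * 0.0 + (1 - beta1) * g     # raw estimate: 0.1 (biased toward zero)
# v_corrected = v / (1 - beta1 ** t)    # corrected:    1.0 (matches the gradient)
# ```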
# **Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
#
# **Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
# for $l = 1, ..., L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
#
# ```
# In[10]:
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
v["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
s["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
s["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
### END CODE HERE ###
return v, s
# In[11]:
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
# **Expected Output**:
#
# <table style="width:40%">
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
# <tr>
# <td > **s["dW1"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **s["db1"]** </td>
# <td > [[ 0.]
# [ 0.]] </td>
# </tr>
#
# <tr>
# <td > **s["dW2"]** </td>
# <td > [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]] </td>
# </tr>
#
# <tr>
# <td > **s["db2"]** </td>
# <td > [[ 0.]
# [ 0.]
# [ 0.]] </td>
# </tr>
#
# </table>
#
# **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
# v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
# s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
# s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
# \end{cases}$$
#
#
# **Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[12]:
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1) * grads["dW" + str(l+1)]
v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1) * grads["db" + str(l+1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1**t)
v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1**t)
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l+1)] = beta2 * s["dW" + str(l+1)] + (1 - beta2) * np.square(grads["dW" + str(l+1)])
s["db" + str(l+1)] = beta2 * s["db" + str(l+1)] + (1 - beta2) * np.square(grads["db" + str(l+1)])
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2**t)
s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2**t)
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * (v_corrected["dW" + str(l+1)] /(np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon))
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * (v_corrected["db" + str(l+1)] /(np.sqrt(s_corrected["db" + str(l+1)]) + epsilon))
### END CODE HERE ###
return parameters, v, s
# In[13]:
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td > **W1** </td>
# <td > [[ 1.63178673 -0.61919778 -0.53561312]
# [-1.08040999 0.85796626 -2.29409733]] </td>
# </tr>
#
# <tr>
# <td > **b1** </td>
# <td > [[ 1.75225313]
# [-0.75376553]] </td>
# </tr>
#
# <tr>
# <td > **W2** </td>
# <td > [[ 0.32648046 -0.25681174 1.46954931]
# [-2.05269934 -0.31497584 -0.37661299]
# [ 1.14121081 -1.09245036 -0.16498684]] </td>
# </tr>
#
# <tr>
# <td > **b2** </td>
# <td > [[-0.88529978]
# [ 0.03477238]
# [ 0.57537385]] </td>
# </tr>
# <tr>
# <td > **v["dW1"]** </td>
# <td > [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]] </td>
# </tr>
#
# <tr>
# <td > **v["db1"]** </td>
# <td > [[-0.01228902]
# [-0.09357694]] </td>
# </tr>
#
# <tr>
# <td > **v["dW2"]** </td>
# <td > [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]] </td>
# </tr>
#
# <tr>
# <td > **v["db2"]** </td>
# <td > [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]] </td>
# </tr>
# <tr>
# <td > **s["dW1"]** </td>
# <td > [[ 0.00121136 0.00131039 0.00081287]
# [ 0.0002525 0.00081154 0.00046748]] </td>
# </tr>
#
# <tr>
# <td > **s["db1"]** </td>
# <td > [[ 1.51020075e-05]
# [ 8.75664434e-04]] </td>
# </tr>
#
# <tr>
# <td > **s["dW2"]** </td>
# <td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
# [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
# [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td>
# </tr>
#
# <tr>
# <td > **s["db2"]** </td>
# <td > [[ 5.49507194e-05]
# [ 2.75494327e-03]
# [ 5.50629536e-04]] </td>
# </tr>
# </table>
#
# You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
# ## 5 - Model with different optimization algorithms
#
# Let's use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
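#
# If you do not have the course's `load_dataset()` helper, a similar two-class "moons" dataset can be generated with scikit-learn (this is an assumption for reproducing the experiment elsewhere, not the exact dataset used here):
# ```python
# from sklearn.datasets import make_moons
#
# X, Y = make_moons(n_samples=300, noise=0.2, random_state=3)
# train_X, train_Y = X.T, Y.reshape(1, -1)   # match the (features, examples) layout used in this notebook
# ```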
# In[14]:
train_X, train_Y = load_dataset()
# We have already implemented a 3-layer neural network. You will train it with:
# - Mini-batch **Gradient Descent**: it will call your function:
# - `update_parameters_with_gd()`
# - Mini-batch **Momentum**: it will call your functions:
# - `initialize_velocity()` and `update_parameters_with_momentum()`
# - Mini-batch **Adam**: it will call your functions:
# - `initialize_adam()` and `update_parameters_with_adam()`
# In[15]:
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
"""
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
        # Define the random minibatches. We increment the seed to reshuffle the dataset differently after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost
cost = compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
        # Print the cost every 1000 epochs
if print_cost and i % 1000 == 0:
print ("Cost after epoch %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# You will now run this 3 layer neural network with each of the 3 optimization methods.
#
# ### 5.1 - Mini-batch Gradient descent
#
# Run the following code to see how the model does with mini-batch gradient descent.
# In[16]:
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
# In[17]:
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.3 - Mini-batch with Adam mode
#
# Run the following code to see how the model does with Adam.
# In[18]:
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.4 - Summary
#
# <table>
# <tr>
# <td>
# **optimization method**
# </td>
# <td>
# **accuracy**
# </td>
# <td>
# **cost shape**
# </td>
#
# </tr>
# <td>
# Gradient descent
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# <tr>
# <td>
# Momentum
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# </tr>
# <tr>
# <td>
# Adam
# </td>
# <td>
# 94%
# </td>
# <td>
# smoother
# </td>
# </tr>
# </table>
#
# Momentum usually helps, but given the small learning rate and the simple dataset, its impact is almost negligible. Also, the large oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
#
# Adam, on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
#
# Some advantages of Adam include:
# - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum; a rough comparison is sketched below)
# - Usually works well even with little tuning of hyperparameters (except $\alpha$)
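#
# As a rough illustration of the memory point above (simple bookkeeping, not a measurement): each optimizer keeps a different number of extra arrays per parameter tensor.
# ```python
# extra_copies = {"gd": 0, "momentum": 1, "adam": 2}   # momentum keeps v; Adam keeps v and s
# for name, k in extra_copies.items():
#     print(f"{name}: {k} extra array(s) per parameter tensor")
# ```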
# **References**:
#
# - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
| 39.195672 | 504 | 0.595288 |
f135c05f8c60e445ac31343b7d537d03e18ba5ea | 5,635 | py | Python | auto_tinder_api.py | hc5aleksandrov/autotinder | 35a36bfe1ad5e09ce953ef7668d4b7d0584d74b1 | ["MIT"] | null | null | null | auto_tinder_api.py | hc5aleksandrov/autotinder | 35a36bfe1ad5e09ce953ef7668d4b7d0584d74b1 | ["MIT"] | null | null | null | auto_tinder_api.py | hc5aleksandrov/autotinder | 35a36bfe1ad5e09ce953ef7668d4b7d0584d74b1 | ["MIT"] | null | null | null |
import requests
import datetime
from geopy.geocoders import Nominatim
#from time import sleep
#from random import random
from likeliness_classifier import Classifier
import person_detector
import tensorflow as tf
#import time
import time
import random
from tinder_py.tinder.tinder import TinderClient
TINDER_URL = "https://api.gotinder.com"
geolocator = Nominatim(user_agent="auto-tinder")
PROF_FILE = "./images/unclassified/profiles.txt"
class tinderAPI():
def __init__(self, token):
self._token = token
def profile(self):
data = requests.get(TINDER_URL + "/v2/profile?include=account%2Cuser", headers={"X-Auth-Token": self._token}).json()
return Profile(data["data"], self)
def matches(self, limit=10):
data = requests.get(TINDER_URL + f"/v2/matches?count={limit}", headers={"X-Auth-Token": self._token}).json()
return list(map(lambda match: Person(match["person"], self), data["data"]["matches"]))
def like(self, user_id):
data = requests.get(TINDER_URL + f"/like/{user_id}", headers={"X-Auth-Token": self._token}).json()
return {
"is_match": data["match"],
"liked_remaining": data["likes_remaining"]
}
def dislike(self, user_id):
requests.get(TINDER_URL + f"/pass/{user_id}", headers={"X-Auth-Token": self._token}).json()
return True
def nearby_persons(self):
print('get: ', TINDER_URL + "/v2/recs/core", self._token)
data = requests.get(TINDER_URL + "/v2/recs/core", headers={"X-Auth-Token": self._token}).json()
print('data: ', data)
return list(map(lambda user: Person(user["user"], self), data["data"]["results"]))
def predict(person, classifier, sess):
ratings = []
i = 0
for image in person.photos:
i += 1
req = requests.get(image.url, stream=True)
tmp_filename = f"./images/tmp/run.jpg"
imgname = person.id + "_" + str(i) + '.jpg'
save_img_path = f"./images/unclassified/%s" % imgname
if req.status_code == 200:
with open(tmp_filename, "wb") as f:
f.write(req.content)
with open(save_img_path, "wb") as f2:
f2.write(req.content)
img = person_detector.get_person(tmp_filename, sess)
if img:
img = img.convert('L')
img.save(tmp_filename, "jpeg")
certainty = classifier.classify(tmp_filename)
pos = certainty["positive"]
ratings.append(pos)
ratings.sort(reverse=True)
ratings = ratings[:5]
if len(ratings) == 0:
return 0.001
if len(ratings) == 1:
return ratings[0]
return ratings[0]*0.6 + sum(ratings[1:])/len(ratings[1:])*0.4
if __name__ == "__main__":
token = "0e8d75b7-48cb-4ba5-8baf-9e1aa1dfe806"#"87ffb204-dad2-41c0-a039-62de457616d2"
#api = tinderAPI(token)
api = TinderClient(token)
print('API: ', api)
detection_graph = person_detector.open_graph()
with detection_graph.as_default():
with tf.compat.v1.Session() as sess:
classifier = Classifier(graph="./tf/training_output/retrained_graph.pb",
labels="./tf/training_output/retrained_labels.txt")
end_time = 1640213115 + 60*60*2.8
while True:#time() < end_time:
print(f"------ TIME LEFT: {(end_time - time.time())/60} min -----")
persons = api.get_recommendations()
pos_schools = ["Universität Zürich", "University of Zurich", "UZH", "HWZ Hochschule für Wirtschaft Zürich",
"ETH Zürich", "ETH Zurich", "ETH", "ETHZ", "Hochschule Luzern", "HSLU", "ZHAW",
"Zürcher Hochschule für Angewandte Wissenschaften", "Universität Bern", "Uni Bern",
"PHLU", "PH Luzern", "Fachhochschule Luzern", "Eidgenössische Technische Hochschule Zürich"]
print('len PERSONS: ', len(persons))
print("persons:",persons)
try:
for person in persons:
print("___person:",person)
score = predict(person, classifier, sess)
#print('score: ', score)
#for school in pos_schools:
# if school in person.schools:
# print()
# score *= 1.2
print("-------------------------")
print("ID: ", person.id)
print("Name: ", person.name)
#print("Schools: ", person.schools)
#print("Images: ", person.photos)
print(score)
if score > 0.6:
res = person.like()
print("LIKE")
print("Response: ", res)
else:
res = person.dislike()
print("DISLIKE")
print("Response: ", res)
except Exception:
pass
time.sleep(random.randint(3, 10) )
classifier.close()
| 40.25 | 127 | 0.506477 |
f998cdff92fb2f8e2356f1492a1476a67533a346 | 461 | py | Python | 2_guess_number_computer/guess-number-main.py | rodrigoc-silva/python-little-projects | 61c0727ed6556325135e00be8194aa69c37db37f | ["MIT"] | null | null | null | 2_guess_number_computer/guess-number-main.py | rodrigoc-silva/python-little-projects | 61c0727ed6556325135e00be8194aa69c37db37f | ["MIT"] | null | null | null | 2_guess_number_computer/guess-number-main.py | rodrigoc-silva/python-little-projects | 61c0727ed6556325135e00be8194aa69c37db37f | ["MIT"] | null | null | null |
import random
def guess(x):
random_number = random.randint(1, x)
guess = 0
while guess != random_number:
guess = int(input(f"Guess a number between 1 and {x}: "))
if guess < random_number:
print('Sorry, guess again. Too low.')
elif guess > random_number:
print('Sorry, guess again. Too high.')
print(f'Woohoo! You have guessed the number {random_number} correctly!')
guess(10)
| 27.117647 | 76 | 0.596529 |
c1c1c6266391d3bfe6e77592e13cab29f4e1587d | 2,587 | py | Python | config/settings/local.py | myeongseob91/cppcgram | e3c7127ff6067fb412063df59891225728c31e29 | ["MIT"] | null | null | null | config/settings/local.py | myeongseob91/cppcgram | e3c7127ff6067fb412063df59891225728c31e29 | ["MIT"] | 18 | 2020-06-05T18:49:24.000Z | 2022-03-08T22:50:46.000Z | config/settings/local.py | myeongseob91/cppcgram | e3c7127ff6067fb412063df59891225728c31e29 | ["MIT"] | null | null | null |
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='HkwNGPqPu79YE6tHVFF1RxQR3Wmd07fKqfZjf3j5UyRRsVdlpAEV6bcChupykvZT')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| 39.8 | 113 | 0.581368 |
cc0aa1a60eb7dda3cf1a6cb2f6928de2eecc61f3 | 4,309 | py | Python | tests/multi_objective/test_study.py | eecshope/optuna | b2daf0d9d032db03b7be49fc4856c78ac7d406e1 | ["MIT"] | null | null | null | tests/multi_objective/test_study.py | eecshope/optuna | b2daf0d9d032db03b7be49fc4856c78ac7d406e1 | ["MIT"] | null | null | null | tests/multi_objective/test_study.py | eecshope/optuna | b2daf0d9d032db03b7be49fc4856c78ac7d406e1 | ["MIT"] | null | null | null |
from typing import List
import uuid
import pytest
import optuna
from optuna.study import StudyDirection
from optuna.testing.storage import StorageSupplier
def test_create_study() -> None:
study = optuna.multi_objective.create_study(["maximize"])
assert study.n_objectives == 1
assert study.directions == [StudyDirection.MAXIMIZE]
study = optuna.multi_objective.create_study(["maximize", "minimize"])
assert study.n_objectives == 2
assert study.directions == [StudyDirection.MAXIMIZE, StudyDirection.MINIMIZE]
with pytest.raises(ValueError):
# Empty `directions` isn't allowed.
study = optuna.multi_objective.create_study([])
def test_load_study() -> None:
with StorageSupplier("new") as storage:
study_name = str(uuid.uuid4())
with pytest.raises(ValueError):
            # Test loading a nonexistent study.
optuna.multi_objective.study.load_study(study_name=study_name, storage=storage)
# Create a new study.
created_study = optuna.multi_objective.study.create_study(
["minimize"], study_name=study_name, storage=storage
)
# Test loading an existing study.
loaded_study = optuna.multi_objective.study.load_study(
study_name=study_name, storage=storage
)
assert created_study._study._study_id == loaded_study._study._study_id
@pytest.mark.parametrize("n_objectives", [1, 2, 3])
def test_optimize(n_objectives: int) -> None:
directions = ["minimize" for _ in range(n_objectives)]
study = optuna.multi_objective.create_study(directions)
def objective(trial: optuna.multi_objective.trial.MultiObjectiveTrial) -> List[float]:
return [trial.suggest_uniform("v{}".format(i), 0, 5) for i in range(n_objectives)]
study.optimize(objective, n_trials=10)
assert len(study.trials) == 10
for trial in study.trials:
assert len(trial.values) == n_objectives
def test_pareto_front() -> None:
study = optuna.multi_objective.create_study(["minimize", "maximize"])
assert {tuple(t.values) for t in study.get_pareto_front_trials()} == set()
study.optimize(lambda t: [2, 2], n_trials=1)
assert {tuple(t.values) for t in study.get_pareto_front_trials()} == {(2, 2)}
study.optimize(lambda t: [1, 1], n_trials=1)
assert {tuple(t.values) for t in study.get_pareto_front_trials()} == {(1, 1), (2, 2)}
study.optimize(lambda t: [3, 1], n_trials=1)
assert {tuple(t.values) for t in study.get_pareto_front_trials()} == {(1, 1), (2, 2)}
study.optimize(lambda t: [1, 3], n_trials=1)
assert {tuple(t.values) for t in study.get_pareto_front_trials()} == {(1, 3)}
assert len(study.get_pareto_front_trials()) == 1
study.optimize(lambda t: [1, 3], n_trials=1) # The trial result is the same as the above one.
assert {tuple(t.values) for t in study.get_pareto_front_trials()} == {(1, 3)}
assert len(study.get_pareto_front_trials()) == 2
def test_study_user_attrs() -> None:
study = optuna.multi_objective.create_study(["minimize", "maximize"])
assert study.user_attrs == {}
study.set_user_attr("foo", "bar")
assert study.user_attrs == {"foo": "bar"}
study.set_user_attr("baz", "qux")
assert study.user_attrs == {"foo": "bar", "baz": "qux"}
study.set_user_attr("foo", "quux")
assert study.user_attrs == {"foo": "quux", "baz": "qux"}
def test_study_system_attrs() -> None:
study = optuna.multi_objective.create_study(["minimize", "maximize"])
assert study.system_attrs == {"multi_objective:study:directions": ["minimize", "maximize"]}
study.set_system_attr("foo", "bar")
assert study.system_attrs == {
"multi_objective:study:directions": ["minimize", "maximize"],
"foo": "bar",
}
def test_enqueue_trial() -> None:
study = optuna.multi_objective.create_study(["minimize", "maximize"])
study.enqueue_trial({"x": 2})
study.enqueue_trial({"x": 3})
def objective(trial: optuna.multi_objective.trial.MultiObjectiveTrial) -> List[float]:
if trial.number == 0:
assert trial.suggest_uniform("x", 0, 100) == 2
elif trial.number == 1:
assert trial.suggest_uniform("x", 0, 100) == 3
return [0, 0]
study.optimize(objective, n_trials=2)
| 35.03252 | 98 | 0.672546 |
4946a2e8f511ebec8b61c3be5cdb7c0ec84b29ea | 18,317 | py | Python | PTTLibrary/i18n.py | buck5060/PTTLibrary | fa97ed1bfc2d51608534c106642b3f9eedfce1f8 | ["MIT"] | null | null | null | PTTLibrary/i18n.py | buck5060/PTTLibrary | fa97ed1bfc2d51608534c106642b3f9eedfce1f8 | ["MIT"] | null | null | null | PTTLibrary/i18n.py | buck5060/PTTLibrary | fa97ed1bfc2d51608534c106642b3f9eedfce1f8 | ["MIT"] | null | null | null |
try:
from . import Util
except ModuleNotFoundError:
import Util
class Language(object):
Chinese = 1
English = 2
MinValue = Chinese
MaxValue = English
LanguageList = [
Language.Chinese,
Language.English,
]
def SpecificLoad(inputLanguage, LangList):
global LanguageList
if len(LanguageList) != len(LangList):
        raise ValueError('SpecificLoad LangList length error')
if inputLanguage not in LanguageList:
        raise ValueError('SpecificLoad Unknown language', inputLanguage)
return LangList[LanguageList.index(inputLanguage)]
def load(inputLanguage):
if not Util.checkRange(Language, inputLanguage):
raise ValueError('Language', inputLanguage)
global Connect
Connect = SpecificLoad(inputLanguage, [
'連線',
'Connect',
])
global Start
Start = SpecificLoad(inputLanguage, [
'開始',
'Start',
])
global ConnectMode
ConnectMode = SpecificLoad(inputLanguage, [
Connect + '模式',
Connect + 'mode',
])
global ConnectMode_Telnet
ConnectMode_Telnet = SpecificLoad(inputLanguage, [
'Telnet',
'Telnet',
])
global ConnectMode_WebSocket
ConnectMode_WebSocket = SpecificLoad(inputLanguage, [
'WebSocket',
'WebSocket',
])
global Active
Active = SpecificLoad(inputLanguage, [
'啟動',
'Active',
])
global ErrorParameter
ErrorParameter = SpecificLoad(inputLanguage, [
'參數錯誤',
'Wrong parameter',
])
global ConnectCore
ConnectCore = SpecificLoad(inputLanguage, [
'連線核心',
'Connect Core',
])
global PTT
PTT = SpecificLoad(inputLanguage, [
'批踢踢',
'PTT',
])
global PTT2
PTT2 = SpecificLoad(inputLanguage, [
'批踢踢兔',
'PTT2',
])
global Init
Init = SpecificLoad(inputLanguage, [
'初始化',
'initialize',
])
global Done
Done = SpecificLoad(inputLanguage, [
'完成',
'Done',
])
global i18n
i18n = SpecificLoad(inputLanguage, [
'多國語系',
'i18n',
])
global Library
Library = SpecificLoad(inputLanguage, [
'函式庫',
'Library',
])
global Fail
Fail = SpecificLoad(inputLanguage, [
'失敗',
'Fail',
])
global Success
Success = SpecificLoad(inputLanguage, [
'成功',
'Success',
])
global Prepare
Prepare = SpecificLoad(inputLanguage, [
'準備',
'Prepare',
])
global Info
Info = SpecificLoad(inputLanguage, [
'資訊',
'INFO',
])
global Debug
Debug = SpecificLoad(inputLanguage, [
'除錯',
'DBUG',
])
global Again
Again = SpecificLoad(inputLanguage, [
'重新',
'Re',
])
global ErrorIDPW
ErrorIDPW = SpecificLoad(inputLanguage, [
'密碼不對或無此帳號',
'Wrong password or no such id',
])
global ScreenNoMatchTarget
ScreenNoMatchTarget = SpecificLoad(inputLanguage, [
'畫面無法辨識',
'This screen is not recognized',
])
global SigningUnPleaseWait
SigningUnPleaseWait = SpecificLoad(inputLanguage, [
'登入中,請稍候',
'Signing in, please wait',
])
global Msg
Msg = SpecificLoad(inputLanguage, [
'訊息',
'Message',
])
global SigningUpdate
SigningUpdate = SpecificLoad(inputLanguage, [
'更新與同步線上使用者及好友名單',
'Updating and synchronizing online users and friends list',
])
global SendMsg
SendMsg = SpecificLoad(inputLanguage, [
'送出訊息',
'Send Msg',
])
global KickOtherLogin
KickOtherLogin = SpecificLoad(inputLanguage, [
'剔除其他登入',
'Kick other login',
])
global NotKickOtherLogin
NotKickOtherLogin = SpecificLoad(inputLanguage, [
'不剔除其他登入',
'Not kick other login',
])
global AnyKeyContinue
AnyKeyContinue = SpecificLoad(inputLanguage, [
'請按任意鍵繼續',
'Any key to continue',
])
global Login
Login = SpecificLoad(inputLanguage, [
'登入',
'Login',
])
global LoginSuccess
LoginSuccess = SpecificLoad(inputLanguage, [
Login + Success,
Login + ' ' + Success,
])
global LoginFail
LoginFail = SpecificLoad(inputLanguage, [
Login + Fail,
Login + ' ' + Fail,
])
global MailBoxFull
MailBoxFull = SpecificLoad(inputLanguage, [
'郵件已滿',
'Mail box is full',
])
global PostNotFinish
PostNotFinish = SpecificLoad(inputLanguage, [
'文章尚未完成',
'Post is not finish',
])
global SystemBusyTryLater
SystemBusyTryLater = SpecificLoad(inputLanguage, [
'系統負荷過重, 請稍後再試',
'System is overloaded, please try again later',
])
global DelWrongPWRecord
DelWrongPWRecord = SpecificLoad(inputLanguage, [
'刪除以上錯誤嘗試的記錄',
'Delete the record of the wrong password',
])
global Logout
Logout = SpecificLoad(inputLanguage, [
'登出',
'Logout',
])
global SpendTime
SpendTime = SpecificLoad(inputLanguage, [
'花費時間',
'Spend time',
])
global GetPTTTime
GetPTTTime = SpecificLoad(inputLanguage, [
'取得批踢踢時間',
'Get PTT time',
])
global LoginTooOften
LoginTooOften = SpecificLoad(inputLanguage, [
'登入太頻繁',
'Login too often',
])
global MustBe
MustBe = SpecificLoad(inputLanguage, [
'必須是',
'must be',
])
global String
String = SpecificLoad(inputLanguage, [
'字串',
'String',
])
global Integer
Integer = SpecificLoad(inputLanguage, [
'整數',
'Integer',
])
global Boolean
Boolean = SpecificLoad(inputLanguage, [
'布林值',
'Boolean',
])
global ID
ID = SpecificLoad(inputLanguage, [
'帳號',
'ID',
])
global Password
Password = SpecificLoad(inputLanguage, [
'密碼',
'Password',
])
global Board
Board = SpecificLoad(inputLanguage, [
'看板',
'Board',
])
global BothInput
BothInput = SpecificLoad(inputLanguage, [
'同時輸入',
'Both input',
])
global NoInput
NoInput = SpecificLoad(inputLanguage, [
'沒有輸入',
'No input',
])
global CatchPost
CatchPost = SpecificLoad(inputLanguage, [
'取得文章',
'Catch post',
])
global PostDeleted
PostDeleted = SpecificLoad(inputLanguage, [
'文章已經被刪除',
'Post has been deleted',
])
global BrowsePost
BrowsePost = SpecificLoad(inputLanguage, [
'瀏覽文章',
'Browse post',
])
global CatchIP
CatchIP = SpecificLoad(inputLanguage, [
'取得 IP',
'Catch IP',
])
global GetPush
GetPush = SpecificLoad(inputLanguage, [
'取得推文',
'Get push',
])
global Update
Update = SpecificLoad(inputLanguage, [
'更新',
'Update',
])
global Push
Push = SpecificLoad(inputLanguage, [
'推文',
'Push',
])
global Date
Date = SpecificLoad(inputLanguage, [
'日期',
'Date',
])
global Content
Content = SpecificLoad(inputLanguage, [
'內文',
'Content',
])
global Author
Author = SpecificLoad(inputLanguage, [
'作者',
'Author',
])
global Title
Title = SpecificLoad(inputLanguage, [
'標題',
'Title',
])
global UnknowError
UnknowError = SpecificLoad(inputLanguage, [
'未知錯誤',
        'Unknown Error',
])
global RequireLogin
RequireLogin = SpecificLoad(inputLanguage, [
'請先' + Login,
'Please ' + Login + ' first',
])
global HasPushPermission
HasPushPermission = SpecificLoad(inputLanguage, [
'使用者擁有推文權限',
'User Has Push Permission',
])
global HasPostPermission
HasPostPermission = SpecificLoad(inputLanguage, [
'使用者擁有貼文權限',
'User Has Post Permission',
])
global NoPermission
NoPermission = SpecificLoad(inputLanguage, [
'使用者沒有權限',
'User Has No Permission',
])
global SaveFile
SaveFile = SpecificLoad(inputLanguage, [
'儲存檔案',
'Save File',
])
global SelectSignature
SelectSignature = SpecificLoad(inputLanguage, [
'選擇簽名檔',
'Select Signature',
])
global FindNewestIndex
FindNewestIndex = SpecificLoad(inputLanguage, [
'找到最新編號',
'Find Newest Index',
])
global OutOfRange
OutOfRange = SpecificLoad(inputLanguage, [
'超出範圍',
'Out Of Range',
])
global MustSmall
MustSmall = SpecificLoad(inputLanguage, [
'必須小於',
'Must Small than',
])
global VotePost
VotePost = SpecificLoad(inputLanguage, [
'投票文章',
'Vote Post',
])
global SubstandardPost
SubstandardPost = SpecificLoad(inputLanguage, [
'不合規範文章',
'Substandard Post',
])
global DoNothing
DoNothing = SpecificLoad(inputLanguage, [
'不處理',
'Do Nothing',
])
global NoFastPush
NoFastPush = SpecificLoad(inputLanguage, [
'禁止快速連續推文',
'No Fast Push',
])
global OnlyArrow
OnlyArrow = SpecificLoad(inputLanguage, [
'使用加註方式',
'Arrow Only in Push',
])
global GetUser
GetUser = SpecificLoad(inputLanguage, [
'取得使用者',
'Get User',
])
global NoSuchUser
NoSuchUser = SpecificLoad(inputLanguage, [
'無該使用者',
'No such user',
])
global WaterBall
WaterBall = SpecificLoad(inputLanguage, [
'水球',
'Water Ball',
])
global UserOffline
UserOffline = SpecificLoad(inputLanguage, [
'使用者離線',
'User Offline',
])
global SetCallStatus
SetCallStatus = SpecificLoad(inputLanguage, [
'設定呼叫器狀態',
'Set Call Status',
])
global Throw
Throw = SpecificLoad(inputLanguage, [
'丟',
'Throw',
])
global NoWaterball
NoWaterball = SpecificLoad(inputLanguage, [
'無訊息記錄',
'No Waterball',
])
global BrowseWaterball
BrowseWaterball = SpecificLoad(inputLanguage, [
'瀏覽水球紀錄',
'Browse Waterball',
])
global LanguageModule
LanguageModule = SpecificLoad(inputLanguage, [
'語言模組',
'Language Module',
])
global English
English = SpecificLoad(inputLanguage, [
'英文',
'English',
])
global ChineseTranditional
ChineseTranditional = SpecificLoad(inputLanguage, [
'繁體中文',
        'Chinese Traditional',
])
global GetCallStatus
GetCallStatus = SpecificLoad(inputLanguage, [
'取得呼叫器狀態',
'Get BBCall Status',
])
global NoMoney
NoMoney = SpecificLoad(inputLanguage, [
'P 幣不足',
'No Money',
])
global InputID
InputID = SpecificLoad(inputLanguage, [
'輸入帳號',
'Input ID',
])
global InputMoney
InputMoney = SpecificLoad(inputLanguage, [
'輸入金額',
'Input Money',
])
global AuthenticationHasNotExpired
AuthenticationHasNotExpired = SpecificLoad(inputLanguage, [
'認證尚未過期',
'Authentication has not expired',
])
global VerifyID
VerifyID = SpecificLoad(inputLanguage, [
'確認身分',
'Verify ID',
])
global TradingInProgress
TradingInProgress = SpecificLoad(inputLanguage, [
'交易正在進行中',
'Trading is in progress',
])
global Transaction
Transaction = SpecificLoad(inputLanguage, [
'交易',
'Transaction',
])
global MoneyTooFew
MoneyTooFew = SpecificLoad(inputLanguage, [
'金額過少,交易取消!',
'The amount is too small, the transaction is cancelled!',
])
global ConstantRedBag
ConstantRedBag = SpecificLoad(inputLanguage, [
'不修改紅包袋',
'Constant the red bag',
])
global SendMail
SendMail = SpecificLoad(inputLanguage, [
'寄信',
'Send Mail',
])
global Select
Select = SpecificLoad(inputLanguage, [
'選擇',
'Select',
])
global SignatureFile
SignatureFile = SpecificLoad(inputLanguage, [
'簽名檔',
'Signature File',
])
global NoSignatureFile
NoSignatureFile = SpecificLoad(inputLanguage, [
'不加簽名檔',
'No Signature File',
])
global SelfSaveDraft
SelfSaveDraft = SpecificLoad(inputLanguage, [
'自存底稿',
'Self-Save Draft',
])
global MailBox
MailBox = SpecificLoad(inputLanguage, [
'郵件選單',
'Mail Box',
])
global NoSuchBoard
NoSuchBoard = SpecificLoad(inputLanguage, [
'無該板面',
'No Such Board',
])
global HideSensitiveInfor
HideSensitiveInfor = SpecificLoad(inputLanguage, [
'隱藏敏感資訊',
'Hide Sensitive Information',
])
global PostFormatError
PostFormatError = SpecificLoad(inputLanguage, [
'文章格式錯誤',
'Post Format Error',
])
global LogHandler
LogHandler = SpecificLoad(inputLanguage, [
'紀錄額取器',
'Log Handler',
])
global NewCursor
NewCursor = SpecificLoad(inputLanguage, [
'新式游標',
'New Type Cursor',
])
global OldCursor
OldCursor = SpecificLoad(inputLanguage, [
'舊式游標',
'Old Type Cursor',
])
global PostNoContent
PostNoContent = SpecificLoad(inputLanguage, [
'此文章無內容',
'Post has no content',
])
global ConnectionClosed
ConnectionClosed = SpecificLoad(inputLanguage, [
'連線已經被關閉',
'Connection Closed',
])
global BoardList
BoardList = SpecificLoad(inputLanguage, [
'看板列表',
'Board List',
])
global UnregisteredUserCantUseAllAPI
UnregisteredUserCantUseAllAPI = SpecificLoad(inputLanguage, [
'未註冊使用者,將無法使用全部功能',
'Unregistered User Can\'t Use All API',
])
global UnregisteredUserCantUseThisAPI
UnregisteredUserCantUseThisAPI = SpecificLoad(inputLanguage, [
'未註冊使用者,無法使用此功能',
'Unregistered User Can\'t Use This API',
])
global MultiThreadOperate
MultiThreadOperate = SpecificLoad(inputLanguage, [
'請勿使用多核心同時操作一個 PTT Library 物件',
'Do not use a multi-thread to operate a PTT Library object',
])
global HasNewMailGotoMainMenu
HasNewMailGotoMainMenu = SpecificLoad(inputLanguage, [
'有新信,回到主選單',
'Have a new letter, return to the main menu',
])
global UseTooManyResources
UseTooManyResources = SpecificLoad(inputLanguage, [
'耗用太多資源',
'Use too many resources of PTT',
])
global Host
Host = SpecificLoad(inputLanguage, [
'主機',
'Host',
])
global PTT2NotSupport
PTT2NotSupport = SpecificLoad(inputLanguage, [
f'{PTT2}不支援',
f'{PTT2} Not Support',
])
# Animation
global AnimationPost
AnimationPost = SpecificLoad(inputLanguage, [
'動畫文章',
'Animation Post',
])
global RestoreConnection
RestoreConnection = SpecificLoad(inputLanguage, [
'恢復連線',
'Restore Connection',
])
global NoPush
NoPush = SpecificLoad(inputLanguage, [
'禁止推薦',
'No Push',
])
# Sorry, this article has been closed and marked, no response
global NoResponse
NoResponse = SpecificLoad(inputLanguage, [
'很抱歉, 此文章已結案並標記, 不得回應',
'This Post has been closed and marked, no response',
])
global ReplyBoard
ReplyBoard = SpecificLoad(inputLanguage, [
'回應至看板',
'Respond to the Board',
])
global ReplyMail
ReplyMail = SpecificLoad(inputLanguage, [
'回應至作者信箱',
'Respond to the mailbox of author',
])
global ReplyBoard_Mail
ReplyBoard_Mail = SpecificLoad(inputLanguage, [
'回應至看板與作者信箱',
'Respond to the Board and the mailbox of author',
])
global UseTheOriginalTitle
UseTheOriginalTitle = SpecificLoad(inputLanguage, [
'採用原標題',
'Use the original title',
])
global QuoteOriginal
QuoteOriginal = SpecificLoad(inputLanguage, [
'引用原文',
'Quote original',
])
global EditPost
EditPost = SpecificLoad(inputLanguage, [
'編輯文章',
'Edit Post',
])
global RespondSuccess
RespondSuccess = SpecificLoad(inputLanguage, [
'回應成功',
'Respond Success',
])
global ForcedWrite
ForcedWrite = SpecificLoad(inputLanguage, [
'強制寫入',
'Forced Write',
])
global NoPost
NoPost = SpecificLoad(inputLanguage, [
'沒有文章',
'No Post',
])
global NeedModeratorPermission
NeedModeratorPermission = SpecificLoad(inputLanguage, [
'需要板主權限',
'Need Moderator Permission',
])
global NewSettingsHaveBeenSaved
NewSettingsHaveBeenSaved = SpecificLoad(inputLanguage, [
'已儲存新設定',
'New settings have been saved',
])
global NoChanges
NoChanges = SpecificLoad(inputLanguage, [
'未改變任何設定',
'No changes have been made to any settings',
])
global Mark
Mark = SpecificLoad(inputLanguage, [
'標記',
'Mark',
])
global DelAllMarkPost
DelAllMarkPost = SpecificLoad(inputLanguage, [
'刪除所有標記文章',
'Del All Mark Post',
])
# No changes have been made to any settings
# Quote original
# global List
# List = []
# for k, v in globals().items():
# # System Var
# if k.startswith('_'):
# continue
# print(f'k {k}')
# print(f'v {v}')
# if isinstance(k, str) and isinstance(v, str):
# List.append(k)
def _createlist():
i18nStrList = []
for k, v in globals().items():
# System Var
if k.startswith('_'):
continue
if isinstance(k, str) and isinstance(v, str):
i18nStrList.append(k)
with open('i18n.txt', 'w') as F:
F.write('\n'.join(i18nStrList))
if __name__ == '__main__':
load(Language.Chinese)
_createlist()
| 20.511758 | 71 | 0.57968 |
4f12dea1028c305cf8026c2e4b27f8832e46c90e | 19,217 | py | Python | install/gcp/installer/forseti_server_installer.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | ["Apache-2.0"] | null | null | null | install/gcp/installer/forseti_server_installer.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | ["Apache-2.0"] | null | null | null | install/gcp/installer/forseti_server_installer.py | mcunha/forseti-security | cbf25f6173c1a25d4e43a9738eca73f927361cb8 | ["Apache-2.0"] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti Server installer."""
from __future__ import print_function
import os
import random
from forseti_installer import ForsetiInstaller
from util import constants
from util import files
from util import gcloud
from util import merge_engine
from util import utils
from util import upgradeable_resources
class ForsetiServerInstaller(ForsetiInstaller):
"""Forseti server installer."""
gsuite_service_acct_email = None
has_roles_script = False
setup_explain = True
enable_write_access = True
resource_root_id = None
access_target = None
target_id = None
migrate_from_v1 = False
user_can_grant_roles = True
def __init__(self, config, previous_installer=None):
"""Init.
Args:
config (ServerConfig): The configuration object.
previous_installer (ForsetiInstaller): The previous ran installer,
we can get the installer environment information from it.
"""
super(ForsetiServerInstaller, self).__init__(config,
previous_installer)
self.v1_config = None
def preflight_checks(self):
"""Pre-flight checks for server instance."""
super(ForsetiServerInstaller, self).preflight_checks()
self.config.generate_cloudsql_instance()
self.get_email_settings()
gcloud.enable_apis(self.config.dry_run)
forseti_v1_name = None
if not self.config.dry_run:
_, zone, forseti_v1_name = gcloud.get_vm_instance_info(
constants.REGEX_MATCH_FORSETI_V1_INSTANCE_NAME, try_match=True)
if forseti_v1_name:
utils.print_banner('Found A V1 Installation:'
' Importing Configuration And Rules.')
# v1 instance exists, ask if the user wants to port
# the conf/rules settings from v1.
self.prompt_v1_configs_migration()
if self.migrate_from_v1:
self.v1_config = upgradeable_resources.ForsetiV1Configuration(
self.project_id, forseti_v1_name, zone)
self.v1_config.fetch_information_from_gcs()
self.populate_config_info_from_v1()
self.determine_access_target()
print('Forseti will be granted write access and required roles to: '
'{}'.format(self.resource_root_id))
def prompt_v1_configs_migration(self):
"""Ask the user if they want to migrate conf/rule files
from v1 to v2."""
choice = ''
while choice != 'y' and choice != 'n':
choice = raw_input(
constants.QUESTION_SHOULD_MIGRATE_FROM_V1).lower()
self.migrate_from_v1 = choice == 'y'
def populate_config_info_from_v1(self):
"""Retrieve the v1 configuration object."""
v1_conf_global = self.v1_config.config.get('global')
self.config.sendgrid_api_key = v1_conf_global.get('sendgrid_api_key')
self.config.gsuite_superadmin_email = v1_conf_global.get(
'domain_super_admin_email')
self.config.notification_recipient_email = v1_conf_global.get(
'email_recipient')
self.config.notification_sender_email = v1_conf_global.get(
'email_sender')
def deploy(self, deployment_tpl_path, conf_file_path, bucket_name):
"""Deploy Forseti using the deployment template.
Grant access to service account.
Args:
deployment_tpl_path (str): Deployment template path.
conf_file_path (str): Configuration file path.
bucket_name (str): Name of the GCS bucket.
Returns:
bool: Whether or not the deployment was successful.
str: Deployment name.
"""
success, deployment_name = super(ForsetiServerInstaller, self).deploy(
deployment_tpl_path, conf_file_path, bucket_name)
if success:
# Fill in the default values for all the rule files
default_rule_values = self.get_rule_default_values()
files.update_rule_files(default_rule_values,
constants.RULES_DIR_PATH)
# Replace new rules if necessary.
if self.migrate_from_v1:
self.replace_with_old_rules()
print('Copying the default Forseti rules to:\n\t{}'.format(
os.path.join(bucket_name, 'rules')))
# Copy the rule directory to the GCS bucket.
files.copy_file_to_destination(
constants.RULES_DIR_PATH, bucket_name,
is_directory=True, dry_run=self.config.dry_run)
self.has_roles_script = gcloud.grant_server_svc_acct_roles(
self.enable_write_access,
self.access_target,
self.target_id,
self.project_id,
self.gcp_service_acct_email,
self.user_can_grant_roles)
# Waiting for VM to be initialized.
instance_name = 'forseti-{}-vm-{}'.format(
self.config.installation_type,
self.config.identifier)
self.wait_until_vm_initialized(instance_name)
# Create firewall rules.
self.create_firewall_rules()
return success, deployment_name
def replace_with_old_rules(self):
"""Replace new rules with old rules.
This is very specific for migration from v1 to v2 because we don't
want to modify the rule files that user defined in v1.
"""
for v1_rule in self.v1_config.rules:
new_rule_path = os.path.join(constants.RULES_DIR_PATH,
v1_rule.file_name)
files.write_data_to_yaml_file(v1_rule.data, new_rule_path)
def create_firewall_rules(self):
"""Create firewall rules for Forseti server instance."""
# Rule to block out all the ingress traffic.
gcloud.create_firewall_rule(
self.format_firewall_rule_name('forseti-server-deny-all'),
[self.gcp_service_acct_email],
constants.FirewallRuleAction.DENY,
['icmp', 'udp', 'tcp'],
constants.FirewallRuleDirection.INGRESS,
1)
# Rule to open only port tcp:50051 within the
# internal network (ip-ranges - 10.128.0.0/9).
gcloud.create_firewall_rule(
self.format_firewall_rule_name('forseti-server-allow-grpc'),
[self.gcp_service_acct_email],
constants.FirewallRuleAction.ALLOW,
['tcp:50051'],
constants.FirewallRuleDirection.INGRESS,
0,
'10.128.0.0/9')
# Create firewall rule to open only port tcp:22 (ssh)
# to all the external traffics from the internet.
gcloud.create_firewall_rule(
self.format_firewall_rule_name(
'forseti-server-allow-ssh-external'),
[self.gcp_service_acct_email],
constants.FirewallRuleAction.ALLOW,
['tcp:22'],
constants.FirewallRuleDirection.INGRESS,
0,
'0.0.0.0/0')
def generate_forseti_conf(self):
"""Generate Forseti conf file.
If self.migrate_from_v1 is True, pull the v1 configuration
file and merge it with v2 template.
Returns:
str: Forseti configuration file path.
"""
forseti_conf_path = super(
ForsetiServerInstaller, self).generate_forseti_conf()
if self.migrate_from_v1:
new_conf = files.read_yaml_file_from_local(forseti_conf_path)
fields_to_ignore = [
'db_host', 'db_user', 'db_name',
'inventory', 'output_path', 'gcs_path',
'groups_service_account_key_file',
'domain_super_admin_email',
'max_admin_api_calls_per_100_seconds',
'max_appengine_api_calls_per_second',
'max_bigquery_api_calls_per_100_seconds',
'max_cloudbilling_api_calls_per_60_seconds',
'max_compute_api_calls_per_second',
'max_container_api_calls_per_100_seconds',
'max_crm_api_calls_per_100_seconds',
'max_iam_api_calls_per_second',
'max_results_admin_api',
'max_sqladmin_api_calls_per_100_seconds',
'resources']
field_identifiers = {'scanners': 'name',
'resources': 'resource',
'pipelines': 'name'}
merge_engine.merge_object(merge_from=self.v1_config.config,
merge_to=new_conf,
fields_to_ignore=fields_to_ignore,
field_identifiers=field_identifiers)
# Fields that have changed categories cannot be merged,
# swap them here instead.
self._swap_config_fields(self.v1_config.config, new_conf)
files.write_data_to_yaml_file(new_conf, forseti_conf_path)
return forseti_conf_path
def format_firewall_rule_name(self, rule_name):
"""Format firewall rule name.
Args:
rule_name (str): Name of the firewall rule.
Returns:
str: Firewall rule name.
"""
return '{}-{}'.format(rule_name, self.config.identifier)
def get_deployment_values(self):
"""Get deployment values.
Returns:
dict: A dictionary of values needed to generate
the forseti deployment template.
"""
bucket_name = self.generate_bucket_name()
return {
'CLOUDSQL_REGION': self.config.cloudsql_region,
'CLOUDSQL_INSTANCE_NAME': self.config.cloudsql_instance,
'FORSETI_BUCKET': bucket_name[len('gs://'):],
'BUCKET_LOCATION': self.config.bucket_location,
'GCP_SERVER_SERVICE_ACCOUNT': self.gcp_service_acct_email,
'FORSETI_VERSION': self.version,
'RAND_MINUTE': random.randint(0, 59)
}
def get_configuration_values(self):
"""Get configuration values.
Returns:
dict: A dictionary of values needed to generate
the forseti configuration file.
"""
bucket_name = self.generate_bucket_name()
return {
'EMAIL_RECIPIENT': self.config.notification_recipient_email,
'EMAIL_SENDER': self.config.notification_sender_email,
'SENDGRID_API_KEY': self.config.sendgrid_api_key,
'FORSETI_BUCKET': bucket_name[len('gs://'):],
'DOMAIN_SUPER_ADMIN_EMAIL': self.config.gsuite_superadmin_email,
'ROOT_RESOURCE_ID': self.resource_root_id,
}
def get_rule_default_values(self):
"""Get rule default values.
Returns:
dict: A dictionary of default values.
"""
organization_id = self.resource_root_id.split('/')[-1]
domain = gcloud.get_domain_from_organization_id(organization_id)
return {
'ORGANIZATION_ID': organization_id,
'DOMAIN': domain
}
def determine_access_target(self):
"""Determine where to enable Forseti access.
Allow only org level access since IAM explain
requires org level access.
"""
utils.print_banner('Forseti Installation Configuration')
if not self.config.advanced_mode:
self.access_target = constants.RESOURCE_TYPES[0]
self.target_id = self.organization_id
while not self.target_id:
if self.setup_explain:
# If user wants to setup Explain, they must setup
# access on an organization.
choice_index = 1
else:
try:
print(constants.MESSAGE_FORSETI_CONFIGURATION_ACCESS_LEVEL)
for (i, choice) in enumerate(constants.RESOURCE_TYPES):
print('[%s] %s' % (i+1, choice))
choice_input = raw_input(
constants.QUESTION_FORSETI_CONFIGURATION_ACCESS_LEVEL
).strip()
choice_index = int(choice_input)
except ValueError:
print('Invalid choice, try again.')
continue
            if 0 < choice_index <= len(constants.RESOURCE_TYPES):
self.access_target = constants.RESOURCE_TYPES[choice_index-1]
if self.access_target == 'organization':
self.target_id = gcloud.choose_organization()
elif self.access_target == 'folder':
self.target_id = gcloud.choose_folder(self.organization_id)
else:
self.target_id = gcloud.choose_project()
self.resource_root_id = utils.format_resource_id(
'%ss' % self.access_target, self.target_id)
def get_email_settings(self):
"""Ask user for specific install values."""
utils.print_banner('Configuring GSuite Admin Information')
while not self.config.gsuite_superadmin_email:
# User has to enter a G Suite super admin email.
print(constants.MESSAGE_ASK_GSUITE_SUPERADMIN_EMAIL)
self.config.gsuite_superadmin_email = raw_input(
constants.QUESTION_GSUITE_SUPERADMIN_EMAIL).strip()
if self.config.skip_sendgrid_config:
print(constants.MESSAGE_SKIP_SENDGRID_API_KEY)
return
utils.print_banner('Configuring Forseti Email Settings')
if not self.config.sendgrid_api_key:
# Ask for SendGrid API Key.
print(constants.MESSAGE_ASK_SENDGRID_API_KEY)
self.config.sendgrid_api_key = raw_input(
constants.QUESTION_SENDGRID_API_KEY).strip()
if self.config.sendgrid_api_key:
if not self.config.notification_sender_email:
self.config.notification_sender_email = (
constants.NOTIFICATION_SENDER_EMAIL)
# Ask for notification recipient email.
if not self.config.notification_recipient_email:
self.config.notification_recipient_email = raw_input(
constants.QUESTION_NOTIFICATION_RECIPIENT_EMAIL).strip()
def post_install_instructions(self, deploy_success,
forseti_conf_path, bucket_name):
"""Show post-install instructions.
For example: link for deployment manager dashboard and
link to go to G Suite service account and enable DWD.
Args:
deploy_success (bool): Whether deployment was successful
forseti_conf_path (str): Forseti configuration file path
bucket_name (str): Name of the GCS bucket
Returns:
ForsetiInstructions: Forseti instructions.
"""
instructions = (
super(ForsetiServerInstaller, self).post_install_instructions(
deploy_success, forseti_conf_path, bucket_name))
instructions.other_messages.append(
constants.MESSAGE_ENABLE_GSUITE_GROUP_INSTRUCTIONS)
if self.has_roles_script:
instructions.other_messages.append(
constants.MESSAGE_HAS_ROLE_SCRIPT)
if not self.config.sendgrid_api_key:
instructions.other_messages.append(
constants.MESSAGE_FORSETI_SENDGRID_INSTRUCTIONS)
instructions.other_messages.append(constants.MESSAGE_RUN_FREQUENCY)
return instructions
@staticmethod
def _get_gcs_path(resources):
"""Get gcs path from resources.
Args:
resources (list): List of resources under the notifier section in
the forseti_config_server.yaml file.
Returns:
str: The gcs path.
"""
for resource in resources:
notifiers = resource['notifiers']
for notifier in notifiers:
if notifier['name'] == 'gcs_violations':
return notifier['configuration']['gcs_path']
return ''
@staticmethod
def _swap_config_fields(old_config, new_config):
"""Swapping fields. This will work for all v1 migrating to v2.
Note: new_config will get modified.
Args:
old_config (dict): Old configuration.
new_config (dict): New configuration.
"""
# pylint: disable=too-many-locals
        # Some fields have moved between sections from v1 to v2. The fields
        # listed below lived under 'global' in v1 and belong under 'inventory'
        # in v2, so copy their values across here.
global_to_inventory = [
'domain_super_admin_email'
]
new_conf_inventory = new_config['inventory']
old_config_global = ({} if 'global' not in old_config
else old_config['global'])
for field in global_to_inventory:
if field in old_config_global:
new_conf_inventory[field] = (old_config_global[field]
or new_conf_inventory[field])
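        # The notifier section was renamed between versions: each v1 'pipelines'
        # entry becomes a v2 'notifiers' entry and the '_pipeline' suffix is
        # dropped from its name (e.g. 'gcs_violations_pipeline' becomes
        # 'gcs_violations'), while the gcs_violations gcs_path is refreshed to
        # point at the newly generated scanner output path.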
old_notifier_resources = old_config['notifier']['resources']
new_notifier_resources = new_config['notifier']['resources']
new_scanner_gcs_path = ForsetiServerInstaller._get_gcs_path(
new_notifier_resources)
resource_name_to_index = {}
for idx, old_resource in enumerate(old_notifier_resources):
resource_name_to_index[old_resource['resource']] = idx
for idx, resource in enumerate(new_notifier_resources):
resource_name = resource['resource']
if resource_name in resource_name_to_index:
# if resource_name is in the old notifier section, replace
# the new notifier section with the old one and update the
# values accordingly.
new_notifier_resources[idx] = old_notifier_resources[
resource_name_to_index[resource_name]]
resource_to_update = new_notifier_resources[idx]
resource_to_update['notifiers'] = resource_to_update.pop(
'pipelines')
for notifier in resource_to_update['notifiers']:
notifier['name'] = notifier['name'].replace('_pipeline', '')
if notifier['name'] == 'gcs_violations':
notifier['configuration']['gcs_path'] = (
new_scanner_gcs_path)
| 40.035417
| 80
| 0.619816
|
e6dda082dc58382563d5cf1afe19ad020a4afe1c
| 716
|
py
|
Python
|
examples/fire.py
|
msabramo/diesel
|
a1ed7ed0358d0fec8592e23aafc3b7ec167ab649
|
[
"BSD-3-Clause"
] | 224
|
2015-01-03T06:33:05.000Z
|
2021-11-22T03:19:02.000Z
|
examples/fire.py
|
dowski/diesel
|
d9824e467805caf40e0ba21b88a27db38e64c352
|
[
"BSD-3-Clause"
] | 12
|
2015-01-01T03:35:15.000Z
|
2021-05-22T23:37:28.000Z
|
examples/fire.py
|
dowski/diesel
|
d9824e467805caf40e0ba21b88a27db38e64c352
|
[
"BSD-3-Clause"
] | 37
|
2015-01-04T01:47:55.000Z
|
2022-03-03T02:04:15.000Z
|
# vim:ts=4:sw=4:expandtab
'''Example of event firing.
'''
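# gunner() fires a steady stream of 'bam' events; sieged() blocks on wait('bam'),
# logs progress every 10,000 events, and stops after 50,000 with a throughput
# figure. Together they demonstrate diesel's fire/wait event API.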
import time
import random
from diesel import (quickstart, quickstop, sleep,
fire, wait, log, loglevels,
set_log_level)
set_log_level(loglevels.DEBUG)
def gunner():
x = 1
while True:
fire('bam', x)
x += 1
sleep()
def sieged():
t = time.time()
while True:
n = wait('bam')
if n % 10000 == 0:
log.info(str(n))
if n == 50000:
delt = time.time() - t
log.debug("50,000 messages in {0:.3f}s {1:.1f}/s)", delt, 50000 / delt)
quickstop()
log = log.name('fire-system')
quickstart(gunner, sieged)
| 22.375
| 87
| 0.511173
|
c6c6221ddf7aeb49e095eb6825c446d6d64076a2
| 943
|
py
|
Python
|
Desafios/desafio-53.py
|
marielitonmb/Curso-Python3
|
26215c47c4d1eadf940b8024305b7e9ff600883b
|
[
"MIT"
] | null | null | null |
Desafios/desafio-53.py
|
marielitonmb/Curso-Python3
|
26215c47c4d1eadf940b8024305b7e9ff600883b
|
[
"MIT"
] | null | null | null |
Desafios/desafio-53.py
|
marielitonmb/Curso-Python3
|
26215c47c4d1eadf940b8024305b7e9ff600883b
|
[
"MIT"
] | null | null | null |
# Lesson 13 - Challenge 53: Palindrome detector
# Read any sentence and say whether it is a palindrome, ignoring the spaces
# E.g.: "apos a sopa", "a sacada da casa", "a torre da derrota", "o lobo ama o bolo"
frase = input('Digite uma frase: ').lower().strip()
frase = frase.replace(' ', '')
frase_inv = frase[::-1]
if frase == frase_inv:
print(f'{frase} eh igual a {frase_inv}')
print('Logo \033[1;4mSIM\033[m, eh um palindromo!')
else:
print(f'{frase} eh diferente de {frase_inv}')
print('Logo \033[1;4mNAO EH\033[m um palindromo!')
'''
### another approach ###
frase = input('Digite uma frase: ').lower().strip()
palavras = frase.split()
juntar = ''.join(palavras)
inverter = ''
for letra in range(len(juntar) - 1, -1, -1):
inverter += juntar[letra]
print(f'O inverso de {juntar} eh {inverter}.')
if juntar == inverter:
print('Temos um palindromo!')
else:
print('A frase digitada nao eh um palindromo!')
'''
| 29.46875
| 82
| 0.654295
|
cfdbd9542d475c890b06e5259d58ea5125f9c7ed
| 4,216
|
py
|
Python
|
tests/test_cursor_fetchall.py
|
adh/ctds
|
8c8b562341fb9635e3d89013ff06ffc6b1397abb
|
[
"MIT"
] | 78
|
2016-03-14T18:02:05.000Z
|
2021-11-26T23:23:06.000Z
|
tests/test_cursor_fetchall.py
|
adh/ctds
|
8c8b562341fb9635e3d89013ff06ffc6b1397abb
|
[
"MIT"
] | 64
|
2016-10-18T17:54:08.000Z
|
2021-09-30T11:01:02.000Z
|
tests/test_cursor_fetchall.py
|
adh/ctds
|
8c8b562341fb9635e3d89013ff06ffc6b1397abb
|
[
"MIT"
] | 17
|
2016-07-21T20:22:12.000Z
|
2020-11-07T01:25:26.000Z
|
import ctds
from .base import TestExternalDatabase
class TestCursorFetchAll(TestExternalDatabase):
'''Unit tests related to the Cursor.fetchall() method.
'''
def test___doc__(self):
self.assertEqual(
ctds.Cursor.fetchall.__doc__,
'''\
fetchall()
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences.
:pep:`0249#fetchall`
:return: A sequence of result rows.
:rtype: ctds.RowList
'''
)
def test_closed(self):
with self.connect() as connection:
cursor = connection.cursor()
cursor.close()
try:
cursor.fetchall()
except ctds.InterfaceError as ex:
self.assertEqual(str(ex), 'cursor closed')
else:
self.fail('.fetchall() did not fail as expected') # pragma: nocover
def test_closed_connection(self):
connection = self.connect()
with connection.cursor() as cursor:
connection.close()
try:
cursor.fetchall()
except ctds.InterfaceError as ex:
self.assertEqual(str(ex), 'connection closed')
else:
self.fail('.fetchall() did not fail as expected') # pragma: nocover
def test_premature(self):
with self.connect() as connection:
with connection.cursor() as cursor:
self.assertRaises(ctds.InterfaceError, cursor.fetchall)
def test_fetchall(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT * FROM @{0};
SELECT i * 2 FROM @{0};
'''.format(self.test_fetchall.__name__)
)
self.assertEqual([tuple(row) for row in cursor.fetchall()], [(1,), (2,), (3,)])
self.assertEqual(list(cursor.fetchall()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchall()], [(2,), (4,), (6,)])
self.assertEqual(list(cursor.fetchall()), [])
self.assertEqual(cursor.nextset(), None)
self.assertRaises(ctds.InterfaceError, cursor.fetchall)
def test_empty_resultset(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT i FROM @{0} WHERE i < 0;
'''.format(self.test_empty_resultset.__name__)
)
self.assertEqual(list(cursor.fetchall()), [])
self.assertEqual(cursor.nextset(), None)
def test_multiple_resultsets(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3);
SELECT i FROM @{0} WHERE i < 0;
SELECT i AS j FROM @{0} WHERE i > 2;
SELECT i AS k FROM @{0} WHERE i > 3;
SELECT i AS ii FROM @{0};
'''.format(self.test_multiple_resultsets.__name__)
)
self.assertEqual(list(cursor.fetchall()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchall()], [(3,)])
self.assertEqual(list(cursor.fetchall()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual(list(cursor.fetchall()), [])
self.assertEqual(cursor.nextset(), True)
self.assertEqual([tuple(row) for row in cursor.fetchall()], [(1,), (2,), (3,)])
self.assertEqual(cursor.nextset(), None)
| 40.152381
| 95
| 0.514943
|
1fc61fcaa344c7f5b3a6ecfb05dd652688724193
| 12,377
|
py
|
Python
|
qemu/tests/qemu-iotests/testrunner.py
|
hyunjoy/scripts
|
01114d3627730d695b5ebe61093c719744432ffa
|
[
"Apache-2.0"
] | 44
|
2022-03-16T08:32:31.000Z
|
2022-03-31T16:02:35.000Z
|
qemu/tests/qemu-iotests/testrunner.py
|
hyunjoy/scripts
|
01114d3627730d695b5ebe61093c719744432ffa
|
[
"Apache-2.0"
] | 1
|
2022-03-29T02:30:28.000Z
|
2022-03-30T03:40:46.000Z
|
qemu/tests/qemu-iotests/testrunner.py
|
hyunjoy/scripts
|
01114d3627730d695b5ebe61093c719744432ffa
|
[
"Apache-2.0"
] | 18
|
2022-03-19T04:41:04.000Z
|
2022-03-31T03:32:12.000Z
|
# Class for actually running tests.
#
# Copyright (c) 2020-2021 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from pathlib import Path
import datetime
import time
import difflib
import subprocess
import contextlib
import json
import termios
import sys
from contextlib import contextmanager
from typing import List, Optional, Iterator, Any, Sequence, Dict, \
ContextManager
from testenv import TestEnv
def silent_unlink(path: Path) -> None:
try:
path.unlink()
except OSError:
pass
def file_diff(file1: str, file2: str) -> List[str]:
with open(file1, encoding="utf-8") as f1, \
open(file2, encoding="utf-8") as f2:
# We want to ignore spaces at line ends. There are a lot of mess about
# it in iotests.
# TODO: fix all tests to not produce extra spaces, fix all .out files
# and use strict diff here!
seq1 = [line.rstrip() for line in f1]
seq2 = [line.rstrip() for line in f2]
res = [line.rstrip()
for line in difflib.unified_diff(seq1, seq2, file1, file2)]
return res
# We want to save current tty settings during test run,
# since an aborting qemu call may leave things screwed up.
@contextmanager
def savetty() -> Iterator[None]:
isterm = sys.stdin.isatty()
if isterm:
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
try:
yield
finally:
if isterm:
termios.tcsetattr(fd, termios.TCSADRAIN, attr)
class LastElapsedTime(ContextManager['LastElapsedTime']):
""" Cache for elapsed time for tests, to show it during new test run
It is safe to use get() at any time. To use update(), you must either
use it inside with-block or use save() after update().
"""
def __init__(self, cache_file: str, env: TestEnv) -> None:
self.env = env
self.cache_file = cache_file
self.cache: Dict[str, Dict[str, Dict[str, float]]]
try:
with open(cache_file, encoding="utf-8") as f:
self.cache = json.load(f)
except (OSError, ValueError):
self.cache = {}
def get(self, test: str,
default: Optional[float] = None) -> Optional[float]:
if test not in self.cache:
return default
if self.env.imgproto not in self.cache[test]:
return default
return self.cache[test][self.env.imgproto].get(self.env.imgfmt,
default)
def update(self, test: str, elapsed: float) -> None:
d = self.cache.setdefault(test, {})
d.setdefault(self.env.imgproto, {})[self.env.imgfmt] = elapsed
def save(self) -> None:
with open(self.cache_file, 'w', encoding="utf-8") as f:
json.dump(self.cache, f)
def __enter__(self) -> 'LastElapsedTime':
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.save()
class TestResult:
def __init__(self, status: str, description: str = '',
elapsed: Optional[float] = None, diff: Sequence[str] = (),
casenotrun: str = '', interrupted: bool = False) -> None:
self.status = status
self.description = description
self.elapsed = elapsed
self.diff = diff
self.casenotrun = casenotrun
self.interrupted = interrupted
class TestRunner(ContextManager['TestRunner']):
def __init__(self, env: TestEnv, makecheck: bool = False,
color: str = 'auto') -> None:
self.env = env
self.test_run_env = self.env.get_env()
self.makecheck = makecheck
self.last_elapsed = LastElapsedTime('.last-elapsed-cache', env)
assert color in ('auto', 'on', 'off')
self.color = (color == 'on') or (color == 'auto' and
sys.stdout.isatty())
self._stack: contextlib.ExitStack
def __enter__(self) -> 'TestRunner':
self._stack = contextlib.ExitStack()
self._stack.enter_context(self.env)
self._stack.enter_context(self.last_elapsed)
self._stack.enter_context(savetty())
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self._stack.close()
def test_print_one_line(self, test: str, starttime: str,
endtime: Optional[str] = None, status: str = '...',
lasttime: Optional[float] = None,
thistime: Optional[float] = None,
description: str = '',
test_field_width: Optional[int] = None,
end: str = '\n') -> None:
""" Print short test info before/after test run """
test = os.path.basename(test)
if test_field_width is None:
test_field_width = 8
if self.makecheck and status != '...':
if status and status != 'pass':
status = f' [{status}]'
else:
status = ''
print(f' TEST iotest-{self.env.imgfmt}: {test}{status}')
return
if lasttime:
lasttime_s = f' (last: {lasttime:.1f}s)'
else:
lasttime_s = ''
if thistime:
thistime_s = f'{thistime:.1f}s'
else:
thistime_s = '...'
if endtime:
endtime = f'[{endtime}]'
else:
endtime = ''
if self.color:
if status == 'pass':
col = '\033[32m'
elif status == 'fail':
col = '\033[1m\033[31m'
elif status == 'not run':
col = '\033[33m'
else:
col = ''
col_end = '\033[0m'
else:
col = ''
col_end = ''
print(f'{test:{test_field_width}} {col}{status:10}{col_end} '
f'[{starttime}] {endtime:13}{thistime_s:5} {lasttime_s:14} '
f'{description}', end=end)
def find_reference(self, test: str) -> str:
if self.env.cachemode == 'none':
ref = f'{test}.out.nocache'
if os.path.isfile(ref):
return ref
ref = f'{test}.out.{self.env.imgfmt}'
if os.path.isfile(ref):
return ref
ref = f'{test}.{self.env.qemu_default_machine}.out'
if os.path.isfile(ref):
return ref
return f'{test}.out'
def do_run_test(self, test: str) -> TestResult:
f_test = Path(test)
f_bad = Path(f_test.name + '.out.bad')
f_notrun = Path(f_test.name + '.notrun')
f_casenotrun = Path(f_test.name + '.casenotrun')
f_reference = Path(self.find_reference(test))
if not f_test.exists():
return TestResult(status='fail',
description=f'No such test file: {f_test}')
if not os.access(str(f_test), os.X_OK):
sys.exit(f'Not executable: {f_test}')
if not f_reference.exists():
return TestResult(status='not run',
description='No qualified output '
f'(expected {f_reference})')
for p in (f_bad, f_notrun, f_casenotrun):
silent_unlink(p)
args = [str(f_test.resolve())]
if self.env.debug:
args.append('-d')
with f_test.open(encoding="utf-8") as f:
try:
if f.readline().rstrip() == '#!/usr/bin/env python3':
args.insert(0, self.env.python)
except UnicodeDecodeError: # binary test? for future.
pass
env = os.environ.copy()
env.update(self.test_run_env)
t0 = time.time()
with f_bad.open('w', encoding="utf-8") as f:
proc = subprocess.Popen(args, cwd=str(f_test.parent), env=env,
stdout=f, stderr=subprocess.STDOUT)
try:
proc.wait()
except KeyboardInterrupt:
proc.terminate()
proc.wait()
return TestResult(status='not run',
description='Interrupted by user',
interrupted=True)
ret = proc.returncode
elapsed = round(time.time() - t0, 1)
if ret != 0:
return TestResult(status='fail', elapsed=elapsed,
description=f'failed, exit status {ret}',
diff=file_diff(str(f_reference), str(f_bad)))
if f_notrun.exists():
return TestResult(status='not run',
description=f_notrun.read_text().strip())
casenotrun = ''
if f_casenotrun.exists():
casenotrun = f_casenotrun.read_text()
diff = file_diff(str(f_reference), str(f_bad))
if diff:
return TestResult(status='fail', elapsed=elapsed,
description=f'output mismatch (see {f_bad})',
diff=diff, casenotrun=casenotrun)
else:
f_bad.unlink()
self.last_elapsed.update(test, elapsed)
return TestResult(status='pass', elapsed=elapsed,
casenotrun=casenotrun)
def run_test(self, test: str,
test_field_width: Optional[int] = None) -> TestResult:
last_el = self.last_elapsed.get(test)
start = datetime.datetime.now().strftime('%H:%M:%S')
if not self.makecheck:
self.test_print_one_line(test=test, starttime=start,
lasttime=last_el, end='\r',
test_field_width=test_field_width)
res = self.do_run_test(test)
end = datetime.datetime.now().strftime('%H:%M:%S')
self.test_print_one_line(test=test, status=res.status,
starttime=start, endtime=end,
lasttime=last_el, thistime=res.elapsed,
description=res.description,
test_field_width=test_field_width)
if res.casenotrun:
print(res.casenotrun)
return res
def run_tests(self, tests: List[str]) -> bool:
n_run = 0
failed = []
notrun = []
casenotrun = []
if not self.makecheck:
self.env.print_env()
print()
test_field_width = max(len(os.path.basename(t)) for t in tests) + 2
for t in tests:
name = os.path.basename(t)
res = self.run_test(t, test_field_width=test_field_width)
assert res.status in ('pass', 'fail', 'not run')
if res.casenotrun:
casenotrun.append(t)
if res.status != 'not run':
n_run += 1
if res.status == 'fail':
failed.append(name)
if self.makecheck:
self.env.print_env()
if res.diff:
print('\n'.join(res.diff))
elif res.status == 'not run':
notrun.append(name)
if res.interrupted:
break
if notrun:
print('Not run:', ' '.join(notrun))
if casenotrun:
print('Some cases not run in:', ' '.join(casenotrun))
if failed:
print('Failures:', ' '.join(failed))
print(f'Failed {len(failed)} of {n_run} iotests')
return False
else:
print(f'Passed all {n_run} iotests')
return True
| 33.271505
| 79
| 0.540196
|
bad77ea1b879af57391959a0a3e22c8e51226d71
| 23,183
|
py
|
Python
|
service/energy_compute.py
|
drunkcoding/model-inference
|
02d2240bc7052fa32223a80fa63625fe681db102
|
[
"MIT"
] | 1
|
2021-11-15T19:07:13.000Z
|
2021-11-15T19:07:13.000Z
|
service/energy_compute.py
|
drunkcoding/model-inference
|
02d2240bc7052fa32223a80fa63625fe681db102
|
[
"MIT"
] | null | null | null |
service/energy_compute.py
|
drunkcoding/model-inference
|
02d2240bc7052fa32223a80fa63625fe681db102
|
[
"MIT"
] | null | null | null |
from typing import Iterable
from tqdm import tqdm
import sys
import os
import threading
import requests
import numpy as np
import pandas as pd
import re
import subprocess
import io
from collections import deque
import multiprocessing as mp
import mysql.connector
import time
from operator import itemgetter
from responses import activate
from flask import Flask, request, jsonify, g
app = Flask(__name__)
DB_NAME = "serve.db"
connector_options = {"user": "admin", "host": "127.0.0.1", "database": "hyserve"}
def get_db():
db = getattr(g, "_database", None)
if db is None:
db = g._database = mysql.connector.connect(**connector_options)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, "_database", None)
if db is not None:
db.close()
# json={
# "request_id": self.request_id,
# "correlation_id": self.correlation_id,
# "epos": self.config.epos,
# "ppos": elf.config.ppos,
# "type": self.config.type,
# "start": request_start_time,
# "end": request_end_time,
# },
QUERY_INTERVAL = 0.15
MICRO_INTERVAL = 1e-3
dtype = [("start", int), ("end", int)]
command = "nvidia-smi --query-gpu=index,uuid,gpu_bus_id --format=csv"
result = subprocess.run(command.split(), stdout=subprocess.PIPE)
df = pd.read_csv(
io.StringIO(result.stdout.decode("utf-8")),
index_col="index",
converters={"uuid": str.strip},
)
df = df.sort_index()
df.iloc[:, 0] = df.iloc[:, 0].str.strip()
print(df)
print(df.columns)
# print(df.iloc[0, 0] == "GPU-0a0629f3-1b2a-f676-2b26-d91b9f0f5c61")
gpu_uuids = df.iloc[:, 0].to_list()
# if __name__ == "__main__":
con = mysql.connector.connect(**connector_options)
cur = con.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS measurement
( id VARCHAR(128) NOT NULL,
uuid VARCHAR(128),
start DOUBLE,
end DOUBLE,
energy DOUBLE,
power DOUBLE,
processed BOOLEAN,
UNIQUE(id) )"""
)
cur.execute(
"""CREATE TABLE IF NOT EXISTS request
(id VARCHAR(128) NOT NULL,
uuid VARCHAR(128),
request_id INT UNSIGNED,
correlation_id INT UNSIGNED,
epos INT UNSIGNED,
ppos INT UNSIGNED,
type VARCHAR(128),
start DOUBLE,
end DOUBLE,
util DOUBLE,
energy DOUBLE,
processed BOOLEAN,
UNIQUE(id))"""
)
# cur.execute("DELETE FROM request WHERE type = 'none'")
cur.executemany(
"""INSERT INTO request VALUES (UUID(), %s, 0, 0, 0, 0, 'none', 0, 0, 0, 0, 0)""",
[(uuid,) for uuid in gpu_uuids],
)
cur.executemany(
"""INSERT INTO request VALUES (UUID(), %s, 0, 0, 0, 0, 'void', 0, 0, 0, 0, 0)""",
[(uuid,) for uuid in gpu_uuids],
)
con.commit()
con.close()
def db_metrics():
last_data = None
con = mysql.connector.connect(**connector_options)
cur = con.cursor()
print("db_metrics", os.getpid(), flush=True)
last_timestamp = None
while True:
start_time = time.perf_counter()
response = requests.get("http://localhost:8002/metrics")
text = response.text
energy_groups = re.findall(
r'nv_energy_consumption{gpu_uuid="(.*)"} (\d+.\d+)', text
)
energy_groups = dict(energy_groups)
power_groups = re.findall(
r'nv_gpu_power_usage{gpu_uuid="(.*)"} (\d+.\d+)', text
)
power_groups = dict(power_groups)
# print(energy_groups)
measurement_list = []
# end_time = time.perf_counter()
for uuid in energy_groups:
cum_energy = float(energy_groups[uuid])
cur_energy = (
-1 if last_data is None else cum_energy - float(last_data[uuid])
)
measurement_list.append(
(
uuid,
last_timestamp + 1e-6 if last_timestamp is not None else start_time,
start_time, # to distinguish start and last end
cur_energy,
float(power_groups[uuid]),
)
)
if last_data is not None:
for uuid in energy_groups:
if last_data[uuid] == energy_groups[uuid]:
print("same reading", uuid, last_data[uuid], energy_groups[uuid])
assert last_data[uuid] != energy_groups[uuid]
cur.executemany(
"INSERT INTO measurement VALUES (UUID(), %s, %s, %s, %s, %s, 0)",
measurement_list,
)
last_data = energy_groups
last_timestamp = start_time
con.commit()
end_time = time.perf_counter()
# print(start_time, end_time - start_time)
time.sleep(max(0, QUERY_INTERVAL - (end_time - start_time)))
def update_request_energy(cur, id, energy):
    # Note: a bare string id is also Iterable; exclude it so it takes the scalar path.
    if isinstance(id, Iterable) and not isinstance(id, str):
args = list(zip(energy, id))
cur.executemany(
"""
UPDATE request
SET energy = energy + %s
WHERE id = %s
""",
args,
)
return
cur.execute(
"""
UPDATE request
SET energy = energy + %s
WHERE id = %s
""",
        (energy, id),  # order must match the SQL: energy for SET, id for WHERE
)
def update_gpu_energy(cur, uuid, type, energy):
cur.execute(
"""
UPDATE request
SET energy = energy + %s
WHERE uuid = %s and type = %s
""",
(energy, uuid, type),
)
# def update_gpu_energy(cur, uuid, type, energy, **kwds):
# if "request_id" in kwds:
# cur.execute(
# """
# UPDATE request
# SET energy = energy + %s
# WHERE uuid = %s and type = %s and request_id = %s and correlation_id = %s and epos = %s and ppos = %s
# """,
# (
# energy,
# uuid,
# type,
# kwds["request_id"],
# kwds["correlation_id"],
# kwds["epos"],
# kwds["ppos"],
# ),
# )
# else:
# cur.execute(
# """
# UPDATE request
# SET energy = energy + %s
# WHERE uuid = %s and type = %s
# """,
# (energy, uuid, type),
# )
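# share_energy(): split one interval's measured energy across the requests in
# active_set. Each request first receives energy * its utilisation; the
# remaining (idle) fraction is divided equally among the active requests.
# Worked example: energy=10 with two active requests of util 0.3 and 0.2 ->
# active shares 3.0 and 2.0, idle share (1 - 0.5) * 10 = 5.0 split 2.5 each,
# for totals of 5.5 and 4.5.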
def share_energy(request_list, energy, active_set, holder):
util_sum = 0
for k in active_set:
util_sum += request_list[k]["util"]
holder[k] += energy * request_list[k]["util"]
if util_sum > 1:
print("util_sum", util_sum, active_set)
assert util_sum <= 1
# idle energy consumption
for k in active_set:
holder[k] += energy * (1 - util_sum) / len(active_set)
return holder
LABEL_REQ_START = 0
LABEL_REQ_END = 1
LABEL_ENG_START = 2
LABEL_ENG_END = 3
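# These labels drive a sweep-line pass in request_metrics(): start/end times of
# both requests and energy measurements are merged into one sorted timeline, and
# each pair of consecutive timestamps forms a micro-interval whose energy
# (pro-rated from the enclosing measurement) is attributed either to the active
# requests via share_energy() or to the 'none'/'void' bookkeeping rows.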
def request_metrics():
con = mysql.connector.connect(**connector_options)
cur = con.cursor()
print("request_metrics", os.getpid(), flush=True)
while True:
start_time = time.perf_counter()
print(start_time, flush=True)
cur.execute(
"SELECT * FROM request WHERE processed = 0 and type != 'none' and type != 'void' ORDER BY start ASC"
)
req_rows = list(cur)
req_names = [description[0] for description in cur.description]
cur.execute("SELECT * FROM measurement WHERE processed = 0 ORDER BY start ASC")
measure_rows = list(cur)
measure_names = [description[0] for description in cur.description]
# print([row[0] for row in measure_rows])
print(len(req_rows), len(measure_rows), flush=True)
gpu_energy = {
uuid: [
dict(zip(measure_names, list(row)))
for row in measure_rows
if row[1] == uuid
]
for uuid in gpu_uuids
}
request_queue = {
uuid: [
dict(zip(req_names, list(row))) for row in req_rows if row[1] == uuid
]
for uuid in gpu_uuids
}
# print("gpu_energy", gpu_energy)
# print("request_queue", request_queue)
for uuid in gpu_uuids:
# TEST_GPU_SUM += gpu_energy[uuid][-1]["energy"]
# if len(request_queue[uuid]) == 0:
# energy = gpu_energy[uuid][-1]["energy"]
# update_gpu_energy(cur, uuid, "void", energy)
# # TEST_MODEL_SUM += gpu_energy[uuid][-1]["energy"]
# # print("no request %s %s" % ("none", energy), flush=True)
# con.commit()
# continue
# req_time = np.array(
# [(req["start"], req["end"]) for req in request_queue[uuid]], dtype=dtype
# )
# req_sort_idx = np.argsort(req_time, order=["start", "end"])
request_list = request_queue[uuid]
measure_list = gpu_energy[uuid]
# print(measure_list)
request_energy_holder = {idx: 0 for idx in range(len(request_list))}
# request_energy_holder["none"] = 0
all_timestamps = (
[
(request["start"], LABEL_REQ_START, i)
for i, request in enumerate(request_list)
]
+ [
(request["end"], LABEL_REQ_END, i)
for i, request in enumerate(request_list)
]
+ [
(measure["start"], LABEL_ENG_START, i)
for i, measure in enumerate(measure_list)
]
+ [
(measure["end"], LABEL_ENG_END, i)
for i, measure in enumerate(measure_list)
]
)
all_timestamps = sorted(all_timestamps)
# all_timestamps = np.sort(all_timestamps, axis=None)
all_timestamps = list(zip(all_timestamps[:-1], all_timestamps[1:]))
active_req_set = set()
active_eng_set = set()
finish_req_set = set()
finsish_eng_set = set()
for item_l, item_r in tqdm(all_timestamps, desc="all_timestamps"):
time_l, label_l, idx_l = item_l
time_r, label_r, idx_r = item_r
print(item_l, item_r)
if label_l == LABEL_REQ_START:
active_req_set.add(idx_l)
# if label_r == LABEL_REQ_START:
# active_req_set.add(idx_r)
if label_l == LABEL_ENG_START:
active_eng_set.add(idx_l)
# if label_r == LABEL_ENG_START:
# active_eng_set.add(idx_r)
# print(active_req_set, active_eng_set)
if label_l == LABEL_ENG_END and label_r == LABEL_ENG_START:
continue
if len(active_eng_set) == 0:
print("no energy measured")
# active_req_set = set()
continue
# # no request in this interval
# if label_l == LABEL_ENG_START and label_r == LABEL_ENG_END:
# assert idx_l == idx_r
# energy = measure_list[idx_l]["energy"]
# update_gpu_energy(cur, uuid, "void", energy)
idx = list(active_eng_set)[0]
energy = (
measure_list[idx]["energy"]
* (time_r - time_l)
/ (measure_list[idx]["end"] - measure_list[idx]["start"])
)
# no request in this micro-interval
if (
(label_l == LABEL_ENG_START and label_r == LABEL_REQ_START)
or (label_l == LABEL_ENG_START and label_r == LABEL_ENG_END)
or (label_l == LABEL_REQ_END and label_r == LABEL_ENG_END)
):
if label_l == LABEL_ENG_START and label_r == LABEL_ENG_END:
update_gpu_energy(cur, uuid, "void", energy)
else:
update_gpu_energy(cur, uuid, "none", energy)
# share request in this micro-interval
else:
# if (label_l == LABEL_ENG_START and label_r == LABEL_REQ_START) or ():
# idx = list(active_eng_set)[0]
# energy = (
# measure_list[idx]["energy"]
# * (time_r - time_l)
# / (measure_list[idx]["end"] - measure_list[idx]["start"])
# )
print(active_req_set, energy)
request_energy_holder = share_energy(
request_list, energy, active_req_set, request_energy_holder
)
print(request_energy_holder)
con.commit()
# assert len(active_eng_set) == 1
if label_r == LABEL_REQ_END:
active_req_set.remove(idx_r)
finish_req_set.add(idx_r)
if label_r == LABEL_ENG_END:
active_eng_set.remove(idx_r)
finsish_eng_set.add(idx_r)
# for idx in tqdm(request_energy_holder, desc="request_energy_holder"):
# energy = request_energy_holder[idx]
# request_list[idx]["energy"] = energy
            update_request_energy(
                cur,
                [request_list[i]["id"] for i in request_energy_holder.keys()],
                list(request_energy_holder.values()),
            )
con.commit()
# completely finished
cur.executemany(
"UPDATE measurement SET processed = 1 WHERE id = %s",
[(measure_list[i]["id"],) for i in finsish_eng_set],
)
cur.executemany(
"UPDATE request SET processed = 1 WHERE id = %s",
[(request_list[i]["id"],) for i in finish_req_set],
)
con.commit()
# assert len(active_eng_set) == 0
# partially finished modify timestamp
# active_req_set = active_req_set.difference(finish_req_set)
# active_eng_set = active_req_set.difference(finsish_eng_set)
# idx = 0
# energy_idx = 0
# waiting_list = []
# # for t_inv in tqdm(time_intervals, desc="time_intervals"):
# # time_start_bar = t_inv
# # time_end_bar = time_start_bar + MICRO_INTERVAL
# for time_start_bar, time_end_bar in all_timestamps:
# micro_interval = time_end_bar - time_start_bar
# activate_req_set = []
# for i in range(idx, len(request_list)):
# if (
# request_list[i]["start"] >= time_start_bar
# and request_list[i]["end"] <= time_end_bar
# ):
# activate_req_set.append(i)
# if request_list[i]["start"] >= time_end_bar:
# break
# print(
# "activate_req_set",
# micro_interval,
# time_start_bar,
# time_end_bar,
# activate_req_set,
# )
# energy = 0
# energy_interval = 0
# for i in range(energy_idx, len(measure_list)):
# energy_idx = i
# query_interval = (
# measure_list[i]["end"] - measure_list[i]["start"]
# )
# query_energy = measure_list[i]["energy"]
# # interval completely enclosed in measurement
# if (
# measure_list[i]["start"] <= time_start_bar
# and measure_list[i]["end"] >= time_end_bar
# ):
# energy += micro_interval / query_interval * query_energy
# energy_interval += query_interval
# print(
# "energy enclosed",
# energy,
# energy_interval,
# query_energy,
# measure_list[i]["end"],
# measure_list[i]["start"],
# )
# break
# # interval cross boundary of measurement
# if i == len(measure_list) - 1:
# break
# if (
# measure_list[i]["end"] > time_start_bar
# and measure_list[i]["end"] < time_end_bar
# ):
# energy += (
# (measure_list[i]["end"] - time_start_bar)
# / query_interval
# * query_energy
# )
# energy_interval += query_interval
# print(
# "energy cross",
# energy,
# energy_interval,
# measure_list[i]["end"],
# measure_list[i]["start"],
# )
# # find latest query that contains the interval
# for k in range(i + 1, len(measure_list)):
# query_interval = (
# measure_list[k]["end"] - measure_list[k]["start"]
# )
# # if (
# # measure_list[k]["start"] > time_start_bar
# # and measure_list[k]["end"] < time_end_bar
# # ):
# # energy += measure_list[k]["energy"]
# # energy_interval += query_interval
# # print("energy full", energy, energy_interval, measure_list[k]["end"], measure_list[k]["start"])
# if (
# measure_list[k]["end"] >= time_end_bar
# and measure_list[k]["start"] < time_end_bar
# ):
# energy += (
# (time_end_bar - measure_list[k]["start"])
# / (
# measure_list[k]["end"]
# - measure_list[k]["start"]
# )
# * measure_list[k]["energy"]
# )
# energy_interval += query_interval
# print(
# "energy tail",
# energy,
# energy_interval,
# measure_list[k]["end"],
# measure_list[k]["start"],
# )
# break
# break
# print("energy", energy, energy_interval)
# # print(
# # time_start_bar,
# # time_start_bar,
# # measure_list[0]["start"],
# # measure_list[0]["end"],
# # measure_list[-1]["start"],
# # measure_list[-1]["end"],
# # )
# if energy == 0:
# # energy_idx = 0
# waiting_list += activate_req_set
# print("no energy measured")
# continue
# # energy_idx = energy_idx[0]
# if len(activate_req_set) > 0:
# idx = min(activate_req_set)
# if len(activate_req_set) == 0:
# request_energy_holder["none"] += energy
# continue
# # active energy consumption
# util_sum = 0
# for k in activate_req_set:
# util_sum += request_list[k]["util"]
# request_energy_holder[k] += energy * request_list[k]["util"]
# assert util_sum < 1
# # idle energy consumption
# for k in activate_req_set:
# request_energy_holder[k] += (
# energy * (1 - util_sum) / len(activate_req_set)
# )
# # print("request_energy_holder", request_energy_holder, flush=True)
# for idx in tqdm(request_energy_holder, desc="request_energy_holder"):
# if idx == "none":
# energy = request_energy_holder[idx]
# # print("has request %s %s" % ("none", energy), flush=True)
# update_gpu_energy(cur, uuid, "none", energy)
# # TEST_MODEL_SUM += energy
# continue
# energy = request_energy_holder[idx]
# # if energy == 0:
# # print("error", idx, request_list[idx])
# # assert energy > 0
# request_list[idx]["energy"] = energy
# update_gpu_energy(cur, **request_list[idx])
# con.commit()
# # TEST_MODEL_SUM += energy
# # gpu_energy[uuid] = gpu_energy[uuid][energy_idx:]
# energy_idx = min(energy_idx, len(gpu_energy[uuid]) - 1)
# cur.execute(
# "UPDATE measurement SET processed = 1 WHERE end <= %s", (time_end_bar,),
# )
# cur.execute(
# "UPDATE request SET processed = 1 WHERE end <= %s", (time_end_bar,),
# )
# if len(waiting_list) > 0:
# cur.executemany(
# "UPDATE request SET processed = 0 WHERE id = %s",
# [(request_list[k]["id"],) for k in waiting_list],
# )
# con.commit()
end_time = time.perf_counter()
time.sleep(max(0, 1 - (end_time - start_time)))
@app.route("/meter/<uuid>", methods=["POST"])
def meter_handle(uuid):
content = request.json
con = get_db()
cur = con.cursor()
# print(content)
cur.execute(
"""INSERT INTO request VALUES (UUID(), %s, %s, %s, %s, %s, %s, %s, %s, %s, 0, 0)""",
(uuid,) + tuple(list(content.values())),
)
con.commit()
cur.close()
# with lock:
# req_dict[uuid] += [content]
# # print("meter_handle", req_dict)
return jsonify({"uuid": uuid})
if __name__ == "__main__":
mp.Process(target=db_metrics).start()
time.sleep(0.5)
mp.Process(target=request_metrics).start()
app.run(host="0.0.0.0", port=10000, debug=False)
| 36.110592
| 139
| 0.475305
|
8df53aaefbc6653e9b5cf787d11685229be2875d
| 901
|
py
|
Python
|
head_first_design_patterns/factory/simple_factory/main.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/factory/simple_factory/main.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
head_first_design_patterns/factory/simple_factory/main.py
|
incolumepy-cursos/poop
|
e4ac26b8d2a8c263a93fd9642fab52aafda53d80
|
[
"MIT"
] | null | null | null |
"""
Notes:
- store is created based on factory who controls which pizzas are available
- Our client class (PizzaStore) doesn't need to know the creation of
concrete objects, that is, it dependence is of the abstract Pizza type
- pass the flavour (str) to a fabric and wait for a pizza
- simple factories may not be considered a design pattern
"""
from pizza import Pizza
from pizza_factory import SimplePizzaFactory
from pizza_store import PizzaStore
store: PizzaStore = PizzaStore(SimplePizzaFactory())
pizza: Pizza = store.order_pizza("cheese")
print(f"We ordered a {pizza.get_name()}")
print(pizza)
pizza = store.order_pizza("veggie")
print(f"We ordered a {pizza.get_name()}")
print(pizza)
pizza = store.order_pizza("clam")
print(f"We ordered a {pizza.get_name()}")
print(pizza)
pizza = store.order_pizza("pepperoni")
print(f"We ordered a {pizza.get_name()}")
print(pizza)
| 30.033333
| 79
| 0.744728
|
d0d1aea9164d02e6a0d1cdf5d21b2b11795df970
| 526
|
py
|
Python
|
downloader/cache.py
|
zhongxinghong/LeetCode
|
dfb2c9ced49f40937002bd09918057becf02901d
|
[
"MIT"
] | 4
|
2020-08-09T14:22:46.000Z
|
2020-10-15T09:05:15.000Z
|
downloader/cache.py
|
zhongxinghong/LeetCode
|
dfb2c9ced49f40937002bd09918057becf02901d
|
[
"MIT"
] | null | null | null |
downloader/cache.py
|
zhongxinghong/LeetCode
|
dfb2c9ced49f40937002bd09918057becf02901d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File: cache.py
# Created Date: 2020-08-07
# Author: Rabbit
# --------------------------------
# Copyright (c) 2020 Rabbit
import time
class LeetCodeClientCache(object):
def __init__(self, cookies, expires=3600 * 24 * 7):
self._cookies = cookies
self._expired_time = int(time.time()) + expires
@property
def cookies(self):
return self._cookies
@property
def is_expired(self):
return self._expired_time < int(time.time())
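# Usage sketch (illustrative; the cookie dict shown is hypothetical and not part
# of this module):
#   cache = LeetCodeClientCache({"LEETCODE_SESSION": "..."}, expires=3600)
#   if not cache.is_expired:
#       cookies = cache.cookies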
| 21.916667
| 55
| 0.602662
|
a4a9d78cac1351250f6f71b55d34c284cd914fa4
| 11,003
|
py
|
Python
|
robot_intelligence_student/ars_msf_state_estimator_student/source/ars_msf_state_estimator_ros.py
|
Moado/Robotics-ROS
|
c5aca2dffa6c5c9376e1cda8624ed611ffb11ca0
|
[
"MIT"
] | 1
|
2020-10-15T20:49:10.000Z
|
2020-10-15T20:49:10.000Z
|
source/ars_msf_state_estimator_ros.py
|
joselusl/ars_msf_state_estimator_student
|
1fe2796b37061c83c99b20872c10db245ba2d0fd
|
[
"BSD-3-Clause"
] | null | null | null |
source/ars_msf_state_estimator_ros.py
|
joselusl/ars_msf_state_estimator_student
|
1fe2796b37061c83c99b20872c10db245ba2d0fd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
from numpy import *
import os
# ROS
import rospy
import rospkg
import std_msgs.msg
from std_msgs.msg import Header
import geometry_msgs.msg
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import QuaternionStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import TwistWithCovarianceStamped
import tf_conversions as tf
import tf2_ros
#
from ars_msf_state_estimator import *
#
import ars_lib_helpers
class ArsMsfStateEstimatorRos:
#######
# Robot frame
robot_frame = None
# World frame
world_frame = None
# State Estim loop freq
# time step
state_estim_loop_freq = None
# Timer
state_estim_loop_timer = None
# Meas Robot posi subscriber
meas_robot_posi_sub = None
# Meas Robot atti subscriber
meas_robot_atti_sub = None
# Meas Robot velocity subscriber
meas_robot_vel_robot_sub = None
# Estim Robot pose pub
estim_robot_pose_pub = None
estim_robot_pose_cov_pub = None
# Estim Robot velocity pub
estim_robot_vel_robot_pub = None
estim_robot_vel_robot_cov_pub = None
#
estim_robot_vel_world_pub = None
estim_robot_vel_world_cov_pub = None
# tf2 broadcaster
tf2_broadcaster = None
# MSF state estimator
msf_state_estimator = None
#########
def __init__(self):
# Robot frame
self.robot_frame = 'robot_estim_base_link'
# World frame
self.world_frame = 'world'
# State Estim loop freq
# time step
self.state_estim_loop_freq = 50.0
# Motion controller
self.msf_state_estimator = ArsMsfStateEstimator()
# end
return
def init(self, node_name='ars_msf_state_estimator_node'):
#
# Init ROS
rospy.init_node(node_name, anonymous=True)
# Package path
pkg_path = rospkg.RosPack().get_path('ars_msf_state_estimator')
#### READING PARAMETERS ###
# TODO
###
# End
return
def open(self):
# Subscribers
#
self.meas_robot_posi_sub = rospy.Subscriber('meas_robot_position', PointStamped, self.measRobotPositionCallback)
#
self.meas_robot_atti_sub = rospy.Subscriber('meas_robot_attitude', QuaternionStamped, self.measRobotAttitudeCallback)
#
self.meas_robot_vel_robot_sub = rospy.Subscriber('meas_robot_velocity_robot', TwistStamped, self.measRobotVelRobotCallback)
# Publishers
#
self.estim_robot_pose_pub = rospy.Publisher('estim_robot_pose', PoseStamped, queue_size=1)
#
self.estim_robot_pose_cov_pub = rospy.Publisher('estim_robot_pose_cov', PoseWithCovarianceStamped, queue_size=1)
#
self.estim_robot_vel_robot_pub = rospy.Publisher('estim_robot_velocity_robot', TwistStamped, queue_size=1)
#
self.estim_robot_vel_robot_cov_pub = rospy.Publisher('estim_robot_velocity_robot_cov', TwistWithCovarianceStamped, queue_size=1)
#
self.estim_robot_vel_world_pub = rospy.Publisher('estim_robot_velocity_world', TwistStamped, queue_size=1)
#
self.estim_robot_vel_world_cov_pub = rospy.Publisher('estim_robot_velocity_world_cov', TwistWithCovarianceStamped, queue_size=1)
# Tf2 broadcasters
self.tf2_broadcaster = tf2_ros.TransformBroadcaster()
# Timers
#
self.state_estim_loop_timer = rospy.Timer(rospy.Duration(1.0/self.state_estim_loop_freq), self.stateEstimLoopTimerCallback)
# End
return
def run(self):
rospy.spin()
return
def measRobotPositionCallback(self, robot_position_msg):
# Timestamp
timestamp = robot_position_msg.header.stamp
# Position
robot_posi = np.zeros((3,), dtype=float)
robot_posi[0] = robot_position_msg.point.x
robot_posi[1] = robot_position_msg.point.y
robot_posi[2] = robot_position_msg.point.z
#
self.msf_state_estimator.setMeasRobotPosition(timestamp, robot_posi)
# Predict
#self.msf_state_estimator.predict(timestamp)
# Update
#self.msf_state_estimator.update()
#
return
def measRobotAttitudeCallback(self, robot_attitude_msg):
# Timestamp
timestamp = robot_attitude_msg.header.stamp
# Attitude quat simp
robot_atti_quat = ars_lib_helpers.Quaternion.zerosQuat()
robot_atti_quat[0] = robot_attitude_msg.quaternion.w
robot_atti_quat[1] = robot_attitude_msg.quaternion.x
robot_atti_quat[2] = robot_attitude_msg.quaternion.y
robot_atti_quat[3] = robot_attitude_msg.quaternion.z
robot_atti_quat_simp = ars_lib_helpers.Quaternion.getSimplifiedQuatRobotAtti(robot_atti_quat)
#
self.msf_state_estimator.setMeasRobotAttitude(timestamp, robot_atti_quat_simp)
# Predict
#self.msf_state_estimator.predict(timestamp)
# Update
#self.msf_state_estimator.update()
#
return
def measRobotVelRobotCallback(self, robot_vel_msg):
# Timestamp
timestamp = robot_vel_msg.header.stamp
# Linear
lin_vel_robot = np.zeros((3,), dtype=float)
lin_vel_robot[0] = robot_vel_msg.twist.linear.x
lin_vel_robot[1] = robot_vel_msg.twist.linear.y
lin_vel_robot[2] = robot_vel_msg.twist.linear.z
# Angular
ang_vel_robot = np.zeros((1,), dtype=float)
ang_vel_robot[0] = robot_vel_msg.twist.angular.z
#
self.msf_state_estimator.setMeasRobotVelRobot(timestamp, lin_vel_robot, ang_vel_robot)
# Predict
#self.msf_state_estimator.predict(timestamp)
# Update
#self.msf_state_estimator.update()
#
return
def estimRobotPosePublish(self):
#
header_msg = Header()
header_msg.stamp = self.msf_state_estimator.estim_state_timestamp
header_msg.frame_id = self.world_frame
#
robot_pose_msg = Pose()
#
robot_pose_msg.position.x = self.msf_state_estimator.estim_robot_posi[0]
robot_pose_msg.position.y = self.msf_state_estimator.estim_robot_posi[1]
robot_pose_msg.position.z = self.msf_state_estimator.estim_robot_posi[2]
#
robot_pose_msg.orientation.w = self.msf_state_estimator.estim_robot_atti_quat_simp[0]
robot_pose_msg.orientation.x = 0.0
robot_pose_msg.orientation.y = 0.0
robot_pose_msg.orientation.z = self.msf_state_estimator.estim_robot_atti_quat_simp[1]
#
# Covariance
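    # ROS pose covariance is a 6x6 matrix ordered [x, y, z, rot_x, rot_y, rot_z].
    # The filter state only tracks [x, y, z, yaw], so the position terms map to
    # indices 0:3 and the yaw (attitude) term maps to index 5.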
covariance_pose = np.zeros((6,6), dtype=float)
# Position - Position
covariance_pose[0:3, 0:3] = self.msf_state_estimator.estim_state_cov[0:3, 0:3]
# Position - Attitude
covariance_pose[0:3, 5] = self.msf_state_estimator.estim_state_cov[0:3, 3]
# Attitude - Attitude
covariance_pose[5, 5] = self.msf_state_estimator.estim_state_cov[3, 3]
# Attitude - Position
covariance_pose[5, 0:3] = self.msf_state_estimator.estim_state_cov[3, 0:3]
#
robot_pose_stamped_msg = PoseStamped()
#
robot_pose_stamped_msg.header = header_msg
robot_pose_stamped_msg.pose = robot_pose_msg
#
robot_pose_cov_stamped_msg = PoseWithCovarianceStamped()
#
robot_pose_cov_stamped_msg.header = header_msg
robot_pose_cov_stamped_msg.pose.pose = robot_pose_msg
robot_pose_cov_stamped_msg.pose.covariance = covariance_pose.reshape((36,))
#
self.estim_robot_pose_pub.publish(robot_pose_stamped_msg)
#
self.estim_robot_pose_cov_pub.publish(robot_pose_cov_stamped_msg)
# Tf2
tf2__msg = geometry_msgs.msg.TransformStamped()
tf2__msg.header.stamp = self.msf_state_estimator.estim_state_timestamp
tf2__msg.header.frame_id = self.world_frame
tf2__msg.child_frame_id = self.robot_frame
tf2__msg.transform.translation.x = self.msf_state_estimator.estim_robot_posi[0]
tf2__msg.transform.translation.y = self.msf_state_estimator.estim_robot_posi[1]
tf2__msg.transform.translation.z = self.msf_state_estimator.estim_robot_posi[2]
tf2__msg.transform.rotation.w = self.msf_state_estimator.estim_robot_atti_quat_simp[0]
tf2__msg.transform.rotation.x = 0.0
tf2__msg.transform.rotation.y = 0.0
tf2__msg.transform.rotation.z = self.msf_state_estimator.estim_robot_atti_quat_simp[1]
# Broadcast
self.tf2_broadcaster.sendTransform(tf2__msg)
# End
return
def estimRobotVelocityPublish(self):
# TODO Finish!
#
header_wrt_world_msg = Header()
header_wrt_world_msg.stamp = self.msf_state_estimator.estim_state_timestamp
header_wrt_world_msg.frame_id = self.world_frame
#
header_wrt_robot_msg = Header()
header_wrt_robot_msg.stamp = self.msf_state_estimator.estim_state_timestamp
header_wrt_robot_msg.frame_id = self.robot_frame
#
robot_velocity_world_msg = Twist()
#
robot_velocity_world_msg.linear.x = self.msf_state_estimator.estim_robot_velo_lin_world[0]
robot_velocity_world_msg.linear.y = self.msf_state_estimator.estim_robot_velo_lin_world[1]
robot_velocity_world_msg.linear.z = self.msf_state_estimator.estim_robot_velo_lin_world[2]
#
robot_velocity_world_msg.angular.x = 0.0
robot_velocity_world_msg.angular.y = 0.0
robot_velocity_world_msg.angular.z = self.msf_state_estimator.estim_robot_velo_ang_world[0]
#
# TODO Cov
#
# TODO wrt robot
#
robot_velocity_world_stamp_msg = TwistStamped()
robot_velocity_world_stamp_msg.header = header_wrt_world_msg
robot_velocity_world_stamp_msg.twist = robot_velocity_world_msg
#
robot_velocity_world_cov_stamp_msg = TwistWithCovarianceStamped()
robot_velocity_world_cov_stamp_msg.header = header_wrt_world_msg
robot_velocity_world_cov_stamp_msg.twist.twist = robot_velocity_world_msg
# robot_velocity_world_cov_stamp_msg.twist.covariance
#
# TODO
robot_velocity_robot_stamp_msg = TwistStamped()
robot_velocity_robot_stamp_msg.header = header_wrt_robot_msg
#robot_velocity_robot_stamp_msg.twist = robot_velocity_robot_msg
#
# TODO
robot_velocity_robot_cov_stamp_msg = TwistWithCovarianceStamped()
robot_velocity_robot_cov_stamp_msg.header = header_wrt_robot_msg
#robot_velocity_robot_cov_stamp_msg.twist.twist = robot_velocity_robot_msg
# robot_velocity_robot_cov_stamp_msg.twist.covariance
#
self.estim_robot_vel_world_pub.publish(robot_velocity_world_stamp_msg)
#
self.estim_robot_vel_world_cov_pub.publish(robot_velocity_world_cov_stamp_msg)
#
self.estim_robot_vel_robot_pub.publish(robot_velocity_robot_stamp_msg)
#
self.estim_robot_vel_robot_cov_pub.publish(robot_velocity_robot_cov_stamp_msg)
#
return
def stateEstimLoopTimerCallback(self, timer_msg):
# Get time
time_stamp_current = rospy.Time.now()
# Predict
self.msf_state_estimator.predict(time_stamp_current)
# Update
self.msf_state_estimator.update()
# Publish
#
self.estimRobotPosePublish()
#
self.estimRobotVelocityPublish()
# End
return
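# Illustrative sketch (not part of the node above; hypothetical helper name): the
# "simplified" attitude quaternion published in estimRobotPosePublish() carries only
# the yaw component, i.e. rotation (w, z) = (cos(yaw/2), sin(yaw/2)) with x = y = 0.
def _example_yaw_to_quat_simp(yaw):
    import math
    # returns the two stored components (w, z) of a yaw-only quaternion
    return math.cos(0.5 * yaw), math.sin(0.5 * yaw)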
| 25.17849
| 132
| 0.751341
|
d4cf87fe2697bc579218c452197ad564e1215df2
| 1,713
|
py
|
Python
|
dblib/dbUpdate.py
|
DepthDeluxe/dot11sniffer
|
4c283122e158a854b940136a9675d3e0515ee219
|
[
"MIT"
] | 4
|
2016-11-29T16:09:54.000Z
|
2021-11-12T10:34:49.000Z
|
dblib/dbUpdate.py
|
DepthDeluxe/dot11sniffer
|
4c283122e158a854b940136a9675d3e0515ee219
|
[
"MIT"
] | null | null | null |
dblib/dbUpdate.py
|
DepthDeluxe/dot11sniffer
|
4c283122e158a854b940136a9675d3e0515ee219
|
[
"MIT"
] | 1
|
2021-11-12T10:34:49.000Z
|
2021-11-12T10:34:49.000Z
|
import pymongo
import multiprocessing
import multiprocessing.connection
import time
SIZE = 128
NUM_NODES = 3
def recv_data(sock,dataQueue,cQueue):
connect = sock.accept()
cQueue.put("listen")
data = connect.recv()
dataQueue.put(data)
connect.close()
print("received data")
exit(0)
def db_send(database,queue):
collection = database.times
t = int(time.time())
doc = int(t/600)
for i in range(queue.qsize()):
data = queue.get()
data = data.split(',')
for j in range(0,len(data)-3,4):
new_posts = {}
new_posts.update({'data':{"mac":data[j+3],'node':data[0],'time':int(data[j+1]),'sigstr':int(data[j+2])}})
collection.update({'_id':doc},{"$addToSet":new_posts},upsert=True)
## dic = {'node':temp[0],'time':temp[1],'sigstr':temp[2],'mac':temp[3]}
## new_posts.append(dic)
## posts.insert_many(new_posts)
print("sent")
exit(0)
def server(host,port):
client = pymongo.MongoClient()
db = client.cheddar
sock = multiprocessing.connection.Listener((host,port))
dq = multiprocessing.Queue()
cq = multiprocessing.Queue()
cq.put("listen")
while True:
try:
task = cq.get(True,1)
except:
task = "none"
if task == "listen":
print("spawning listening thread")
p = multiprocessing.Process(target=recv_data, args=(sock,dq,cq))
p.start()
## if (dq.qsize() == 100):
if dq.qsize() != 0:
print("spawning sending thread")
p = multiprocessing.Process(target=db_send,args=(db,dq))
p.start()
## pass
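# Illustrative sketch (not part of the original script; hypothetical helper): a client
# that feeds this server one record in the "node,time,sigstr,mac" comma-separated
# layout parsed by db_send() above. Host/port mirror the server('', 10000) call below.
def _example_send_record(host="localhost", port=10000):
    conn = multiprocessing.connection.Client((host, port))
    conn.send("node-1,1500000000,-42,aa:bb:cc:dd:ee:ff")
    conn.close()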
server('',10000)
| 27.190476
| 117
| 0.572096
|
b370c45e01e7bfb6e0491b4cd3e70c6f646c1667
| 77
|
py
|
Python
|
openwisp_network_topology/management/commands/update_topology.py
|
DaffyTheDuck/openwisp-network-topology
|
a8c9212f0d9cca76f83b41af0e3fc89330f408bb
|
[
"BSD-3-Clause"
] | 105
|
2017-06-14T06:06:16.000Z
|
2022-03-29T18:50:38.000Z
|
openwisp_network_topology/management/commands/update_topology.py
|
DaffyTheDuck/openwisp-network-topology
|
a8c9212f0d9cca76f83b41af0e3fc89330f408bb
|
[
"BSD-3-Clause"
] | 127
|
2017-06-02T08:19:13.000Z
|
2022-03-18T00:26:13.000Z
|
openwisp_network_topology/management/commands/update_topology.py
|
ManishShah120/openwisp-network-topology
|
0ed720eff1eb733a00cdbfc83292f16fe7d56e12
|
[
"BSD-3-Clause"
] | 62
|
2017-06-21T10:28:10.000Z
|
2022-03-31T22:06:09.000Z
|
from . import BaseUpdateCommand
class Command(BaseUpdateCommand):
pass
| 12.833333
| 33
| 0.779221
|
48c276f5e53c24058590b50a794d100208b336f8
| 2,178
|
py
|
Python
|
examples/market_maker/on_chain_market_maker.v.py
|
rohithpr/vyper
|
f05ff1b3d91be3f6b5b9afb843ed4e2e6fcabf77
|
[
"MIT"
] | null | null | null |
examples/market_maker/on_chain_market_maker.v.py
|
rohithpr/vyper
|
f05ff1b3d91be3f6b5b9afb843ed4e2e6fcabf77
|
[
"MIT"
] | null | null | null |
examples/market_maker/on_chain_market_maker.v.py
|
rohithpr/vyper
|
f05ff1b3d91be3f6b5b9afb843ed4e2e6fcabf77
|
[
"MIT"
] | null | null | null |
units: {
currency_value: "a currency"
}
total_eth_qty: public(wei_value)
total_token_qty: public(uint256(currency_value))
# Constant set in `initiate` that's used to calculate
# the amount of ether/tokens that are exchanged
invariant: public(uint256(wei * currency_value))
token_address: address(ERC20)
owner: public(address)
# Sets the on chain market maker with its owner, intial token quantity,
# and initial ether quantity
@public
@payable
def initiate(token_addr: address, token_quantity: uint256(currency_value)):
assert self.invariant == 0
self.token_address = token_addr
self.token_address.transferFrom(msg.sender, self, as_unitless_number(token_quantity))
self.owner = msg.sender
self.total_eth_qty = msg.value
self.total_token_qty = token_quantity
self.invariant = msg.value * token_quantity
assert self.invariant > 0
# Sells ether to the contract in exchange for tokens (minus a fee)
@public
@payable
def eth_to_tokens():
fee: wei_value = msg.value / 500
eth_in_purchase: wei_value = msg.value - fee
new_total_eth: wei_value = self.total_eth_qty + eth_in_purchase
new_total_tokens: uint256(currency_value) = self.invariant / new_total_eth
self.token_address.transfer(msg.sender, as_unitless_number(self.total_token_qty - new_total_tokens))
self.total_eth_qty = new_total_eth
self.total_token_qty = new_total_tokens
# Sells tokens to the contract in exchange for ether
@public
def tokens_to_eth(sell_quantity: uint256(currency_value)):
self.token_address.transferFrom(msg.sender, self, as_unitless_number(sell_quantity))
new_total_tokens: uint256(currency_value) = self.total_token_qty + sell_quantity
new_total_eth: wei_value = self.invariant / new_total_tokens
eth_to_send: wei_value = self.total_eth_qty - new_total_eth
send(msg.sender, eth_to_send)
self.total_eth_qty = new_total_eth
self.total_token_qty = new_total_tokens
# Owner can withdraw their funds and destroy the market maker
@public
def owner_withdraw():
assert self.owner == msg.sender
self.token_address.transfer(self.owner, as_unitless_number(self.total_token_qty))
selfdestruct(self.owner)
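# Illustrative sketch (Python, not part of the Vyper contract above): eth_to_tokens()
# prices trades with the constant-product rule invariant = total_eth * total_tokens,
# after deducting a 1/500 (0.2%) fee from the incoming ether. Values are hypothetical.
def _example_eth_to_tokens(total_eth, total_tokens, eth_in):
    invariant = total_eth * total_tokens
    fee = eth_in // 500                        # same integer fee rule as the contract
    new_total_eth = total_eth + (eth_in - fee)
    new_total_tokens = invariant // new_total_eth
    tokens_out = total_tokens - new_total_tokens
    return tokens_out, new_total_eth, new_total_tokens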
| 38.892857
| 104
| 0.77686
|
fcb97bd400bd67d926b7315ba309a39ba1218971
| 2,114
|
py
|
Python
|
sjb/graph.py
|
brenton/aos-cd-jobs
|
34e427bb7091c52791bc93a34f062e57dc005082
|
[
"Apache-2.0"
] | 45
|
2017-05-09T15:49:06.000Z
|
2021-11-07T19:48:35.000Z
|
sjb/graph.py
|
brenton/aos-cd-jobs
|
34e427bb7091c52791bc93a34f062e57dc005082
|
[
"Apache-2.0"
] | 1,313
|
2017-01-19T13:40:43.000Z
|
2022-03-30T14:25:44.000Z
|
sjb/graph.py
|
brenton/aos-cd-jobs
|
34e427bb7091c52791bc93a34f062e57dc005082
|
[
"Apache-2.0"
] | 165
|
2017-01-17T22:19:04.000Z
|
2022-03-02T12:15:13.000Z
|
from __future__ import print_function
from os import listdir
from os.path import isfile, join
import yaml
import json
def getGraphvizDotFormat(graph):
nodes = graph.nodes()
edges = graph.edges()
content = "digraph {\n"
for u in nodes:
if u in edges:
for v in edges[u]:
content += "\"%s\" -> \"%s\";\n" % (u.replace('-', '_'), v.replace('-', '_'))
for node in nodes:
content += "\"%s\" [style=filled, fillcolor=orange]\n" % node.replace('-', '_')
content += "}\n"
return content
class Graph(object):
def __init__(self, nodes = set([]), edges = {}):
self._nodes = nodes
self._edges = edges
def nodes(self):
return self._nodes
def edges(self):
return self._edges
def __str__(self):
return json.dumps({
"nodes": list(self._nodes),
"edges": self._edges
})
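# Illustrative sketch (not part of the original script; node names are made up): how
# the Graph container and getGraphvizDotFormat() above fit together.
def _example_dot_output():
    g = Graph(nodes={"suite-a", "case-b"}, edges={"suite-a": ["case-b"]})
    # yields a digraph with one edge and both nodes filled orange
    return getGraphvizDotFormat(g)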
def name2node(name):
if name.startswith("common/test_cases"):
name = "C/TC/\n{}".format(name[18:])
if name.startswith("common/test_suites"):
name = "C/TS/\n{}".format(name[19:])
if name.startswith("test_cases"):
name = "TC/\n{}".format(name[11:])
if name.startswith("test_suites"):
name = "TS\n/{}".format(name[12:])
if name.endswith(".yml"):
return name[:-4]
return name
def constructSubgraph(dir, graph):
nodes = graph.nodes()
edges = graph.edges()
for f in listdir(dir):
filepath = join(dir, f)
if not isfile(filepath):
continue
node = name2node(filepath)
nodes.add(node)
with open(filepath, "r") as f:
            data = yaml.safe_load(f)  # safe_load: no arbitrary object construction from YAML
if "children" in data:
for child in data["children"]:
childpath = join("test_cases", child)
childnode = name2node(childpath)
nodes.add(childnode)
try:
edges[node].append(childnode)
except KeyError:
edges[node] = [childnode]
if "parent" in data:
parentnode = name2node(data["parent"])
try:
edges[node].append(parentnode)
except KeyError:
edges[node] = [parentnode]
graph = Graph()
constructSubgraph("common/test_suites", graph)
constructSubgraph("common/test_cases", graph)
constructSubgraph("test_suites", graph)
constructSubgraph("test_cases", graph)
print(getGraphvizDotFormat(graph))
| 21.571429
| 81
| 0.66036
|
a36907e5163eeed0454b24e80b58be258e660c11
| 9,142
|
py
|
Python
|
gluon/packages/dal/pydal/representers/__init__.py
|
guadaltech/web2py-ruben
|
45e0f4f316774e707a3075f23e3f8b9fed00c387
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/packages/dal/pydal/representers/__init__.py
|
guadaltech/web2py-ruben
|
45e0f4f316774e707a3075f23e3f8b9fed00c387
|
[
"BSD-3-Clause"
] | null | null | null |
gluon/packages/dal/pydal/representers/__init__.py
|
guadaltech/web2py-ruben
|
45e0f4f316774e707a3075f23e3f8b9fed00c387
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import defaultdict
from .._compat import PY2, with_metaclass, iteritems, to_unicode, to_bytes, \
string_types
from .._gae import gae
from ..helpers._internals import Dispatcher
from ..helpers.regex import REGEX_TYPE
representers = Dispatcher("representer")
class for_type(object):
def __init__(self, field_type, encode=False, adapt=True):
self.field_type = field_type
self.encode = encode
self.adapt = adapt
def __call__(self, f):
self.f = f
return self
class before_type(object):
def __init__(self, field_type):
self.field_type = field_type
def __call__(self, f):
self.f = f
return self
class for_instance(object):
def __init__(self, inst_type, repr_type=False):
self.inst_type = inst_type
self.repr_type = repr_type
def __call__(self, f):
self.f = f
return self
class pre(object):
_inst_count_ = 0
def __init__(self, is_breaking=None):
self.breaking = is_breaking
self._inst_count_ = pre._inst_count_
pre._inst_count_ += 1
def __call__(self, f):
self.f = f
return self
class MetaRepresenter(type):
def __new__(cls, name, bases, attrs):
new_class = type.__new__(cls, name, bases, attrs)
if bases == (object,):
return new_class
#: collect declared attributes
trepresenters = {}
irepresenters = {}
tbefore = {}
pres = {}
for key, value in list(attrs.items()):
if isinstance(value, for_type):
trepresenters[key] = value
elif isinstance(value, before_type):
tbefore[key] = value
elif isinstance(value, for_instance):
irepresenters[key] = value
elif isinstance(value, pre):
pres[key] = value
#: get super declared attributes
declared_trepresenters = {}
declared_irepresenters = {}
declared_tbefore = {}
declared_pres = {}
for base in reversed(new_class.__mro__[1:]):
if hasattr(base, '_declared_trepresenters_'):
declared_trepresenters.update(base._declared_trepresenters_)
if hasattr(base, '_declared_irepresenters_'):
declared_irepresenters.update(base._declared_irepresenters_)
if hasattr(base, '_declared_tbefore_'):
declared_tbefore.update(base._declared_tbefore_)
if hasattr(base, '_declared_pres_'):
declared_pres.update(base._declared_pres_)
#: set trepresenters
declared_trepresenters.update(trepresenters)
declared_irepresenters.update(irepresenters)
declared_tbefore.update(tbefore)
declared_pres.update(pres)
new_class._declared_trepresenters_ = declared_trepresenters
new_class._declared_irepresenters_ = declared_irepresenters
new_class._declared_tbefore_ = declared_tbefore
new_class._declared_pres_ = declared_pres
return new_class
class TReprMethodWrapper(object):
def __init__(self, representer, obj, extra=None):
self.representer = representer
self.obj = obj
if extra:
self.extra = extra
self.call = self._call_with_extras
else:
self.call = self._call
if self.obj.encode and PY2:
self.inner_call = self._inner_call_with_encode
else:
self.inner_call = self._inner_call
if self.obj.adapt:
self.adapt = self._adapt
else:
self.adapt = self._no_adapt
def _adapt(self, value):
return self.representer.adapt(value)
def _no_adapt(self, value):
return value
def _inner_call(self, value, **kwargs):
return self.obj.f(self.representer, value, **kwargs)
def _inner_call_with_encode(self, value, **kwargs):
if isinstance(value, unicode):
value = value.encode(self.representer.adapter.db_codec)
return self.obj.f(self.representer, value, **kwargs)
def _call_with_extras(self, value, field_type):
extras = self.extra(self.representer, field_type)
return self.inner_call(value, **extras)
def _call(self, value, field_type):
return self.inner_call(value)
def __call__(self, value, field_type):
return self.adapt(self.call(value, field_type))
class IReprMethodWrapper(object):
def __init__(self, representer, obj):
self.representer = representer
self.obj = obj
def __call__(self, value, field_type):
rv = self.obj.f(self.representer, value, field_type)
return self.obj.repr_type, rv
class PreMethodWrapper(object):
def __init__(self, representer, obj):
self.representer = representer
self.obj = obj
if self.obj.breaking is None:
self.call = self._call_autobreak
elif self.obj.breaking == True:
self.call = self._call_break
else:
self.call = self._call_nobreak
def _call_autobreak(self, value, field_type):
rv = self.obj.f(self.representer, value, field_type)
if rv is not None:
return True, rv
return False, value
def _call_break(self, value, field_type):
return self.obj.f(
self.representer, value, field_type)
def _call_nobreak(self, value, field_type):
return False, self.obj.f(self.representer, value, field_type)
def __call__(self, value, field_type):
return self.call(value, field_type)
class Representer(with_metaclass(MetaRepresenter)):
def __init__(self, adapter):
self.adapter = adapter
self.dialect = adapter.dialect
self._tbefore_registry_ = {}
for name, obj in iteritems(self._declared_tbefore_):
self._tbefore_registry_[obj.field_type] = obj.f
self.registered_t = defaultdict(lambda self=self: self._default)
for name, obj in iteritems(self._declared_trepresenters_):
if obj.field_type in self._tbefore_registry_:
self.registered_t[obj.field_type] = TReprMethodWrapper(
self, obj, self._tbefore_registry_[obj.field_type]
)
else:
self.registered_t[obj.field_type] = TReprMethodWrapper(
self, obj
)
self.registered_i = {}
for name, obj in iteritems(self._declared_irepresenters_):
self.registered_i[obj.inst_type] = IReprMethodWrapper(self, obj)
self._pre_registry_ = []
pres = []
for name, obj in iteritems(self._declared_pres_):
pres.append(obj)
pres.sort(key=lambda x: x._inst_count_)
for pre in pres:
self._pre_registry_.append(PreMethodWrapper(self, pre))
def _default(self, value, field_type):
return self.adapt(value)
def _default_instance(self, value, field_type):
return True, value
def get_representer_for_instance(self, value):
for inst, representer in iteritems(self.registered_i):
if isinstance(value, inst):
return representer
return self._default_instance
def get_representer_for_type(self, field_type):
key = REGEX_TYPE.match(field_type).group(0)
return self.registered_t[key]
def adapt(self, value):
if PY2:
if not isinstance(value, string_types):
value = str(value)
value = to_bytes(value)
try:
value.decode(self.adapter.db_codec)
except:
value = value.decode('latin1').encode(self.adapter.db_codec)
else:
value = to_unicode(value)
return self.adapter.adapt(value)
def exceptions(self, value, field_type):
return None
def represent(self, value, field_type):
pre_end = False
for pre in self._pre_registry_:
pre_end, value = pre(value, field_type)
if pre_end:
break
if pre_end:
return value
repr_type, rv = self.get_representer_for_instance(value)(
value, field_type)
if repr_type:
rv = self.get_representer_for_type(field_type)(rv, field_type)
return rv
from .base import BaseRepresenter, SQLRepresenter, NoSQLRepresenter
from .sqlite import SQLiteRepresenter, SpatialiteRepresenter
from .postgre import PostgreRepresenter
from .mysql import MySQLRepresenter
from .mssql import MSSQLRepresenter
from .mongo import MongoRepresenter
from .db2 import DB2Representer
from .informix import InformixRepresenter
from .oracle import OracleRepresenter
from .couchdb import CouchDBRepresenter
if gae is not None:
from .google import GoogleDatastoreRepresenter
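# Illustrative sketch (not part of pydal): how the for_type/pre decorators above are
# meant to be used by a concrete representer. The field handling below is a
# hypothetical example, not pydal's real representation rules.
class _ExampleRepresenter(Representer):
    @for_type("integer", adapt=False)
    def _example_integer(self, value):
        # render integers as plain SQL literals, skipping string adaptation
        return str(int(value))

    @pre()
    def _example_none(self, value, field_type):
        # short-circuit NULLs before any type-specific representer runs
        if value is None:
            return "NULL"
        return None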
| 34.11194
| 78
| 0.620324
|
6f54ea7a9bb4cbb98b6a652dd8fae11258f3e677
| 264
|
py
|
Python
|
avalanche/evaluation/metrics/__init__.py
|
lrzpellegrini/avalanche_pre_public
|
522019a55ce08b92c1ec74b508a8ea6ae8751dfd
|
[
"MIT"
] | 12
|
2021-04-16T15:49:59.000Z
|
2022-02-27T18:04:58.000Z
|
avalanche/evaluation/metrics/__init__.py
|
lrzpellegrini/avalanche_pre_public
|
522019a55ce08b92c1ec74b508a8ea6ae8751dfd
|
[
"MIT"
] | null | null | null |
avalanche/evaluation/metrics/__init__.py
|
lrzpellegrini/avalanche_pre_public
|
522019a55ce08b92c1ec74b508a8ea6ae8751dfd
|
[
"MIT"
] | 2
|
2021-06-22T04:11:52.000Z
|
2021-11-12T03:27:18.000Z
|
from .mean import *
from .accuracy import *
from .confusion_matrix import *
from .cpu_usage import *
from .disk_usage import *
from .forgetting import *
from .gpu_usage import *
from .loss import *
from .mac import *
from .ram_usage import *
from .timing import *
| 22
| 31
| 0.75
|
6e8b15878b8d8b90a79f6557b61b5d99716eb6e6
| 3,281
|
py
|
Python
|
sign.py
|
FOULAB/foubot-modules
|
87423c4f4036b1e5d31d33e54a1a0b82f30e27ba
|
[
"BSD-3-Clause"
] | 2
|
2019-10-04T23:12:20.000Z
|
2019-10-21T23:43:45.000Z
|
sign.py
|
FOULAB/foubot-modules
|
87423c4f4036b1e5d31d33e54a1a0b82f30e27ba
|
[
"BSD-3-Clause"
] | 3
|
2016-02-10T03:21:57.000Z
|
2019-07-22T15:31:03.000Z
|
sign.py
|
FOULAB/foubot-modules
|
87423c4f4036b1e5d31d33e54a1a0b82f30e27ba
|
[
"BSD-3-Clause"
] | 2
|
2016-12-03T04:47:11.000Z
|
2019-10-21T23:43:46.000Z
|
from ibid.plugins import Processor, match, periodic, handler
import ibid.source.irc
import ibid
from ledsign import LEDSign
import subprocess
import datetime
features = {'sign': {
'description': u'Displays messages on the Foulab LED sign.',
'categories': ('message',),
}}
lab_channel = u"##foulab"
#Monkey patch topic tracking into the ibid Ircbot library
ibid.source.irc.Ircbot.topics = {}
def topicUpdated( self, user, channel, topic ):
self.topics[ channel ] = topic
print "Topic for %s is %s, set by %s" % ( channel, topic, user )
def get_topic( self, channel ):
return self.topics[ channel ]
ibid.source.irc.Ircbot.topicUpdated = topicUpdated
ibid.source.irc.Ircbot.get_topic = get_topic
beepdisc = {
"SIGN" : "beep -f1 -l150 -r10 -d250".split(),
"SECRET" : "beep -l145 -f14.75 -n -l130 -f13.96 -n -l135 -f11.74 -n -l135 -f9.89 -n -l130 -f7.87 -n -l135 -f12.55 -n -l135 -f15.63 -n -l265 -f19.90".split(),
"OCARINA" : "beep -l500 -f44 -n -l1000 -f29.5 -n -l500 -f35 -n -l500 -f44 -n -l1000 -f29.5 -n -l500 -f35 -n -l250 -f44 -n -l250 -f52 -n -l500 -f49 -n -l500 -f39 -n -l250 -f35.5 -n -l250 -f39 -n -l500 -f44 -n -l500 -f29 -n -l250 -f26 -n -l250 -f32.5 -n -l750 -f29".split(),
}
class Sign( Processor ):
usage = u'!sign <message>'
feature = ('sign',)
addressed = False
old_status = 0
def __init__( self, name ):
Processor.__init__( self, name )
self.s = LEDSign()
@handler
def handle( self, event ):
print "I am handling this event: " + repr(event)
@match(r'^!sign\s(.*)$', version='deaddressed')
def sign( self, event, message ):
try:
p = subprocess.Popen( beepdisc["SIGN"] )
p.wait()
            self.s.print_message( 0, ''.join(["From: ", event.sender['nick'], datetime.datetime.today().strftime(" %d%b %H:%M") ] ) )
self.s.print_message( 1, message )
event.addresponse( True )
except Exception:
event.addresponse( "I can't see the sign. Are you sure it's plugged in? Have you tried turning it off and on again?" )
@match(r'^!status$')
def status( self, event ):
try:
status = self.s.get_status()
if status:
event.addresponse("open")
else:
event.addresponse("closed")
except Exception:
event.addresponse( "I can't see the sign. Are you sure it's plugged in? Have you tried turning it off and on again?" )
@periodic( interval = 1, initial_delay = 1 )
def update_status( self, event ):
status = self.s.get_status()
if( status != self.old_status ):
topic = ibid.sources['freenode'].proto.get_topic( lab_channel )
topic_parts = topic.split( u"//" )
topiclen = len( topic_parts )
pos = 0
while pos < topiclen:
if "Lab status: " in topic_parts[pos]:
break
pos += 1
if pos == topiclen:
topic_parts.append("")
topic_parts[ pos ] = " Lab status: OPEN " if status else " Lab status: CLOSED "
event.addresponse( u"//".join( topic_parts ), topic = True, target = lab_channel, address = False, source = u"freenode" )
self.old_status = status
alert = "SECRET" if status else "OCARINA"
p = subprocess.Popen(beepdisc[alert])
p.wait()
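# Illustrative sketch (not part of the original plugin; hypothetical helper): the topic
# rewrite performed by update_status() above, i.e. find or append the "Lab status:"
# segment between "//" separators and set it to OPEN or CLOSED.
def _example_rewrite_topic(topic, is_open):
    parts = topic.split(u"//")
    pos = 0
    while pos < len(parts):
        if "Lab status: " in parts[pos]:
            break
        pos += 1
    if pos == len(parts):
        parts.append("")
    parts[pos] = " Lab status: OPEN " if is_open else " Lab status: CLOSED "
    return u"//".join(parts)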
| 32.166667
| 287
| 0.613228
|
90f4af7e3ea6291dff145d4e87024f95729c330d
| 187
|
py
|
Python
|
pltform/swami/five_thirty_eight.py
|
crashka/bfp
|
211a224628659d7b69e30ff108b6d92ea76fbef1
|
[
"MIT"
] | null | null | null |
pltform/swami/five_thirty_eight.py
|
crashka/bfp
|
211a224628659d7b69e30ff108b6d92ea76fbef1
|
[
"MIT"
] | null | null | null |
pltform/swami/five_thirty_eight.py
|
crashka/bfp
|
211a224628659d7b69e30ff108b6d92ea76fbef1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .ext_data import SwamiExtData
class SwamiFiveThirtyEight(SwamiExtData):
"""History of predictions from the fivethirtyeight.com website
"""
pass
| 20.777778
| 66
| 0.71123
|
6555497d3b1d02c8c5a3ee7456d0aa919ce8ab21
| 6,075
|
py
|
Python
|
mogptk/gpr/parameter.py
|
vishalbelsare/mogptk
|
4f7001fbfacea778bd62a1e4e6c5b404c473e313
|
[
"MIT"
] | null | null | null |
mogptk/gpr/parameter.py
|
vishalbelsare/mogptk
|
4f7001fbfacea778bd62a1e4e6c5b404c473e313
|
[
"MIT"
] | null | null | null |
mogptk/gpr/parameter.py
|
vishalbelsare/mogptk
|
4f7001fbfacea778bd62a1e4e6c5b404c473e313
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as functional
from .config import config
class Transform:
def forward(self, x):
# unconstrained to constrained space
raise NotImplementedError()
def inverse(self, y):
# constrained to unconstrained space
raise NotImplementedError()
class Softplus(Transform):
def __init__(self, lower=0.0, beta=0.1, threshold=20.0):
self.beta = beta
self.lower = lower
self.threshold = threshold
def forward(self, x):
return self.lower + functional.softplus(x, beta=self.beta, threshold=self.threshold)
def inverse(self, y):
if torch.any(y < self.lower):
raise ValueError("values must be at least %s" % self.lower)
return y-self.lower + torch.log(-torch.expm1(-self.beta*(y-self.lower)))/self.beta
class Sigmoid(Transform):
def __init__(self, lower=0.0, upper=1.0):
self.lower = lower
self.upper = upper
def forward(self, x):
return self.lower + (self.upper-self.lower)*torch.sigmoid(x)
def inverse(self, y):
if torch.any(y < self.lower) or torch.any(self.upper < y):
raise ValueError("values must be between %s and %s" % (self.lower, self.upper))
y = (y-self.lower)/(self.upper-self.lower)
return torch.log(y) - torch.log(1-y)
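# Illustrative sketch (not part of mogptk; hypothetical helper): the transforms above
# map an unconstrained tensor x to a constrained value y and back, so
# inverse(forward(x)) recovers x up to floating-point error.
def _example_transform_roundtrip():
    t = Softplus(lower=0.5)
    x = torch.tensor(1.2345)
    y = t.forward(x)     # strictly greater than the lower bound 0.5
    return t.inverse(y)  # approximately 1.2345 again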
class Parameter:
def __init__(self, value, name=None, lower=None, upper=None, prior=None, trainable=True):
self.name = name
self.lower = None
self.upper = None
self.prior = prior
self.trainable = trainable
self.transform = None
self.unconstrained = None
self.assign(value, lower=lower, upper=upper)
def __repr__(self):
if self.name is None:
return '{}'.format(self.constrained.tolist())
return '{}={}'.format(self.name, self.constrained.tolist())
def __call__(self):
return self.constrained
@property
def constrained(self):
if self.transform is not None:
return self.transform.forward(self.unconstrained)
return self.unconstrained
def numpy(self):
return self.constrained.detach().cpu().numpy()
def assign(self, value=None, name=None, lower=None, upper=None, prior=None, trainable=None):
if value is not None:
if not isinstance(value, torch.Tensor):
value = torch.tensor(value, device=config.device, dtype=config.dtype)
else:
value = value.detach().to(config.device, config.dtype)
if self.unconstrained is not None:
origshape = value.shape
while len(value.shape) < len(self.unconstrained.shape) and self.unconstrained.shape[len(value.shape)] == 1:
value = value.unsqueeze(-1)
while len(self.unconstrained.shape) < len(value.shape) and value.shape[-1] == 1:
value = value.squeeze(-1)
if value.shape != self.unconstrained.shape:
raise ValueError("parameter shape must match: %s != %s" % (origshape, self.unconstrained.shape))
else:
value = self.unconstrained.detach()
if lower is not None:
if not isinstance(lower, torch.Tensor):
lower = torch.tensor(lower, device=config.device, dtype=config.dtype)
else:
lower = lower.detach().to(config.device, config.dtype)
if len(lower.shape) != 0:
while len(lower.shape) < len(value.shape) and value.shape[len(lower.shape)] == 1:
lower = lower.unsqueeze(-1)
while len(value.shape) < len(lower.shape) and lower.shape[-1] == 1:
lower = lower.squeeze(-1)
if lower.shape != value.shape:
raise ValueError("lower and value must match shapes: %s != %s" % (lower.shape, value.shape))
else:
lower = self.lower
if upper is not None:
if not isinstance(upper, torch.Tensor):
upper = torch.tensor(upper, device=config.device, dtype=config.dtype)
else:
upper = upper.detach().to(config.device, config.dtype)
if len(upper.shape) != 0:
while len(upper.shape) < len(value.shape) and value.shape[len(upper.shape)] == 1:
upper = upper.unsqueeze(-1)
while len(value.shape) < len(upper.shape) and upper.shape[-1] == 1:
upper = upper.squeeze(-1)
if upper.shape != value.shape:
raise ValueError("upper and value must match shapes: %s != %s" % (upper.shape, value.shape))
else:
upper = self.upper
if name is None:
name = self.name
if prior is None:
prior = self.prior
if trainable is None:
trainable = self.trainable
transform = None
if lower is not None and upper is not None:
lower_tmp = torch.min(lower, upper)
upper = torch.max(lower, upper)
lower = lower_tmp
transform = Sigmoid(lower=lower, upper=upper)
elif lower is not None:
transform = Softplus(lower=lower)
elif upper is not None:
transform = Softplus(lower=upper, beta=-1.0)
if transform is not None:
if lower is not None:
value = torch.where(value < lower, lower * torch.ones_like(value), value)
if upper is not None:
value = torch.where(upper < value, upper * torch.ones_like(value), value)
value = transform.inverse(value)
value.requires_grad = True
self.name = name
self.prior = prior
self.lower = lower
self.upper = upper
self.trainable = trainable
self.transform = transform
self.unconstrained = value
def log_prior(self):
if self.prior is None:
return 0.0
return self.prior.log_prob(self()).sum()
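# Illustrative sketch (not part of mogptk; hypothetical helper): a bounded Parameter
# keeps an unconstrained tensor internally and exposes the constrained value through
# the Sigmoid/Softplus transforms above; optimizers act on the unconstrained tensor.
def _example_parameter_usage():
    p = Parameter(1.0, name="lengthscale", lower=0.1, upper=10.0)
    constrained = p()                # tensor squashed into [0.1, 10.0]
    unconstrained = p.unconstrained  # free tensor with requires_grad=True
    return constrained, unconstrained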
| 38.207547
| 123
| 0.579753
|
e4e6a26046f32847e9c7df01bb03509bf3b76bda
| 7,747
|
py
|
Python
|
old/app/main.py
|
lucycore/hlby_web
|
c50c361656501196bc620aaaa5c075278001c350
|
[
"MIT"
] | null | null | null |
old/app/main.py
|
lucycore/hlby_web
|
c50c361656501196bc620aaaa5c075278001c350
|
[
"MIT"
] | null | null | null |
old/app/main.py
|
lucycore/hlby_web
|
c50c361656501196bc620aaaa5c075278001c350
|
[
"MIT"
] | null | null | null |
import os
from urllib.parse import unquote
from flask import Flask, render_template, session, redirect, url_for, flash,request, g
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField,PasswordField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_required
from flask_login import logout_user, login_user
#This is a small image-and-text message app for 黑利博瑞 (Heliborui)
#Initialize login
login_manager = LoginManager()
#Initialize paths
basedir = os.path.abspath(os.path.dirname(__file__))
#Initialize the app
app = Flask(__name__)
#Initialize database settings
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['JSON_AS_ASCII'] = False
app.config['SQLALCHEMY_DATABASE_URI'] =\
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#Initialize login security settings
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
login_manager.init_app(app)
app.config['SECRET_KEY']='qazxswqwedsazxc'
#Initialize extensions
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
#Database --------- model declarations ----------
#Import login module
class Years(db.Model):
    #Model for storing academic-year records
__tablename__ = 'years'
    #Set up the id column
id = db.Column(db.Integer, primary_key = True)
year = db.Column(db.String(64), unique=True, index=True)
describe = db.Column(db.Text)
photo = db.Column(db.String(64))
    #Referenced by Det's foreign key (one-to-many relationship)
dets = db.relationship('Det', backref='role')
    #String representation
def __repr__(self):
return "%r*%r*%r" % (self.year,self.describe,self.photo)
class Det(db.Model):
    #Model for storing photo (activity detail) records
__tablename__ = 'det'
    #Set up the id column
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), unique=True, index=True)
describe = db.Column(db.Text)
photo = db.Column(db.String(64))
body = db.Column(db.Text)
    #Define the foreign key (this comment may be inaccurate)
year_id = db.Column(db.Integer, db.ForeignKey('years.id'))
    #String representation
def __repr__(self):
return "%r*%r*%r*%r" % (self.name,self.describe,self.photo,self.body)
#Database --------- model declarations ----------
#URL mapping ------ ------- ------- ------- --------
#Error handler
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
#Error handler
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
#Home page route
@app.route('/')
def index():
    #Fetch all year records from the database
a = Years.query.all()
z_list = []
for x in a:
        #Split each record inside the loop
a = str(x)
b = a.split("*")
wcl_list = []
for y in b :
c = y.strip('\'')
wcl_list.append(c)
z_list.append(wcl_list)
return render_template('index.html',lista = z_list)
#Activity selection route
@app.route('/years/<v>/')
def year(v):
    #Build the year/term page: self.year, self.describe, self.photo
    #self.activity, self.describe, self.photo
    #Query Years by the parameter v, then fetch the activity detail records linked to it
a = Years.query.filter_by(year=v).first()
b = Det.query.filter_by(role=a).all()
z_list = []
for x in b:
a = str(x)
b = a.split("*")
wcl_list = []
for y in b :
c = y.strip('\'')
wcl_list.append(c)
z_list.append(wcl_list)
return render_template('one.html',lista = z_list)
#Detail (image-and-text) page route
@app.route('/det/<v>/')
def det(v):
    #Activity detail: self.name, self.describe, self.photo, self.body
    #Function that looks up a specific article; the parameter v selects the activity
det_list = Det.query.filter_by(name=v).first()
a = str(det_list)
b = a.split("*")
wcl_list = []
for y in b :
c = y.strip('\'')
wcl_list.append(c)
z_list = []
zw = wcl_list[-1]
zw = zw.split("295ff6c3-032b-4a83-a397-1cc0e754f785")
for x in zw:
x = x.split("f355c387-57f6-4734-af7e-26af5293d970")
z_list.append(x)
return render_template('det.html',lista = z_list)
def two_list_chuli(a):
z_list = []
for x in a:
a = str(x)
b = a.split("*")
wcl_list = []
for y in b :
c = y.strip('\'')
wcl_list.append(c)
z_list.append(wcl_list)
return z_list
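# Illustrative sketch (not part of the original app; the sample row is hypothetical):
# two_list_chuli() above splits the "%r*%r*%r" repr produced by the Years/Det models
# on '*' and strips the surrounding quotes from each field.
def _example_two_list_chuli():
    rows = ["'2019'*'Autumn term activities'*'year2019.jpg'"]
    # returns [['2019', 'Autumn term activities', 'year2019.jpg']]
    return two_list_chuli(rows)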
@app.route('/api/',methods=['POST'])
def apidk():
    #API POST handler
    #Important!!! Handling Chinese text in POST submissions
text = request.get_data()
    #Important!!! Handling Chinese text in POST submissions
if text is not None:
        #Important!!! Handling Chinese text in POST submissions
text = unquote(str(text), 'utf-8')
    #Important!!! Handling Chinese text in POST submissions
text = str(text)
text = text[2:]
text = text[:-1]
a = str(text)
d = a.strip("\"")
zlist = d.split("899340aa-5a52-42c4-b0ae-da135b0acb1f")
if zlist[0] == "update":
        #Mode: update/upload
if zlist[1] == "year":
            #Selector: year
#“update 899340aa-5a52-42c4-b0ae-da135b0acb1f years 899340aa-5a52-42c4-b0ae-da135b0acb1f year_name 899340aa-5a52-42c4-b0ae-da135b0acb1f describe 899340aa-5a52-42c4-b0ae-da135b0acb1f photo_path”
tjb = Years(year = zlist[2],describe = zlist[3],photo = zlist[4])
db.session.add(tjb)
db.session.commit()
if zlist[1] == "det":
            #Selector: det
print(zlist)
#“update 899340aa-5a52-42c4-b0ae-da135b0acb1f det 899340aa-5a52-42c4-b0ae-da135b0acb1f det_name 899340aa-5a52-42c4-b0ae-da135b0acb1f describe 899340aa-5a52-42c4-b0ae-da135b0acb1f photo_path 899340aa-5a52-42c4-b0ae-da135b0acb1f body 899340aa-5a52-42c4-b0ae-da135b0acb1f act_id”
a = Years.query.filter_by(year=zlist[6]).first()
tjb = Det(name = zlist[2],describe = zlist[3],photo = zlist[4],body=zlist[5],role=a)
db.session.add(tjb)
db.session.commit()
'''
if zlist[0] == "delete":
        #Mode: delete
if zlist[1] == "year":
            #Selector: year
if zlist[1] == "det":
            #Selector: det
'''
if zlist[0] == "search":
        #Mode: search
if zlist[1] == "year":
            #Selector: year
a = Years.query.all()
return two_list_chuli(a)
if zlist[1] == "det":
            #Selector: det
a = Det.query.all()
return two_list_chuli(a)
#Route for uploading images
@app.route("/api/upload/",methods=['POST'])
def upjpg():
upload_file = request.files['file']
old_file_name = upload_file.filename
yz_name = old_file_name.split("9046380f-3b5c-4cce-acd6-31a4f0088228")
print(old_file_name)
print(yz_name)
if upload_file:
if yz_name[0] == "year":
#file_path = os.path.join("hlby_web/static/img/year_min/", old_file_name)
file_path = os.path.join("/Users/lucy/Desktop/hlby_web/app/static/img/year_min", old_file_name)
upload_file.save(file_path)
if yz_name[0] == "detm":
#file_path = os.path.join("hlby_web/static/img/det_min/", old_file_name)
file_path = os.path.join("/Users/lucy/Desktop/hlby_web/app/static/img/det_min/", old_file_name)
upload_file.save(file_path)
if yz_name[0] == "det":
#file_path = os.path.join("hlby_web/static/img/det/", old_file_name)
file_path = os.path.join("/Users/lucy/Desktop/hlby_web/app/static/img/det/", old_file_name)
upload_file.save(file_path)
return '发送完成'
else:
return '发送失败'
if __name__ == '__main__':
#app.run(host='0.0.0.0',port=443,ssl_context=("fullchain.pem","privkey.pem"))
#app.run(host='0.0.0.0',debug = True,port=443,ssl_context=("fullchain.pem","privkey.pem"))
app.run(host='127.0.0.1',port=5000,debug = True)
#app.run(host='0.0.0.0',port=80)
#app.run(host='0.0.0.0',debug = True,port=80)
| 24.285266
| 292
| 0.599329
|
ac1b76a28bb9fe94bdc92c469edcc4d2c0a02346
| 13,417
|
py
|
Python
|
python/pywatchman_aio/__init__.py
|
pataquets/watchman
|
8e52efce212e87f485370bc25693802b8a999089
|
[
"Apache-2.0"
] | null | null | null |
python/pywatchman_aio/__init__.py
|
pataquets/watchman
|
8e52efce212e87f485370bc25693802b8a999089
|
[
"Apache-2.0"
] | null | null | null |
python/pywatchman_aio/__init__.py
|
pataquets/watchman
|
8e52efce212e87f485370bc25693802b8a999089
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2014-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
# no unicode literals
import asyncio
import os
import subprocess
import typing
from pywatchman import CommandError, WatchmanError, encoding
try:
from pywatchman import bser
except ImportError:
from pywatchman import pybser as bser
# 2 bytes marker, 1 byte int size, 8 bytes int64 value
SNIFF_LEN = 13
# TODO: Fix this when https://github.com/python/asyncio/issues/281 is resolved.
# tl;dr is that you cannot have different event loops running in different
# threads all fork subprocesses and listen for child events. The current
# workaround is to do the old fashioned blocking process communication using a
# ThreadPool.
def _resolve_sockname_helper():
# if invoked via a trigger, watchman will set this env var; we
# should use it unless explicitly set otherwise
path = os.getenv("WATCHMAN_SOCK")
if path:
return path
cmd = ["watchman", "--output-encoding=bser", "get-sockname"]
try:
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=os.name != "nt",
)
except OSError as e:
raise WatchmanError('"watchman" executable not in PATH (%s)', e)
stdout, stderr = p.communicate()
exitcode = p.poll()
if exitcode:
raise WatchmanError("watchman exited with code %d" % exitcode)
result = bser.loads(stdout)
if "error" in result:
raise WatchmanError(str(result["error"]))
return result["sockname"]
async def _resolve_sockname():
"""Find the Unix socket path to the global Watchman instance."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, _resolve_sockname_helper)
class AsyncTransport(object):
"""Communication transport to the Watchman Service."""
async def activate(self, **kwargs):
"""Make the transport ready for use. Optional for subclasses."""
pass
async def read(self, size):
"""Read 'size' bytes from the transport."""
raise NotImplementedError()
async def write(self, buf):
"""Write 'buf' bytes to the transport."""
raise NotImplementedError()
def close(self):
"""Close the transport. Optional for subclasses."""
pass
class AsyncUnixSocketTransport(AsyncTransport):
"""Local Unix domain socket transport supporting asyncio."""
def __init__(self):
self.sockname = None
self.reader = None
self.writer = None
async def activate(self, **kwargs):
# Requires keyword-argument 'sockname'
reader, writer = await asyncio.open_unix_connection(kwargs["sockname"])
self.reader = reader
self.writer = writer
async def write(self, data):
self.writer.write(data)
await self.writer.drain()
async def read(self, size):
res = await self.reader.read(size)
if not len(res):
raise ConnectionResetError("connection closed")
return res
def close(self):
if self.writer:
self.writer.close()
class AsyncCodec(object):
"""Communication encoding for the Watchman service."""
def __init__(self, transport):
self.transport = transport
async def receive(self):
"""Read from the underlying transport, parse and return the message."""
raise NotImplementedError()
async def send(self, *args):
"""Send the given message via the underlying transport."""
raise NotImplementedError()
def close(self):
"""Close the underlying transport."""
self.transport.close()
# This requires BSERv2 support of the server, but doesn't gracefully check
# for the requisite capability being present in older versions.
class AsyncBserCodec(AsyncCodec):
"""Use the BSER encoding."""
async def receive(self):
sniff = await self.transport.read(SNIFF_LEN)
if not sniff:
raise WatchmanError("empty watchman response")
_1, _2, elen = bser.pdu_info(sniff)
rlen = len(sniff)
buf = bytearray(elen)
buf[:rlen] = sniff
while elen > rlen:
b = await self.transport.read(elen - rlen)
buf[rlen : rlen + len(b)] = b
rlen += len(b)
response = bytes(buf)
try:
res = self._loads(response)
return res
except ValueError as e:
raise WatchmanError("watchman response decode error: %s" % e)
async def send(self, *args):
cmd = bser.dumps(*args, version=2, capabilities=0)
await self.transport.write(cmd)
def _loads(self, response):
"""Parse the BSER packet"""
return bser.loads(
response,
True,
value_encoding=encoding.get_local_encoding(),
value_errors=encoding.default_local_errors,
)
class ReceiveLoopError(Exception):
pass
class AIOClient(object):
"""Create and manage an asyncio Watchman connection.
Example usage:
with await AIOClient.from_socket() as client:
res = await client.query(...)
# ... use res ...
await client.query(
'subscribe',
root_dir,
sub_name,
{expression: ..., ...},
)
while True:
sub_update = await client.get_subscription(sub_name, root_dir)
# ... process sub_update ...
"""
# Don't call this directly use ::from_socket() instead.
def __init__(self, connection):
self.connection = connection
self.log_queue = asyncio.Queue()
self.sub_by_root = {}
self.bilateral_response_queue = asyncio.Queue()
self.receive_task = None
self.receive_task_exception = None
self._closed = False
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
return False
async def receive_bilateral_response(self):
"""Receive the response to a request made to the Watchman service."""
self._check_receive_loop()
resp = await self.bilateral_response_queue.get()
self._check_error(resp)
return resp
async def query(self, *args):
"""Send a query to the Watchman service and return the response."""
self._check_receive_loop()
try:
await self.connection.send(args)
return await self.receive_bilateral_response()
except CommandError as ex:
ex.setCommand(args)
raise ex
async def capability_check(self, optional=None, required=None):
"""Perform a server capability check."""
self._check_receive_loop()
# If the returned response is an error, self.query will raise an error
await self.query(
"version", {"optional": optional or [], "required": required or []}
)
async def get_subscription(self, name, root):
"""Retrieve the data associated with a named subscription
Returns None if there is no data associated with `name`
If root is not None, then only return the subscription
data that matches both root and name. When used in this way,
remove processing impacts both the unscoped and scoped stores
for the subscription data.
"""
self._check_receive_loop()
self._ensure_subscription_queue_exists(name, root)
res = await self.sub_by_root[root][name].get()
self._check_error(res)
return res
async def pop_log(self):
"""Get one log from the log queue."""
self._check_receive_loop()
        res = await self.log_queue.get()
self._check_error(res)
return res
def close(self):
"""Close the underlying connection."""
self._closed = True
if self.receive_task:
self.receive_task.cancel()
if self.connection:
self.connection.close()
def enable_receiving(self, loop=None):
"""Schedules the receive loop to run on the given loop."""
self.receive_task = asyncio.ensure_future(self._receive_loop(), loop=loop)
def do_if_done(fut):
try:
fut.result()
except asyncio.CancelledError:
pass
except Exception as ex:
self.receive_task_exception = ex
self.receive_task = None
self.receive_task.add_done_callback(do_if_done)
@classmethod
async def from_socket(cls, sockname: typing.Optional[str] = None) -> "AIOClient":
"""Create a new AIOClient using Unix transport and BSER Codec
connecting to the specified socket. If the specified socket is None,
then resolve the socket path automatically.
This method also schedules the receive loop to run on the event loop.
This method is a coroutine."""
if not sockname:
sockname = await _resolve_sockname()
transport = AsyncUnixSocketTransport()
await transport.activate(sockname=sockname)
connection = AsyncBserCodec(transport)
obj = cls(connection)
obj.enable_receiving()
return obj
async def _receive_loop(self):
"""Receive the response to a request made to the Watchman service.
Note that when trying to receive a PDU from the Watchman service,
we might get a unilateral response to a subscription or log, so these
are processed and queued up for later retrieval. This function only
returns when a non-unilateral response is received."""
try:
while True:
response = await self.connection.receive()
if self._is_unilateral(response):
await self._process_unilateral_response(response)
else:
await self.bilateral_response_queue.put(response)
except Exception as ex:
await self._broadcast_exception(ex)
# We may get a cancel exception on close, so don't close again.
if not self._closed:
self.close()
async def _broadcast_exception(self, ex):
await self.bilateral_response_queue.put(ex)
await self.log_queue.put(ex)
for root in self.sub_by_root.values():
for sub_queue in root.values():
await sub_queue.put(ex)
def _check_error(self, res):
if isinstance(res, Exception):
raise res
if "error" in res:
raise CommandError(res["error"])
def _check_receive_loop(self):
if self._closed:
raise Exception("Connection has been closed, make a new one to reconnect.")
if self.receive_task is None:
raise ReceiveLoopError("Receive loop was not started.")
def _is_unilateral(self, res):
return res.get("unilateral") or "subscription" in res or "log" in res
def _ensure_subscription_queue_exists(self, name, root):
# Note this function must be called from an async function on only one
# event loop.
self.sub_by_root.setdefault(root, {}).setdefault(name, asyncio.Queue())
async def _process_unilateral_response(self, response):
if "log" in response:
await self.log_queue.put(response["log"])
elif "subscription" in response:
sub = response["subscription"]
root = os.path.normcase(response["root"])
self._ensure_subscription_queue_exists(sub, root)
await self.sub_by_root[root][sub].put(response)
elif self._is_unilateral(response):
raise WatchmanError("Unknown unilateral response: " + str(response))
else:
raise WatchmanError("Not a unilateral response: " + str(response))
| 33.967089
| 87
| 0.64858
|
60ae69176cfd0478e9c9f974bd61deafbe16736b
| 61,334
|
py
|
Python
|
lib/sqlalchemy/orm/strategy_options.py
|
cctvadmin/sqlalchemy
|
0d5508d77653b37368ff9de22307c154cc90cf71
|
[
"MIT"
] | 1
|
2021-05-14T01:38:21.000Z
|
2021-05-14T01:38:21.000Z
|
lib/sqlalchemy/orm/strategy_options.py
|
cctvadmin/sqlalchemy
|
0d5508d77653b37368ff9de22307c154cc90cf71
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/strategy_options.py
|
cctvadmin/sqlalchemy
|
0d5508d77653b37368ff9de22307c154cc90cf71
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
"""
from . import util as orm_util
from .attributes import QueryableAttribute
from .base import _class_to_mapper
from .base import _is_aliased_class
from .base import _is_mapped_class
from .base import InspectionAttr
from .interfaces import LoaderOption
from .interfaces import MapperProperty
from .interfaces import PropComparator
from .path_registry import _DEFAULT_TOKEN
from .path_registry import _WILDCARD_TOKEN
from .path_registry import PathRegistry
from .path_registry import TokenRegistry
from .util import _orm_full_deannotate
from .. import exc as sa_exc
from .. import inspect
from .. import util
from ..sql import and_
from ..sql import coercions
from ..sql import roles
from ..sql import visitors
from ..sql.base import _generative
from ..sql.base import Generative
if util.TYPE_CHECKING:
from typing import Sequence
from .context import QueryContext
from ..sql.elements import ColumnElement
class Load(Generative, LoaderOption):
"""Represents loader options which modify the state of a
:class:`_query.Query` in order to affect how various mapped attributes are
loaded.
The :class:`_orm.Load` object is in most cases used implicitly behind the
scenes when one makes use of a query option like :func:`_orm.joinedload`,
:func:`.defer`, or similar. However, the :class:`_orm.Load` object
can also be used directly, and in some cases can be useful.
To use :class:`_orm.Load` directly, instantiate it with the target mapped
class as the argument. This style of usage is
useful when dealing with a :class:`_query.Query`
that has multiple entities::
myopt = Load(MyClass).joinedload("widgets")
The above ``myopt`` can now be used with :meth:`_query.Query.options`,
where it
will only take effect for the ``MyClass`` entity::
session.query(MyClass, MyOtherClass).options(myopt)
One case where :class:`_orm.Load`
is useful as public API is when specifying
"wildcard" options that only take effect for a certain class::
session.query(Order).options(Load(Order).lazyload('*'))
Above, all relationships on ``Order`` will be lazy-loaded, but other
attributes on those descendant objects will load using their normal
loader strategy.
.. seealso::
:ref:`deferred_options`
:ref:`deferred_loading_w_multiple`
:ref:`relationship_loader_options`
"""
_cache_key_traversal = [
("path", visitors.ExtendedInternalTraversal.dp_has_cache_key),
("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj),
("_of_type", visitors.ExtendedInternalTraversal.dp_multi),
("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list),
(
"_context_cache_key",
visitors.ExtendedInternalTraversal.dp_has_cache_key_tuples,
),
(
"local_opts",
visitors.ExtendedInternalTraversal.dp_string_multi_dict,
),
]
def __init__(self, entity):
insp = inspect(entity)
insp._post_inspect
self.path = insp._path_registry
# note that this .context is shared among all descendant
# Load objects
self.context = util.OrderedDict()
self.local_opts = {}
self.is_class_strategy = False
@classmethod
def for_existing_path(cls, path):
load = cls.__new__(cls)
load.path = path
load.context = {}
load.local_opts = {}
load._of_type = None
load._extra_criteria = ()
return load
def _generate_extra_criteria(self, context):
# type: (QueryContext) -> Sequence[ColumnElement]
"""Apply the current bound parameters in a QueryContext to the
"extra_criteria" stored with this Load object.
Load objects are typically pulled from the cached version of
the statement from a QueryContext. The statement currently being
executed will have new values (and keys) for bound parameters in the
extra criteria which need to be applied by loader strategies when
they handle this criteria for a result set.
"""
assert (
self._extra_criteria
), "this should only be called if _extra_criteria is present"
orig_query = context.compile_state.select_statement
current_query = context.query
# NOTE: while it seems like we should not do the "apply" operation
# here if orig_query is current_query, skipping it in the "optimized"
# case causes the query to be different from a cache key perspective,
# because we are creating a copy of the criteria which is no longer
# the same identity of the _extra_criteria in the loader option
# itself. cache key logic produces a different key for
# (A, copy_of_A) vs. (A, A), because in the latter case it shortens
# the second part of the key to just indicate on identity.
# if orig_query is current_query:
# not cached yet. just do the and_()
# return and_(*self._extra_criteria)
k1 = orig_query._generate_cache_key()
k2 = current_query._generate_cache_key()
return k2._apply_params_to_element(k1, and_(*self._extra_criteria))
@property
def _context_cache_key(self):
serialized = []
if self.context is None:
return []
for (key, loader_path), obj in self.context.items():
if key != "loader":
continue
serialized.append(loader_path + (obj,))
return serialized
def _generate(self):
cloned = super(Load, self)._generate()
cloned.local_opts = {}
return cloned
is_opts_only = False
is_class_strategy = False
strategy = None
propagate_to_loaders = False
_of_type = None
_extra_criteria = ()
def process_compile_state(self, compile_state):
if not compile_state.compile_options._enable_eagerloads:
return
self._process(compile_state, not bool(compile_state.current_path))
def _process(self, compile_state, raiseerr):
is_refresh = compile_state.compile_options._for_refresh_state
current_path = compile_state.current_path
if current_path:
for (token, start_path), loader in self.context.items():
if is_refresh and not loader.propagate_to_loaders:
continue
chopped_start_path = self._chop_path(start_path, current_path)
if chopped_start_path is not None:
compile_state.attributes[
(token, chopped_start_path)
] = loader
else:
compile_state.attributes.update(self.context)
def _generate_path(
self,
path,
attr,
for_strategy,
wildcard_key,
raiseerr=True,
polymorphic_entity_context=None,
):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not "
"refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
attr_str_name = attr
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
util.warn_deprecated_20(
"Using strings to indicate column or "
"relationship paths in loader options is deprecated "
"and will be removed in SQLAlchemy 2.0. Please use "
"the class-bound attribute directly."
)
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
try:
attr = found_property = attr.property
except AttributeError as ae:
if not isinstance(attr, MapperProperty):
util.raise_(
sa_exc.ArgumentError(
'Expected attribute "%s" on %s to be a '
"mapped attribute; "
"instead got %s object."
% (attr_str_name, ent, type(attr))
),
replace_context=ae,
)
else:
raise
path = path[attr]
else:
insp = inspect(attr)
if insp.is_mapper or insp.is_aliased_class:
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
elif insp.is_property:
prop = found_property = attr
path = path[prop]
elif insp.is_attribute:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?"
% (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if attr._extra_criteria:
self._extra_criteria = attr._extra_criteria
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
if polymorphic_entity_context is None:
polymorphic_entity_context = self.context
existing = path.entity_path[prop].get(
polymorphic_entity_context, "path_with_polymorphic"
)
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing)
if existing is not None
else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(
polymorphic_entity_context, "path_with_polymorphic", ac
)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
def __str__(self):
return "Load(strategy=%r)" % (self.strategy,)
def _coerce_strat(self, strategy):
if strategy is not None:
strategy = tuple(sorted(strategy.items()))
return strategy
def _apply_to_parent(self, parent, applied, bound):
raise NotImplementedError(
"Only 'unbound' loader options may be used with the "
"Load.options() method"
)
@_generative
def options(self, *opts):
r"""Apply a series of options as sub-options to this
:class:`_orm.Load`
object.
E.g.::
query = session.query(Author)
query = query.options(
joinedload(Author.book).options(
load_only("summary", "excerpt"),
joinedload(Book.citations).options(
joinedload(Citation.author)
)
)
)
:param \*opts: A series of loader option objects (ultimately
:class:`_orm.Load` objects) which should be applied to the path
specified by this :class:`_orm.Load` object.
.. versionadded:: 1.3.6
.. seealso::
:func:`.defaultload`
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
apply_cache = {}
bound = not isinstance(self, _UnboundLoad)
if bound:
raise NotImplementedError(
"The options() method is currently only supported "
"for 'unbound' loader options"
)
for opt in opts:
opt._apply_to_parent(self, apply_cache, bound)
@_generative
def set_relationship_strategy(
self, attr, strategy, propagate_to_loaders=True
):
strategy = self._coerce_strat(strategy)
self.propagate_to_loaders = propagate_to_loaders
cloned = self._clone_for_bind_strategy(attr, strategy, "relationship")
self.path = cloned.path
self._of_type = cloned._of_type
self._extra_criteria = cloned._extra_criteria
cloned.is_class_strategy = self.is_class_strategy = False
self.propagate_to_loaders = cloned.propagate_to_loaders
@_generative
def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False):
strategy = self._coerce_strat(strategy)
self.is_class_strategy = False
for attr in attrs:
cloned = self._clone_for_bind_strategy(
attr, strategy, "column", opts_only=opts_only, opts=opts
)
cloned.propagate_to_loaders = True
@_generative
def set_generic_strategy(self, attrs, strategy):
strategy = self._coerce_strat(strategy)
for attr in attrs:
cloned = self._clone_for_bind_strategy(attr, strategy, None)
cloned.propagate_to_loaders = True
@_generative
def set_class_strategy(self, strategy, opts):
strategy = self._coerce_strat(strategy)
cloned = self._clone_for_bind_strategy(None, strategy, None)
cloned.is_class_strategy = True
cloned.propagate_to_loaders = True
cloned.local_opts.update(opts)
def _clone_for_bind_strategy(
self, attr, strategy, wildcard_key, opts_only=False, opts=None
):
"""Create an anonymous clone of the Load/_UnboundLoad that is suitable
to be placed in the context / _to_bind collection of this Load
object. The clone will then lose references to context/_to_bind
in order to not create reference cycles.
"""
cloned = self._generate()
cloned._generate_path(self.path, attr, strategy, wildcard_key)
cloned.strategy = strategy
cloned.local_opts = self.local_opts
if opts:
cloned.local_opts.update(opts)
if opts_only:
cloned.is_opts_only = True
if strategy or cloned.is_opts_only:
cloned._set_path_strategy()
return cloned
def _set_for_path(self, context, path, replace=True, merge_opts=False):
if merge_opts or not replace:
existing = path.get(context, "loader")
if existing:
if merge_opts:
existing.local_opts.update(self.local_opts)
existing._extra_criteria += self._extra_criteria
else:
path.set(context, "loader", self)
else:
existing = path.get(context, "loader")
path.set(context, "loader", self)
if existing and existing.is_opts_only:
self.local_opts.update(existing.local_opts)
existing._extra_criteria += self._extra_criteria
def _set_path_strategy(self):
if not self.is_class_strategy and self.path.has_entity:
effective_path = self.path.parent
else:
effective_path = self.path
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
self._set_for_path(
self.context,
path,
replace=True,
merge_opts=self.is_opts_only,
)
else:
self._set_for_path(
self.context,
effective_path,
replace=True,
merge_opts=self.is_opts_only,
)
# remove cycles; _set_path_strategy is always invoked on an
# anonymous clone of the Load / UnboundLoad object since #5056
self.context = None
def __getstate__(self):
d = self.__dict__.copy()
# can't pickle this right now; warning is raised by strategies
d["_extra_criteria"] = ()
if d["context"] is not None:
d["context"] = PathRegistry.serialize_context_dict(
d["context"], ("loader",)
)
d["path"] = self.path.serialize()
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.path = PathRegistry.deserialize(self.path)
if self.context is not None:
self.context = PathRegistry.deserialize_context_dict(self.context)
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)):
if isinstance(c_token, util.string_types):
# TODO: this is approximated from the _UnboundLoad
# version and probably has issues, not fully covered.
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_token.key
):
return None
if c_token is p_token:
continue
elif (
isinstance(c_token, InspectionAttr)
and c_token.is_mapper
and p_token.is_mapper
and c_token.isa(p_token)
):
continue
else:
return None
return to_chop[i + 1 :]
class _UnboundLoad(Load):
"""Represent a loader option that isn't tied to a root entity.
The loader option will produce an entity-linked :class:`_orm.Load`
object when it is passed :meth:`_query.Query.options`.
This provides compatibility with the traditional system
of freestanding options, e.g. ``joinedload('x.y.z')``.
"""
def __init__(self):
self.path = ()
self._to_bind = []
self.local_opts = {}
self._extra_criteria = ()
_cache_key_traversal = [
("path", visitors.ExtendedInternalTraversal.dp_multi_list),
("strategy", visitors.ExtendedInternalTraversal.dp_plain_obj),
("_to_bind", visitors.ExtendedInternalTraversal.dp_has_cache_key_list),
("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list),
(
"local_opts",
visitors.ExtendedInternalTraversal.dp_string_multi_dict,
),
]
_is_chain_link = False
def _set_path_strategy(self):
self._to_bind.append(self)
# remove cycles; _set_path_strategy is always invoked on an
# anonymous clone of the Load / UnboundLoad object since #5056
self._to_bind = None
def _apply_to_parent(self, parent, applied, bound, to_bind=None):
if self in applied:
return applied[self]
if to_bind is None:
to_bind = self._to_bind
cloned = self._generate()
applied[self] = cloned
cloned.strategy = self.strategy
if self.path:
attr = self.path[-1]
if isinstance(attr, util.string_types) and attr.endswith(
_DEFAULT_TOKEN
):
attr = attr.split(":")[0] + ":" + _WILDCARD_TOKEN
cloned._generate_path(
parent.path + self.path[0:-1], attr, self.strategy, None
)
# these assertions can go away once the "sub options" API is
# mature
assert cloned.propagate_to_loaders == self.propagate_to_loaders
assert cloned.is_class_strategy == self.is_class_strategy
assert cloned.is_opts_only == self.is_opts_only
new_to_bind = {
elem._apply_to_parent(parent, applied, bound, to_bind)
for elem in to_bind
}
cloned._to_bind = parent._to_bind
cloned._to_bind.extend(new_to_bind)
cloned.local_opts.update(self.local_opts)
return cloned
def _generate_path(self, path, attr, for_strategy, wildcard_key):
if (
wildcard_key
and isinstance(attr, util.string_types)
and attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN)
):
if attr == _DEFAULT_TOKEN:
self.propagate_to_loaders = False
attr = "%s:%s" % (wildcard_key, attr)
if path and _is_mapped_class(path[-1]) and not self.is_class_strategy:
path = path[0:-1]
if attr:
path = path + (attr,)
self.path = path
self._extra_criteria = getattr(attr, "_extra_criteria", ())
return path
def __getstate__(self):
d = self.__dict__.copy()
# can't pickle this right now; warning is raised by strategies
d["_extra_criteria"] = ()
d["path"] = self._serialize_path(self.path, filter_aliased_class=True)
return d
def __setstate__(self, state):
ret = []
for key in state["path"]:
if isinstance(key, tuple):
if len(key) == 2:
# support legacy
cls, propkey = key
of_type = None
else:
cls, propkey, of_type = key
prop = getattr(cls, propkey)
if of_type:
prop = prop.of_type(of_type)
ret.append(prop)
else:
ret.append(key)
state["path"] = tuple(ret)
self.__dict__ = state
def _process(self, compile_state, raiseerr):
dedupes = compile_state.attributes["_unbound_load_dedupes"]
is_refresh = compile_state.compile_options._for_refresh_state
for val in self._to_bind:
if val not in dedupes:
dedupes.add(val)
if is_refresh and not val.propagate_to_loaders:
continue
val._bind_loader(
[
ent.entity_zero
for ent in compile_state._mapper_entities
],
compile_state.current_path,
compile_state.attributes,
raiseerr,
)
@classmethod
def _from_keys(cls, meth, keys, chained, kw):
opt = _UnboundLoad()
def _split_key(key):
if isinstance(key, util.string_types):
# coerce fooload('*') into "default loader strategy"
if key == _WILDCARD_TOKEN:
return (_DEFAULT_TOKEN,)
# coerce fooload(".*") into "wildcard on default entity"
elif key.startswith("." + _WILDCARD_TOKEN):
key = key[1:]
return key.split(".")
else:
return (key,)
all_tokens = [token for key in keys for token in _split_key(key)]
for token in all_tokens[0:-1]:
# set _is_chain_link first so that clones of the
# object also inherit this flag
opt._is_chain_link = True
if chained:
opt = meth(opt, token, **kw)
else:
opt = opt.defaultload(token)
opt = meth(opt, all_tokens[-1], **kw)
opt._is_chain_link = False
return opt
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, (p_entity, p_prop)) in enumerate(
zip(to_chop, path.pairs())
):
if isinstance(c_token, util.string_types):
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_prop.key
):
return None
elif isinstance(c_token, PropComparator):
if c_token.property is not p_prop or (
c_token._parententity is not p_entity
and (
not c_token._parententity.is_mapper
or not c_token._parententity.isa(p_entity)
)
):
return None
else:
i += 1
return to_chop[i:]
def _serialize_path(self, path, filter_aliased_class=False):
ret = []
for token in path:
if isinstance(token, QueryableAttribute):
if (
filter_aliased_class
and token._of_type
and inspect(token._of_type).is_aliased_class
):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(
(
token._parentmapper.class_,
token.key,
token._of_type.entity if token._of_type else None,
)
)
elif isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(token)
return ret
def _bind_loader(self, entities, current_path, context, raiseerr):
"""Convert from an _UnboundLoad() object into a Load() object.
The _UnboundLoad() uses an informal "path" and does not necessarily
refer to a lead entity as it may use string tokens. The Load()
OTOH refers to a complete path. This method reconciles from a
given Query into a Load.
Example::
query = session.query(User).options(
joinedload("orders").joinedload("items"))
The above options will be an _UnboundLoad object along the lines
of (note this is not the exact API of _UnboundLoad)::
_UnboundLoad(
_to_bind=[
_UnboundLoad(["orders"], {"lazy": "joined"}),
_UnboundLoad(["orders", "items"], {"lazy": "joined"}),
]
)
After this method, we get something more like this (again this is
not exact API)::
Load(
User,
(User, User.orders.property))
Load(
User,
(User, User.orders.property, Order, Order.items.property))
"""
start_path = self.path
if self.is_class_strategy and current_path:
start_path += (entities[0],)
# _current_path implies we're in a
# secondary load with an existing path
if current_path:
start_path = self._chop_path(start_path, current_path)
if not start_path:
return None
# look at the first token and try to locate within the Query
# what entity we are referring towards.
token = start_path[0]
if isinstance(token, util.string_types):
entity = self._find_entity_basestring(entities, token, raiseerr)
elif isinstance(token, PropComparator):
prop = token.property
entity = self._find_entity_prop_comparator(
entities, prop, token._parententity, raiseerr
)
elif self.is_class_strategy and _is_mapped_class(token):
entity = inspect(token)
if entity not in entities:
entity = None
else:
raise sa_exc.ArgumentError(
"mapper option expects " "string key or list of attributes"
)
if not entity:
return
path_element = entity
# transfer our entity-less state into a Load() object
# with a real entity path. Start with the lead entity
# we just located, then go through the rest of our path
# tokens and populate into the Load().
loader = Load(path_element)
if context is None:
context = loader.context
loader.strategy = self.strategy
loader.is_opts_only = self.is_opts_only
loader.is_class_strategy = self.is_class_strategy
path = loader.path
if not loader.is_class_strategy:
for idx, token in enumerate(start_path):
if not loader._generate_path(
loader.path,
token,
self.strategy if idx == len(start_path) - 1 else None,
None,
raiseerr,
polymorphic_entity_context=context,
):
return
loader.local_opts.update(self.local_opts)
if not loader.is_class_strategy and loader.path.has_entity:
effective_path = loader.path.parent
else:
effective_path = loader.path
# prioritize "first class" options over those
# that were "links in the chain", e.g. "x" and "y" in
# someload("x.y.z") versus someload("x") / someload("x.y")
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
loader._set_for_path(
context,
path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
else:
loader._set_for_path(
context,
effective_path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
return loader
def _find_entity_prop_comparator(self, entities, prop, mapper, raiseerr):
if _is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = _class_to_mapper(mapper)
for ent in entities:
if orm_util._entity_corresponds_to(ent, searchfor):
return ent
else:
if raiseerr:
if not list(entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities, "
'which do not apply to %s "%s"'
% (util.clsname_as_plain_name(type(prop)), prop)
)
else:
raise sa_exc.ArgumentError(
'Mapped attribute "%s" does not apply to any of the '
"root entities in this query, e.g. %s. Please "
"specify the full path "
"from one of the root entities to the target "
"attribute. "
% (prop, ", ".join(str(x) for x in entities))
)
else:
return None
def _find_entity_basestring(self, entities, token, raiseerr):
if token.endswith(":" + _WILDCARD_TOKEN):
if len(list(entities)) != 1:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't apply wildcard ('*') or load_only() "
"loader option to multiple entities %s. Specify "
"loader options for each entity individually, such "
"as %s."
% (
", ".join(str(ent) for ent in entities),
", ".join(
"Load(%s).some_option('*')" % ent
for ent in entities
),
)
)
elif token.endswith(_DEFAULT_TOKEN):
raiseerr = False
for ent in entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
'can\'t find property named "%s".' % (token,)
)
else:
return None
class loader_option(object):
def __init__(self):
pass
def __call__(self, fn):
self.name = name = fn.__name__
self.fn = fn
if hasattr(Load, name):
raise TypeError("Load class already has a %s method." % (name))
setattr(Load, name, fn)
return self
def _add_unbound_fn(self, fn):
self._unbound_fn = fn
fn_doc = self.fn.__doc__
self.fn.__doc__ = """Produce a new :class:`_orm.Load` object with the
:func:`_orm.%(name)s` option applied.
See :func:`_orm.%(name)s` for usage examples.
""" % {
"name": self.name
}
fn.__doc__ = fn_doc
return self
def _add_unbound_all_fn(self, fn):
fn.__doc__ = """Produce a standalone "all" option for
:func:`_orm.%(name)s`.
.. deprecated:: 0.9
The :func:`_orm.%(name)s_all` function is deprecated, and will be removed
in a future release. Please use method chaining with
:func:`_orm.%(name)s` instead, as in::
session.query(MyClass).options(
%(name)s("someattribute").%(name)s("anotherattribute")
)
""" % {
"name": self.name
}
fn = util.deprecated(
            # This is used by `baked_lazyload_all`, which was only deprecated
            # in version 1.2, so it must stick around until that is removed
"0.9",
"The :func:`.%(name)s_all` function is deprecated, and will be "
"removed in a future release. Please use method chaining with "
":func:`.%(name)s` instead" % {"name": self.name},
add_deprecation_to_docstring=False,
)(fn)
self._unbound_all_fn = fn
return self
@loader_option()
def contains_eager(loadopt, attr, alias=None):
r"""Indicate that the given attribute should be eagerly loaded from
columns stated manually in the query.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\
join(Order.user).\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
It may also be used for customizing the entries in an eagerly loaded
collection; queries will normally want to use the
:meth:`_query.Query.populate_existing` method assuming the primary
collection of parent objects may already have been loaded::
sess.query(User).\
join(User.addresses).\
filter(Address.email_address.like('%@aol.com')).\
options(contains_eager(User.addresses)).\
populate_existing()
See the section :ref:`contains_eager` for complete usage details.
.. seealso::
:ref:`loading_toplevel`
:ref:`contains_eager`
"""
if alias is not None:
if not isinstance(alias, str):
info = inspect(alias)
alias = info.selectable
else:
util.warn_deprecated(
"Passing a string name for the 'alias' argument to "
"'contains_eager()` is deprecated, and will not work in a "
"future release. Please use a sqlalchemy.alias() or "
"sqlalchemy.orm.aliased() construct.",
version="1.4",
)
elif getattr(attr, "_of_type", None):
ot = inspect(attr._of_type)
alias = ot.selectable
cloned = loadopt.set_relationship_strategy(
attr, {"lazy": "joined"}, propagate_to_loaders=False
)
cloned.local_opts["eager_from_alias"] = alias
return cloned
@contains_eager._add_unbound_fn
def contains_eager(*keys, **kw):
return _UnboundLoad()._from_keys(
_UnboundLoad.contains_eager, keys, True, kw
)
@loader_option()
def load_only(loadopt, *attrs):
"""Indicate that for a particular entity, only the given list
of column-based attribute names should be loaded; all others will be
deferred.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Example - given a class ``User``, load only the ``name`` and ``fullname``
attributes::
session.query(User).options(load_only("name", "fullname"))
Example - given a relationship ``User.addresses -> Address``, specify
subquery loading for the ``User.addresses`` collection, but on each
``Address`` object load only the ``email_address`` attribute::
session.query(User).options(
subqueryload("addresses").load_only("email_address")
)
For a :class:`_query.Query` that has multiple entities,
the lead entity can be
specifically referred to using the :class:`_orm.Load` constructor::
session.query(User, Address).join(User.addresses).options(
Load(User).load_only("name", "fullname"),
Load(Address).load_only("email_address")
)
.. note:: This method will still load a :class:`_schema.Column` even
if the column property is defined with ``deferred=True``
for the :func:`.column_property` function.
.. versionadded:: 0.9.0
"""
cloned = loadopt.set_column_strategy(
attrs, {"deferred": False, "instrument": True}
)
cloned.set_column_strategy(
"*", {"deferred": True, "instrument": True}, {"undefer_pks": True}
)
return cloned
@load_only._add_unbound_fn
def load_only(*attrs):
return _UnboundLoad().load_only(*attrs)
@loader_option()
def joinedload(loadopt, attr, innerjoin=None):
"""Indicate that the given attribute should be loaded using joined
eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# joined-load the "orders" collection on "User"
query(User).options(joinedload(User.orders))
# joined-load Order.items and then Item.keywords
query(Order).options(
joinedload(Order.items).joinedload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# joined-load the keywords collection
query(Order).options(
lazyload(Order.items).joinedload(Item.keywords))
:param innerjoin: if ``True``, indicates that the joined eager load should
use an inner join instead of the default of left outer join::
query(Order).options(joinedload(Order.user, innerjoin=True))
In order to chain multiple eager joins together where some may be
OUTER and others INNER, right-nested joins are used to link them::
query(A).options(
joinedload(A.bs, innerjoin=False).
joinedload(B.cs, innerjoin=True)
)
The above query, linking A.bs via "outer" join and B.cs via "inner" join
would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using
older versions of SQLite (< 3.7.16), this form of JOIN is translated to
use full subqueries as this syntax is otherwise not directly supported.
The ``innerjoin`` flag can also be stated with the term ``"unnested"``.
This indicates that an INNER JOIN should be used, *unless* the join
is linked to a LEFT OUTER JOIN to the left, in which case it
will render as LEFT OUTER JOIN. For example, supposing ``A.bs``
is an outerjoin::
query(A).options(
joinedload(A.bs).
joinedload(B.cs, innerjoin="unnested")
)
The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
rather than as "a LEFT OUTER JOIN (b JOIN c)".
.. note:: The "unnested" flag does **not** affect the JOIN rendered
from a many-to-many association table, e.g. a table configured
as :paramref:`_orm.relationship.secondary`, to the target table; for
correctness of results, these joins are always INNER and are
therefore right-nested if linked to an OUTER join.
.. versionchanged:: 1.0.0 ``innerjoin=True`` now implies
``innerjoin="nested"``, whereas in 0.9 it implied
``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested"
inner join behavior, use the value ``innerjoin="unnested"``.
See :ref:`migration_3008`.
.. note::
The joins produced by :func:`_orm.joinedload` are **anonymously
aliased**. The criteria by which the join proceeds cannot be
modified, nor can the :class:`_query.Query`
refer to these joins in any way,
including ordering. See :ref:`zen_of_eager_loading` for further
detail.
To produce a specific SQL JOIN which is explicitly available, use
:meth:`_query.Query.join`.
To combine explicit JOINs with eager loading
of collections, use :func:`_orm.contains_eager`; see
:ref:`contains_eager`.
.. seealso::
:ref:`loading_toplevel`
:ref:`joined_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"})
if innerjoin is not None:
loader.local_opts["innerjoin"] = innerjoin
return loader
@joinedload._add_unbound_fn
def joinedload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, False, kw)
@loader_option()
def subqueryload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(
subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(
lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:ref:`subquery_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
@subqueryload._add_unbound_fn
def subqueryload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {})
@loader_option()
def selectinload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
SELECT IN eager loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
examples::
# selectin-load the "orders" collection on "User"
query(User).options(selectinload(User.orders))
# selectin-load Order.items and then Item.keywords
query(Order).options(
selectinload(Order.items).selectinload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# selectin-load the keywords collection
query(Order).options(
lazyload(Order.items).selectinload(Item.keywords))
.. versionadded:: 1.2
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "selectin"})
@selectinload._add_unbound_fn
def selectinload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, False, {})
@loader_option()
def lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`lazy_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "select"})
@lazyload._add_unbound_fn
def lazyload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {})
@loader_option()
def immediateload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
an immediate load with a per-attribute SELECT statement.
The load is achieved using the "lazyloader" strategy and does not
fire off any additional eager loaders.
The :func:`.immediateload` option is superseded in general
by the :func:`.selectinload` option, which performs the same task
more efficiently by emitting a SELECT for all loaded objects.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"})
return loader
@immediateload._add_unbound_fn
def immediateload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.immediateload, keys, False, {})
@loader_option()
def noload(loadopt, attr):
"""Indicate that the given relationship attribute should remain unloaded.
The relationship attribute will return ``None`` when accessed without
producing any loading effect.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
:func:`_orm.noload` applies to :func:`_orm.relationship` attributes; for
column-based attributes, see :func:`_orm.defer`.
.. note:: Setting this loading strategy as the default strategy
for a relationship using the :paramref:`.orm.relationship.lazy`
       parameter may cause issues with flushes, such as when a delete operation
       needs to load related objects and ``None`` is returned instead.
.. seealso::
:ref:`loading_toplevel`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "noload"})
@noload._add_unbound_fn
def noload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {})
@loader_option()
def raiseload(loadopt, attr, sql_only=False):
"""Indicate that the given attribute should raise an error if accessed.
A relationship attribute configured with :func:`_orm.raiseload` will
raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The
typical way this is useful is when an application is attempting to ensure
that all relationship attributes that are accessed in a particular context
would have been already loaded via eager loading. Instead of having
to read through SQL logs to ensure lazy loads aren't occurring, this
strategy will cause them to raise immediately.
:func:`_orm.raiseload` applies to :func:`_orm.relationship`
attributes only.
In order to apply raise-on-SQL behavior to a column-based attribute,
use the :paramref:`.orm.defer.raiseload` parameter on the :func:`.defer`
loader option.
:param sql_only: if True, raise only if the lazy load would emit SQL, but
not if it is only checking the identity map, or determining that the
related value should just be None due to missing keys. When False, the
strategy will raise for all varieties of relationship loading.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
.. versionadded:: 1.1
.. seealso::
:ref:`loading_toplevel`
:ref:`prevent_lazy_with_raiseload`
:ref:`deferred_raiseload`
"""
return loadopt.set_relationship_strategy(
attr, {"lazy": "raise_on_sql" if sql_only else "raise"}
)
@raiseload._add_unbound_fn
def raiseload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.raiseload, keys, False, kw)
@loader_option()
def defaultload(loadopt, attr):
"""Indicate an attribute should load using its default loader style.
This method is used to link to other loader options further into
a chain of attributes without altering the loader style of the links
along the chain. For example, to set joined eager loading for an
element of an element::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
joinedload(MyOtherClass.someotherattribute)
)
:func:`.defaultload` is also useful for setting column-level options
on a related class, namely that of :func:`.defer` and :func:`.undefer`::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
defer("some_column").
undefer("some_other_column")
)
.. seealso::
:meth:`_orm.Load.options` - allows for complex hierarchical
loader option structures with less verbosity than with individual
:func:`.defaultload` directives.
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
return loadopt.set_relationship_strategy(attr, None)
@defaultload._add_unbound_fn
def defaultload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {})
@loader_option()
def defer(loadopt, key, raiseload=False):
r"""Indicate that the given column-oriented attribute should be deferred,
e.g. not loaded until accessed.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
e.g.::
from sqlalchemy.orm import defer
session.query(MyClass).options(
defer("attribute_one"),
defer("attribute_two"))
session.query(MyClass).options(
defer(MyClass.attribute_one),
defer(MyClass.attribute_two))
To specify a deferred load of an attribute on a related class,
the path can be specified one token at a time, specifying the loading
style for each link along the chain. To leave the loading style
for a link unchanged, use :func:`_orm.defaultload`::
session.query(MyClass).options(defaultload("someattr").defer("some_column"))
A :class:`_orm.Load` object that is present on a certain path can have
:meth:`_orm.Load.defer` called multiple times,
each will operate on the same
parent entity::
session.query(MyClass).options(
defaultload("someattr").
defer("some_column").
defer("some_other_column").
defer("another_column")
)
:param key: Attribute to be deferred.
:param raiseload: raise :class:`.InvalidRequestError` if the column
value is to be loaded from emitting SQL. Used to prevent unwanted
SQL from being emitted.
.. versionadded:: 1.4
.. seealso::
:ref:`deferred_raiseload`
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.defer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`_orm.undefer`
"""
strategy = {"deferred": True, "instrument": True}
if raiseload:
strategy["raiseload"] = True
return loadopt.set_column_strategy((key,), strategy)
@defer._add_unbound_fn
def defer(key, *addl_attrs, **kw):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.defer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path.",
version="1.3",
)
return _UnboundLoad._from_keys(
_UnboundLoad.defer, (key,) + addl_attrs, False, kw
)
@loader_option()
def undefer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be undeferred,
e.g. specified within the SELECT statement of the entity as a whole.
The column being undeferred is typically set up on the mapping as a
:func:`.deferred` attribute.
This function is part of the :class:`_orm.Load` interface and supports
both method-chained and standalone operation.
Examples::
# undefer two columns
session.query(MyClass).options(undefer("col1"), undefer("col2"))
# undefer all columns specific to a single class using Load + *
session.query(MyClass, MyOtherClass).options(
Load(MyClass).undefer("*"))
# undefer a column on a related object
session.query(MyClass).options(
defaultload(MyClass.items).undefer('text'))
:param key: Attribute to be undeferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`_orm.undefer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`_orm.defer`
:func:`_orm.undefer_group`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": False, "instrument": True}
)
@undefer._add_unbound_fn
def undefer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.undefer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path.",
version="1.3",
)
return _UnboundLoad._from_keys(
_UnboundLoad.undefer, (key,) + addl_attrs, False, {}
)
@loader_option()
def undefer_group(loadopt, name):
"""Indicate that columns within the given deferred group name should be
undeferred.
The columns being undeferred are set up on the mapping as
:func:`.deferred` attributes and include a "group" name.
E.g::
session.query(MyClass).options(undefer_group("large_attrs"))
To undefer a group of attributes on a related entity, the path can be
spelled out using relationship loader options, such as
:func:`_orm.defaultload`::
session.query(MyClass).options(
defaultload("someattr").undefer_group("large_attrs"))
.. versionchanged:: 0.9.0 :func:`_orm.undefer_group` is now specific to a
particular entity load path.
.. seealso::
:ref:`deferred`
:func:`_orm.defer`
:func:`_orm.undefer`
"""
return loadopt.set_column_strategy(
"*", None, {"undefer_group_%s" % name: True}, opts_only=True
)
@undefer_group._add_unbound_fn
def undefer_group(name):
return _UnboundLoad().undefer_group(name)
@loader_option()
def with_expression(loadopt, key, expression):
r"""Apply an ad-hoc SQL expression to a "deferred expression" attribute.
This option is used in conjunction with the :func:`_orm.query_expression`
mapper-level construct that indicates an attribute which should be the
target of an ad-hoc SQL expression.
E.g.::
sess.query(SomeClass).options(
with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
)
.. versionadded:: 1.2
    :param key: Attribute to be populated with the ad-hoc SQL expression.
    :param expression: SQL expression to be applied to the attribute.
.. note:: the target attribute is populated only if the target object
is **not currently loaded** in the current :class:`_orm.Session`
unless the :meth:`_query.Query.populate_existing` method is used.
Please refer to :ref:`mapper_querytime_expression` for complete
usage details.
.. seealso::
:ref:`mapper_querytime_expression`
"""
expression = coercions.expect(
roles.LabeledColumnExprRole, _orm_full_deannotate(expression)
)
return loadopt.set_column_strategy(
(key,), {"query_expression": True}, opts={"expression": expression}
)
@with_expression._add_unbound_fn
def with_expression(key, expression):
return _UnboundLoad._from_keys(
_UnboundLoad.with_expression, (key,), False, {"expression": expression}
)
@loader_option()
def selectin_polymorphic(loadopt, classes):
"""Indicate an eager load should take place for all attributes
specific to a subclass.
This uses an additional SELECT with IN against all matched primary
key values, and is the per-query analogue to the ``"selectin"``
setting on the :paramref:`.mapper.polymorphic_load` parameter.
.. versionadded:: 1.2
.. seealso::
:ref:`polymorphic_selectin`
"""
loadopt.set_class_strategy(
{"selectinload_polymorphic": True},
opts={
"entities": tuple(
sorted((inspect(cls) for cls in classes), key=id)
)
},
)
return loadopt
@selectin_polymorphic._add_unbound_fn
def selectin_polymorphic(base_cls, classes):
ul = _UnboundLoad()
ul.is_class_strategy = True
ul.path = (inspect(base_cls),)
ul.selectin_polymorphic(classes)
return ul
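# Illustrative usage sketch (not part of the original module): the helper
# below shows how the loader options defined above are typically combined
# through Query.options().  The User/Address mapping and the in-memory SQLite
# engine are assumptions made purely for demonstration.
def _example_loader_option_usage():
    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session, joinedload, load_only, relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = "users"
        id = Column(Integer, primary_key=True)
        name = Column(String)
        fullname = Column(String)
        addresses = relationship("Address", back_populates="user")

    class Address(Base):
        __tablename__ = "addresses"
        id = Column(Integer, primary_key=True)
        email_address = Column(String)
        user_id = Column(Integer, ForeignKey("users.id"))
        user = relationship("User", back_populates="addresses")

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(engine)

    # Restrict the columns loaded for User, eagerly join User.addresses, and
    # load only Address.email_address on the joined rows.
    return (
        session.query(User)
        .options(
            load_only("name", "fullname"),
            joinedload(User.addresses).load_only("email_address"),
        )
        .all()
    )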
| 33.792837
| 84
| 0.594157
|
a055e5cbee1508d2e487a18048e270d5707d81ff
| 273
|
py
|
Python
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_0/ar_12/test_artificial_1024_Quantization_PolyTrend_0_12_0.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_0/ar_12/test_artificial_1024_Quantization_PolyTrend_0_12_0.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/artificial/transf_Quantization/trend_PolyTrend/cycle_0/ar_12/test_artificial_1024_Quantization_PolyTrend_0_12_0.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
| 39
| 168
| 0.736264
|
761a87db27080fe6922d1454cb1a54a1e7cfff7e
| 740
|
py
|
Python
|
nomadgram/users/signals.py
|
juyoungpark718/juyoungram
|
24182afb4d3076ba9a8614846368781883f255fa
|
[
"MIT"
] | null | null | null |
nomadgram/users/signals.py
|
juyoungpark718/juyoungram
|
24182afb4d3076ba9a8614846368781883f255fa
|
[
"MIT"
] | 12
|
2021-03-02T01:12:10.000Z
|
2022-03-03T23:07:52.000Z
|
nomadgram/users/signals.py
|
juyoungpark718/juyoungram
|
24182afb4d3076ba9a8614846368781883f255fa
|
[
"MIT"
] | null | null | null |
from allauth.account.signals import user_signed_up
from django.dispatch import receiver
from io import BytesIO
from urllib.request import urlopen
from django.core.files import File
@receiver(user_signed_up)
def handle_user_signed_up(request, user, **kwargs):
if len(user.socialaccount_set.all()) > 0:
social_account = user.socialaccount_set.all()[0]
uid = social_account.uid
gender = social_account.extra_data.get('gender', None)
user.gender = gender
avatar = social_account.get_avatar_url()
avatar_image = urlopen(avatar)
io = BytesIO(avatar_image.read())
user.profile_image.save('{}.jpg'.format(uid), File(io))
user.name = user.get_full_name()
user.save()
| 37
| 63
| 0.695946
|
96a831aee422d5aa9171604c431e642108469686
| 567
|
py
|
Python
|
setup.py
|
cthurber/tweetpurge
|
c18c2e65edfea49076d3cb3df97fef9f27911a93
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
cthurber/tweetpurge
|
c18c2e65edfea49076d3cb3df97fef9f27911a93
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
cthurber/tweetpurge
|
c18c2e65edfea49076d3cb3df97fef9f27911a93
|
[
"BSD-2-Clause"
] | null | null | null |
import os,sys
if not os.path.isfile('.apikey'):
consumerKey = input("Consumer Key: ")
consumerSecret = input("Consumer Secret: ")
accessToken = input("Access Token: ")
accessTokenSecret = input("Access Token Secret: ")
printArray = [consumerKey,consumerSecret,accessToken,accessTokenSecret]
with open(".apikey",'w') as apiinfo:
for item in printArray:
print(item,file=apiinfo)
os.system("rm .ignoreIDs")
os.system("touch .ignoreIDs")
print("Setup complete")
runOr = input("Run purge.py now? (y/n): ")
if runOr == 'y':
os.system("python ./purge.py")
| 31.5
| 72
| 0.707231
|
c35f4d92bc4e82ba7dd7378a71bb2a38671ccd44
| 372
|
py
|
Python
|
insights/parsers/tests/test_ksmstate.py
|
mglantz/insights-core
|
6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4
|
[
"Apache-2.0"
] | 1
|
2020-02-19T06:36:22.000Z
|
2020-02-19T06:36:22.000Z
|
insights/parsers/tests/test_ksmstate.py
|
mglantz/insights-core
|
6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4
|
[
"Apache-2.0"
] | 10
|
2018-04-16T15:38:04.000Z
|
2018-05-15T18:43:02.000Z
|
insights/parsers/tests/test_ksmstate.py
|
mglantz/insights-core
|
6f20bbbe03f53ee786f483b2a28d256ff1ad0fd4
|
[
"Apache-2.0"
] | null | null | null |
from insights.parsers import ksmstate
from insights.tests import context_wrap
KSMSTATE0 = "0"
KSMSTATE1 = "1"
def test_is_running_0():
ksm_info = ksmstate.is_running(context_wrap(KSMSTATE0))
assert ksm_info.get('running') is False
def test_is_running_1():
ksm_info = ksmstate.is_running(context_wrap(KSMSTATE1))
assert ksm_info.get('running') is True
| 23.25
| 59
| 0.760753
|
87d7ac98945dfaeaa8aef0e50928282ac73e8067
| 2,614
|
py
|
Python
|
donno/converters/joplin.py
|
leetschau/donno
|
d7f5651df997966c249f6350f59ed3321ffd128d
|
[
"MIT"
] | null | null | null |
donno/converters/joplin.py
|
leetschau/donno
|
d7f5651df997966c249f6350f59ed3321ffd128d
|
[
"MIT"
] | null | null | null |
donno/converters/joplin.py
|
leetschau/donno
|
d7f5651df997966c249f6350f59ed3321ffd128d
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from datetime import datetime
import json
import re
import sh
from donno.config import get_attr
configs = get_attr()
def f2dict(fp: str) -> dict:
with open(fp) as f:
res = json.load(f)
return res
def get_slug(notebook: dict, nbs: list) -> str:
if len(notebook['parent_id']) == 0:
parent_slug = ''
else:
parent_nb = [x for x in nbs
if x['id'] == notebook['parent_id']][0]
        parent_slug = get_slug(parent_nb, nbs)
return parent_slug + '/' + notebook['title']
def import_json(import_folder):
dicts = map(f2dict, Path(import_folder).glob('*.json'))
notes = []
notebooks = []
resources = []
tags = []
relations = []
for adict in dicts:
if adict['type_'] == 1:
notes.append(adict)
elif adict['type_'] == 2:
notebooks.append(adict)
elif adict['type_'] == 3:
print('id:', adict['id'], 'type:', 3)
elif adict['type_'] == 4:
resources.append(adict)
elif adict['type_'] == 5:
tags.append(adict)
elif adict['type_'] == 6:
relations.append(adict)
for rel in relations:
the_tag = [x for x in tags if x['id'] == rel['tag_id']][0]
the_note = [x for x in notes if x['id'] == rel['note_id']][0]
if 'tag' not in the_note:
the_note['tag'] = []
the_note['tag'].append(the_tag['title'])
for nb in notebooks:
        nb['slug'] = get_slug(nb, notebooks)
jop_link = re.compile(r'!\[\w{32}\.(\w{1,5})\]\(:/(\w{32})\)',
re.MULTILINE)
for note in notes:
nb = [x for x in notebooks if x['id'] == note['parent_id']][0]
body = re.sub(jop_link, r'', note['body'])
fn = f"{note['id']}.md"
tags = note['tag'] if 'tag' in note else ''
created = datetime.utcfromtimestamp(int(note['user_created_time']) /
1000).strftime('%Y-%m-%d %H:%M:%S')
updated = datetime.utcfromtimestamp(int(note['user_updated_time']) /
1000).strftime('%Y-%m-%d %H:%M:%S')
print(f'Write to {fn} ...')
with open(fn, 'w') as f:
md_note = (f"Title: {note['title']}\n"
f"Tags: {tags}\n"
f"Notebook: {nb['slug']}\n"
f"Created: {created}\n"
f"Updated: {updated}\n"
"\n------\n\n") + body
f.write(md_note)
sh.mv(fn, configs['repo'])
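# Small, self-contained illustration (the notebook dicts below are invented
# for demonstration) of how get_slug() builds a nested notebook path by
# following parent_id links through the full notebook list.
if __name__ == '__main__':
    demo_nbs = [
        {'id': 'a1', 'parent_id': '', 'title': 'work'},
        {'id': 'b2', 'parent_id': 'a1', 'title': 'projects'},
    ]
    print(get_slug(demo_nbs[1], demo_nbs))  # prints '/work/projects'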
| 32.271605
| 79
| 0.494262
|
e5423cee00d430015cac0c19e4add669086390f5
| 1,819
|
py
|
Python
|
featureskagg.py
|
fanteastick/ML-SKI
|
e440e2b4c43a42b2823d99bc3c7ab7c11da868a0
|
[
"Apache-2.0"
] | 2
|
2018-11-05T06:38:46.000Z
|
2019-08-09T18:43:41.000Z
|
featureskagg.py
|
fanteastick/ML-SKI
|
e440e2b4c43a42b2823d99bc3c7ab7c11da868a0
|
[
"Apache-2.0"
] | null | null | null |
featureskagg.py
|
fanteastick/ML-SKI
|
e440e2b4c43a42b2823d99bc3c7ab7c11da868a0
|
[
"Apache-2.0"
] | null | null | null |
#importing
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import numpy as np
from sklearn import tree
from sklearn.model_selection import GridSearchCV
sns.set()
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
#store target variable of training data in safe place
survived_train = df_train.Survived
#concatenate training and test sets
data = pd.concat([df_train.drop(['Survived'], axis=1), df_test])
# Extract Title from Name, store in column and plot barplot
data['Title'] = data.Name.apply(lambda x: re.search(r' ([A-Z][a-z]+)\.', x).group(1))
sns.countplot(x='Title', data=data);
plt.xticks(rotation=45);
#view a barplot of all the titles
data['Title'] = data['Title'].replace({'Mlle':'Miss', 'Mme':'Mrs', 'Ms':'Miss'})
data['Title'] = data['Title'].replace(['Don', 'Dona', 'Rev', 'Dr', 'Major', 'Lady', 'Sir', 'Col', 'Capt', 'Countess', 'Jonkheer'],'Special')
sns.countplot(x='Title', data=data);
plt.xticks(rotation=45);
#plt.show()
#making a 'has cabin' feature
data['Has_Cabin']= ~data.Cabin.isnull()
#drop columns
data.drop(['Cabin', 'Name', 'PassengerId', 'Ticket'], axis=1, inplace=True)
#fill missing values in incomplete columns
data['Age'] = data.Age.fillna(data.Age.median())
data['Fare'] = data.Fare.fillna(data.Fare.median())
data['Embarked'] = data['Embarked'].fillna('S')
print(data.info())
#bin the numerical data
data['CatAge'] = pd.qcut(data.Age, q=4, labels=False)
data['CatFare'] = pd.qcut(data.Fare, q=4, labels=False)
#drop the data you just binned
data=data.drop(['Age','Fare'], axis=1)
#column of family members on board
data['Fam_Size'] = data.Parch + data.SibSp
data= data.drop(['SibSp', 'Parch'], axis=1)
#transform everything to binary variables
#data_dum = pd.get_dummies(data, drop_first = True)
#print(data_dum.head())
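# Quick illustration (toy numbers, not the Titanic data) of what the qcut
# binning above produces: four equal-frequency bins labelled 0 through 3.
example_ages = pd.Series([4, 18, 25, 33, 47, 62, 71, 80])
print(pd.qcut(example_ages, q=4, labels=False).tolist())  # [0, 0, 1, 1, 2, 2, 3, 3]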
| 27.560606
| 140
| 0.698736
|
df1471fa0aa10248802b161fa0964f2bd552447b
| 8,351
|
py
|
Python
|
ultrasync/common.py
|
chatziko/ultrasync
|
1bb2ee265a62e6f3da0889b73467e4a602dc71f2
|
[
"MIT"
] | 8
|
2020-01-15T18:23:17.000Z
|
2021-10-30T22:04:41.000Z
|
ultrasync/common.py
|
chatziko/ultrasync
|
1bb2ee265a62e6f3da0889b73467e4a602dc71f2
|
[
"MIT"
] | 10
|
2020-11-13T23:46:21.000Z
|
2022-03-04T21:53:29.000Z
|
ultrasync/common.py
|
chatziko/ultrasync
|
1bb2ee265a62e6f3da0889b73467e4a602dc71f2
|
[
"MIT"
] | 2
|
2020-06-12T11:16:17.000Z
|
2022-02-22T09:26:38.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
class NX595EVendor(object):
"""
Defines the possible NX-595E vendors. This greatly affects how the
control panel is parsed
"""
# Interlogix ZeroWire UltraSync
ZEROWIRE = 'zerowire'
# ComNav UltraSync
COMNAV = 'comnav'
# xGen UltraSync
XGEN = 'xgen'
# xGen8 UltraSync
XGEN8 = 'xgen8'
class XGZWPanelFunction(object):
"""
    Interlogix and xGen ZeroWire Function Commands
"""
AREA_DISARM = 0
AREA_STAY = 1
AREA_CHIME_TOGGLE = 10
AREA_AWAY = 15
class CNPanelFunction(object):
"""
ComNav Function Commands
"""
AREA_CHIME_TOGGLE = 1
AREA_DISARM = 16
AREA_AWAY = 17
AREA_STAY = 18
class XGZWAreaBank(object):
"""
Defines the Bank Identifiers for Interlogix xGen ZeroWire Area Queries
By identifying the bank and breaking it down, it makes the code
MUCH easier to read for others. It also makes the code easier to
debug down the road.
This list has been purely generated through reverse engineering the
information available to me using the existing Ultrasync Alarm Panel
interface.
"""
UNKWN_00 = 0
UNKWN_01 = 1
PARTIAL = 2
ARMED = 3
EXIT_MODE01 = 4
EXIT_MODE02 = 5
UNKWN_06 = 6
INSTANT = 7
UNKWN_08 = 8
UNKWN_09 = 9
UNKWN_10 = 10
UNKWN_11 = 11
UNKWN_12 = 12
UNKWN_13 = 13
UNKWN_14 = 14
UNKWN_15 = 15
UNKWN_16 = 16
UNKWN_17 = 17
CHIME = 18
UNKWN_19 = 19
UNKWN_20 = 20
UNKWN_21 = 21
UNKWN_22 = 22
UNKWN_23 = 23
UNKWN_24 = 24
UNKWN_25 = 25
NIGHT = 26
UNKWN_27 = 27
UNKWN_28 = 28
UNKWN_29 = 29
UNKWN_30 = 30
UNKWN_31 = 31
UNKWN_32 = 32
UNKWN_33 = 33
UNKWN_34 = 34
UNKWN_35 = 35
UNKWN_36 = 36
UNKWN_37 = 37
class CNAreaBank(object):
"""
Defines the Bank Identifiers for ComNav Area Queries
By identifying the bank and breaking it down, it makes the code
MUCH easier to read for others. It also makes the code easier to
debug down the road.
This list has been purely generated through reverse engineering the
information available to me using the existing Ultrasync Alarm Panel
interface.
"""
ARMED = 0
PARTIAL = 1
UNKWN_02 = 2
UNKWN_03 = 3
UNKWN_04 = 4
UNKWN_05 = 5
UNKWN_06 = 6
EXIT_MODE01 = 7
EXIT_MODE02 = 8
UNKWN_09 = 9
UNKWN_10 = 10
UNKWN_11 = 11
UNKWN_12 = 12
UNKWN_13 = 13
UNKWN_14 = 14
CHIME = 15
UNKWN_16 = 16
class AreaStatus(object):
"""
Defines the possible panel display status messages
"""
# All sensor are active; occupants are not present.
ARMED_AWAY = 'Armed Away'
# Alarm state when user is present in the home; only perimeter sensors
# are activated.
ARMED_STAY = 'Armed Stay'
READY = 'Ready'
ALARM_FIRE = 'Fire Alarm'
ALARM_BURGLAR = 'Burglar Alarm'
ALARM_PANIC = 'Panic Alarm'
ALARM_MEDICAL = 'Medical Alarm'
DELAY_EXIT_1 = 'Exit Delay 1'
DELAY_EXIT_2 = 'Exit Delay 2'
DELAY_ENTRY = 'Entry Delay'
SENSOR_BYPASS = 'Sensor Bypass'
    SENSOR_TROUBLE = 'Sensor Trouble'
    SENSOR_TAMPER = 'Sensor Tamper'
    SENSOR_BATTERY = 'Sensor Low Battery'
    SENSOR_SUPERVISION = 'Sensor Supervision'
NOT_READY = 'Not Ready'
NOT_READY_FORCEABLE = 'Not Ready'
DISARMED = 'Disarm'
AREA_STATES = (
    # These are intentionally ordered and the order is very important
    # entries defined above but missing from this tuple are intentional omissions
# These states are the same on both the ComNav and Interlogix systems
AreaStatus.ARMED_AWAY, # bank index 0
AreaStatus.ARMED_STAY, # bank index 1
AreaStatus.READY, # bank index 2
AreaStatus.ALARM_FIRE, # bank index 3
AreaStatus.ALARM_BURGLAR, # bank index 4
AreaStatus.ALARM_PANIC, # bank index 5
AreaStatus.ALARM_MEDICAL, # bank index 6
AreaStatus.DELAY_EXIT_1, # bank index 7
AreaStatus.DELAY_EXIT_2, # bank index 8
AreaStatus.DELAY_ENTRY, # bank index 9
AreaStatus.SENSOR_BYPASS, # bank index 10
AreaStatus.SENSOR_TROUBLE, # bank index 11
AreaStatus.SENSOR_TAMPER, # bank index 12
AreaStatus.SENSOR_BATTERY, # bank index 13
AreaStatus.SENSOR_SUPERVISION, # bank index 14
# Last entry empty
'', # bank index 15
)
AREA_STATUS_PROCESS_PRIORITY = (
# The processing order of the area status messages. We prioritize them to
    # ease the logic used to determine which one should be displayed if
    # more than one is set. The first matched entry when read from top down
# is always used. The index corresponds with the bank index defined above.
3, # AreaStatus.ALARM_FIRE
4, # AreaStatus.ALARM_BURGLAR
5, # AreaStatus.ALARM_PANIC
6, # AreaStatus.ALARM_MEDICAL
7, # AreaStatus.DELAY_EXIT_1
8, # AreaStatus.DELAY_EXIT_2
9, # AreaStatus.DELAY_ENTRY
0, # AreaStatus.ARMED_AWAY
1, # AreaStatus.ARMED_STAY
10, # AreaStatus.SENSOR_BYPASS
11, # AreaStatus.SENSOR_TROUBLE
12, # AreaStatus.SENSOR_TAMPER
13, # AreaStatus.SENSOR_BATTERY
14, # AreaStatus.SENSOR_SUPERVISION
2, # AreaStatus.READY
# Last entry empty
15,
)
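# Illustrative helper (an assumption for demonstration, not part of the panel
# firmware API): given a sequence of booleans indexed by area bank, resolve
# the single status string to display by walking the priority order above.
def resolve_area_status(bank_flags):
    """Return the highest-priority area status whose bank flag is set."""
    for bank_index in AREA_STATUS_PROCESS_PRIORITY:
        if bank_flags[bank_index]:
            return AREA_STATES[bank_index]
    # Nothing set at all; fall back to the empty trailing entry.
    return ''


# e.g. resolve_area_status([False, False, True] + [False] * 13) returns 'Ready'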
class ZoneBank(object):
"""
Defines the Bank Identifiers for Zone/Sensor Queries
By identifying the bank and breaking it down, it makes the code
MUCH easier to read for others. It also makes the code easier to
debug down the road.
This list has been purely generated through reverse engineering the
information available to me using the existing Ultrasync Alarm Panel
interface.
"""
UNKWN_00 = 0
UNKWN_01 = 1
UNKWN_02 = 2
UNKWN_03 = 3
UNKWN_04 = 4
UNKWN_05 = 5
UNKWN_06 = 6
UNKWN_07 = 7
UNKWN_08 = 8
UNKWN_09 = 9
UNKWN_10 = 10
UNKWN_11 = 11
UNKWN_12 = 12
UNKWN_13 = 13
# The following 4 bank ID's below are only available on the Interlogix
# ZeroWire
UNKWN_14 = 14
UNKWN_15 = 15
UNKWN_16 = 16
BYPASS_DISABLED = 17
class ZoneStatus(object):
"""
Defines the possible panel display zone/sensor status
"""
READY = 'Ready'
NOT_READY = 'Not Ready'
ZONE_STATES = (
ZoneStatus.NOT_READY,
'Tamper',
'Trouble',
'',
'Inhibited',
'Alarm',
'Low Battery',
'Supervision Fault',
'Test Fail',
'',
'Entry Delay',
'',
'Test Active',
'Activity Fail',
'Antimask',
)
class AlarmScene(object):
"""
Defines the different alarm panel states and/or macros
"""
# All sensor are active; occupants are not present.
AWAY = 'away'
# Alarm state when user is present in the home; only perimeter sensors
# are activated.
STAY = 'stay'
# Alarm system is disarmed
DISARMED = 'disarm'
# A list of all valid alarm states used for validating
ALARM_SCENES = (
AlarmScene.AWAY,
AlarmScene.STAY,
AlarmScene.DISARMED,
)
| 24.854167
| 79
| 0.66052
|
7cb2256ccf21ab27084ce7486182f7f6ad17fbac
| 336
|
py
|
Python
|
s41.py
|
glcrazier/LeetCodePlay
|
cf951a079d458e02000d170529cb1e3b049da023
|
[
"MIT"
] | 1
|
2018-02-20T13:56:02.000Z
|
2018-02-20T13:56:02.000Z
|
s41.py
|
glcrazier/LeetCodePlay
|
cf951a079d458e02000d170529cb1e3b049da023
|
[
"MIT"
] | null | null | null |
s41.py
|
glcrazier/LeetCodePlay
|
cf951a079d458e02000d170529cb1e3b049da023
|
[
"MIT"
] | null | null | null |
from solution import Solution
if __name__ == '__main__':
sol = Solution()
    print(sol.firstMissingPositive([1,2,0]))
    print(sol.firstMissingPositive([3,4,-1,1]))
    print(sol.firstMissingPositive([]))
    print(sol.firstMissingPositive([1]))
    print(sol.firstMissingPositive([2]))
    print(sol.firstMissingPositive([1,2]))
| 28
| 46
| 0.696429
|
232c61e0a6ebd976f72f3e0a8b5b3a1ac70345ec
| 3,232
|
py
|
Python
|
app_PyQtArnold_RenderTest/Core/app_PyQtArnold_core.py
|
fidelm02/CS_TestsDec
|
10ac9a72c214fd4e48fd3e2f0a85bee6227ba287
|
[
"Unlicense"
] | null | null | null |
app_PyQtArnold_RenderTest/Core/app_PyQtArnold_core.py
|
fidelm02/CS_TestsDec
|
10ac9a72c214fd4e48fd3e2f0a85bee6227ba287
|
[
"Unlicense"
] | null | null | null |
app_PyQtArnold_RenderTest/Core/app_PyQtArnold_core.py
|
fidelm02/CS_TestsDec
|
10ac9a72c214fd4e48fd3e2f0a85bee6227ba287
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Methods that have no direct relation
to the UI
"""
import os
from PyQt4 import QtGui, QtCore
from arnold import *
class CoreFunctions(QtCore.QThread):
renderSignal = QtCore.pyqtSignal()
def __init__(self):
QtCore.QThread.__init__(self)
self.r = self.g = self.b = 100
self.log=None
self.image_path = None
def createSphere(self):
# create a sphere geometric primitive
self.sphere = AiNode("sphere")
AiNodeSetStr(self.sphere, "name", "geo_sphere")
AiNodeSetVec(self.sphere, "center", 0.0, 0.0, 0.0)
AiNodeSetFlt(self.sphere, "radius", 6.0)
# create a red standard shader
self.shader = AiNode("standard")
AiNodeSetStr(self.shader, "name", "ai_shader")
AiNodeSetRGB(self.shader,
"Kd_color", self.r,
self.g, self.b)
AiNodeSetFlt(self.shader, "Ks", 0.01)
# assign the shaders to the geometric objects
AiNodeSetPtr(self.sphere, "shader", self.shader)
def createCamera(self):
"""
Create camera
"""
self.camera = AiNode("persp_camera")
AiNodeSetStr(self.camera, "name", "cam")
# Camera position
AiNodeSetVec(self.camera, "position", 0.0, 0.0, 40.0)
AiNodeSetVec(self.camera, "look_at", 0.0, 0.0, 0.0)
AiNodeSetFlt(self.camera, "fov", 45.0)
def createLights(self):
"""
Create light
"""
self.light = AiNode("point_light")
AiNodeSetStr(self.light, "name", "light_01")
#Light position
AiNodeSetVec(self.light, "position", 0.0, 15.0, 45.0)
AiNodeSetFlt(self.light, "exposure", 2.0)
AiNodeSetFlt(self.light, "intensity", 8.0)
def render_settings(self):
"""
Set render settings
"""
ops = AiUniverseGetOptions()
AiNodeSetInt(ops, "AA_samples", 8)
AiNodeSetInt(ops, "xres", 512)
AiNodeSetInt(ops, "yres", 384)
AiNodeSetInt(ops, "GI_diffuse_depth", 4)
# create an output driver node
driver = AiNode("driver_jpeg")
AiNodeSetStr(driver, "name", "_driver")
AiNodeSetStr(driver, "filename", self.image_path)
AiNodeSetFlt(driver, "gamma", 2.2)
# create a gaussian filter node
filter = AiNode("gaussian_filter")
AiNodeSetStr(filter, "name", "_filter")
# assign the driver and filter to the main (beauty) AOV,
# which is called "RGBA" and is of type RGBA
outs_ = AiArrayAllocate(1, 1, AI_TYPE_STRING)
AiArraySetStr(outs_, 0, "RGBA RGBA _filter _driver")
AiNodeSetArray(ops, "outputs", outs_)
def run(self):
# Begin arnold process
AiBegin()
#Connecting log
AiMsgSetLogFileName(self.log)
AiMsgSetConsoleFlags(AI_LOG_ALL)
self.createSphere()
self.createCamera()
self.createLights()
self.render_settings()
AiRender(AI_RENDER_MODE_CAMERA)
# End arnold process
AiEnd()
self.renderSignal.emit()
def setColor(self, r, g, b):
self.r = r * 255
self.g = g * 255
self.b = b * 255
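# Minimal usage sketch (assumed wiring, not part of the original class): the
# thread needs an output image path and a log file name before start(), and
# renderSignal tells the caller when the frame is done.  The file names below
# are placeholders.
if __name__ == "__main__":
    import sys

    app = QtGui.QApplication(sys.argv)
    core = CoreFunctions()
    core.image_path = "render_test.jpg"
    core.log = "arnold_render.log"
    core.setColor(1.0, 0.2, 0.2)  # setColor scales these 0-1 values to 0-255

    def _on_render_done():
        print("render finished: " + core.image_path)
        app.quit()

    core.renderSignal.connect(_on_render_done)
    core.start()  # executes CoreFunctions.run() in a background thread
    sys.exit(app.exec_())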
| 29.117117
| 64
| 0.589109
|
a357777ac9ad9e3679452b0a3067689833ac37da
| 1,956
|
py
|
Python
|
robosuite-utils/robosuit-tests/test5.py
|
jdj2261/robosuite
|
c708fbb69459b74bdfebb3d9e1b8ab3ad3480a19
|
[
"MIT"
] | null | null | null |
robosuite-utils/robosuit-tests/test5.py
|
jdj2261/robosuite
|
c708fbb69459b74bdfebb3d9e1b8ab3ad3480a19
|
[
"MIT"
] | null | null | null |
robosuite-utils/robosuit-tests/test5.py
|
jdj2261/robosuite
|
c708fbb69459b74bdfebb3d9e1b8ab3ad3480a19
|
[
"MIT"
] | null | null | null |
from robosuite import load_controller_config
from robosuite.models import MujocoWorldBase
from robosuite.models.arenas.table_arena import TableArena
from robosuite.models.robots import Panda
from robosuite.models.base import MujocoXML
from robosuite.utils import SimulationError
from robosuite.robots import SingleArm
from mujoco_py import MjSim, MjViewer, load_model_from_path, load_model_from_xml
if __name__ == '__main__':
world = MujocoWorldBase()
controller_config = load_controller_config(
default_controller="JOINT_VELOCITY")
controller_config["output_max"] = 1.0
controller_config["output_min"] = -1.0
control_freq = 50
horizon = 1000
mujoco_arena = TableArena()
mujoco_arena.set_origin([0.8, 0, 0])
robot = SingleArm(
robot_type="Panda",
idn=0,
gripper_type=None,
controller_config=controller_config,
# initial_qpos=[0.0, 0.7, 0.0, -1.4, 0.0, -0.56, 0.0],
# initialization_noise=robot_noise,
# gripper_type="PandaGripper",
control_freq=control_freq,
)
# print(robot.controller_config)
robot.load_model()
robot.robot_model.set_base_xpos([0, 0, 0])
world.merge(mujoco_arena)
world.merge(robot.robot_model)
sim = MjSim(world.get_model())
viewer = MjViewer(sim)
viewer.vopt.geomgroup[0] = 0
sim.reset()
robot.reset_sim(sim)
robot.setup_references()
robot.reset(deterministic=False)
# sim.forward()
for i in range(horizon):
# action = np.random.randn(9)
action = [1, 0, 0, 0, 0, 0, 0]
policy_step = True
# for i in range(int(control_timestep / model_timestep)):
sim.forward()
# pre_action(sim, action, policy_step)
robot.control(action=action, policy_step=policy_step)
print(robot.torques, robot._joint_positions, robot._joint_velocities)
policy_step = False
sim.step()
viewer.render()
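    # Added note (hedged): with gripper_type=None and the JOINT_VELOCITY controller,
    # the 7-entry action supplies one velocity command per Panda joint; the controller
    # scales commands into the configured [output_min, output_max] = [-1.0, 1.0] range.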
| 30.5625
| 80
| 0.683538
|
bb37e719415f89ec61b2e640d352adbc4f070bbf
| 7,153
|
py
|
Python
|
qpid_bow/asyncio.py
|
forgetsemicolon/qpid-bow
|
eed751b7d6b26ebfb3d7cb2c79c4ab38dcf503ef
|
[
"MIT"
] | 10
|
2018-06-25T11:48:01.000Z
|
2022-01-24T01:56:30.000Z
|
qpid_bow/asyncio.py
|
forgetsemicolon/qpid-bow
|
eed751b7d6b26ebfb3d7cb2c79c4ab38dcf503ef
|
[
"MIT"
] | 10
|
2018-06-25T10:36:26.000Z
|
2020-10-03T00:49:00.000Z
|
qpid_bow/asyncio.py
|
forgetsemicolon/qpid-bow
|
eed751b7d6b26ebfb3d7cb2c79c4ab38dcf503ef
|
[
"MIT"
] | 8
|
2018-06-25T09:06:00.000Z
|
2021-05-04T23:47:41.000Z
|
import asyncio
import time
from logging import getLogger
from typing import List, Union
from proton import Connection, Receiver, Sender, Session, Url
from proton.reactor import Container as BaseContainer, LinkOption
from proton.handlers import IOHandler
logger = getLogger()
class AsyncioReactorHandler:
"""Qpid Proton Reactor Global Loop Handler for Python asyncio.
    This implementation will set up Qpid Proton's Selectables to use asyncio's
writable/readable event handling.
Based on Tornado implementation:
https://qpid.apache.org/releases/qpid-proton-0.18.1/proton/python/examples/proton_tornado.py.html
Args:
loop: An asyncio event loop
handler_base: An IO Handler
"""
def __init__(self, loop=None, handler_base=None):
self.loop = loop or asyncio.get_event_loop()
self.io = handler_base or IOHandler()
self._count = 0
self._reactor = None
def on_reactor_init(self, event):
logger.debug("Reactor initted")
self._reactor = event.reactor
def on_reactor_quiesced(self, event): # pylint: disable=no-self-use
# check if we are still quiesced, other handlers of
# on_reactor_quiesced could have produced events to process
if not event.reactor.quiesced:
return
logger.debug("Reactor quiesced")
event.reactor.yield_()
def on_unhandled(self, name, event): # pylint: disable=unused-argument
event.dispatch(self.io)
def _schedule(self, selectable):
if selectable.deadline:
logger.debug("Setting up schedule for %s", selectable)
self.loop.call_later(
selectable.deadline - time.time(),
lambda: self._scheduled_selectable_expired(selectable))
def _scheduled_selectable_expired(self, selectable):
logger.debug("Scheduled selectable %s expired", selectable)
selectable.expired()
self._process()
def _process(self):
self._reactor.process()
if not self._reactor.quiesced:
self.loop.call_soon_threadsafe(self._process)
def _selectable_readable(self, selectable):
logger.debug("Readable callback: %s", selectable)
selectable.readable()
self._process()
def _selectable_writable(self, selectable):
logger.debug("Writable callback: %s", selectable)
selectable.writable()
self._process()
def _setup_selectable(self, selectable):
if selectable.reading:
logger.debug("Setting up reader for %s", selectable)
self.loop.add_reader(
selectable.fileno(), self._selectable_readable, selectable)
if selectable.writing:
logger.debug("Setting up writer for %s", selectable)
self.loop.add_writer(
selectable.fileno(), self._selectable_writable, selectable)
def _teardown_selectable(self, selectable):
logger.debug("Resetting %s", selectable)
self.loop.remove_writer(selectable.fileno())
self.loop.remove_reader(selectable.fileno())
def on_selectable_init(self, event):
selectable = event.context
if selectable.fileno() >= 0:
self._setup_selectable(selectable)
self._schedule(selectable)
self._count += 1
def on_selectable_updated(self, event):
selectable = event.context
if selectable.fileno() > 0:
self._teardown_selectable(selectable)
self._setup_selectable(selectable)
self._schedule(selectable)
def on_selectable_final(self, event):
selectable = event.context
if selectable.fileno() > 0:
self._teardown_selectable(selectable)
logger.debug("Selectable final %s", selectable)
selectable.release()
self._count -= 1
if self._count == 0:
self.loop.call_soon_threadsafe(self._stop)
def _stop(self):
if self._reactor:
logger.debug("Stopping reactor")
self._reactor.stop()
# Needs to be called Container for magic reasons :(
class Container(BaseContainer):
"""Asyncio event loop based Qpid Reactor container.
Args:
*handlers: One or more connectors
Keyword Args:
handler_base: An IO Handler.
impl: Reactor implementation, default is pn_reactor.
"""
def __init__(self, *handlers, **kwargs):
loop = kwargs.get('loop', asyncio.get_event_loop())
kwargs['global_handler'] = AsyncioReactorHandler(
loop, kwargs.get('handler_base', None))
super().__init__(*handlers, **kwargs)
self.loop = loop
def run(self):
"""Start Reactor container and begin processing."""
self.start()
self.touch()
def touch(self):
"""Instruct the reactor container to do processing.
        You might need to call this to start up new sessions. This is already
handled for create_receiver and create_sender.
"""
if self.process() and not self.quiesced:
self.loop.call_soon_threadsafe(self.touch)
def create_receiver(
self, context: Union[Connection, Session, Url, str],
source=None, target=None, name=None, dynamic=False, handler=None,
options: Union[LinkOption, List[LinkOption]] = None) -> Receiver:
"""Initiate a link to receive messages (subscription).
Args:
context: One of: created session, connection with or without
established session, or url to create session.
source: Source address.
target: Target address.
name: Name of the link.
            dynamic: Whether a dynamic AMQP queue should be generated.
handler: Custom handler to handle received message.
options: LinkOptions to further control the attachment.
Returns:
Receiver: A Qpid Receiver link over which messages are received.
"""
receiver = super().create_receiver(context, source, target, name,
dynamic, handler, options)
self.touch()
return receiver
def create_sender(
self, context: Union[Connection, Session, Url, str],
target=None, source=None, name=None, handler=None, tags=None,
options: Union[LinkOption, List[LinkOption]] = None) -> Sender:
"""Initiate a link to send messages.
Args:
context: One of: created session, connection with or without
established session, or url to create session.
target: Target address.
source: Source address.
name: Name of the link.
handler: Custom handler to handle received message.
tags:
options: LinkOptions to further control the attachment.
Returns:
Sender: A Qpid Sender link over which messages are sent.
"""
sender = super().create_sender(context, target, source, name, handler,
tags, options)
self.touch()
return sender
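# Added usage sketch (hedged, not part of this module): driving the asyncio-backed
# Container with a plain Qpid Proton MessagingHandler. The broker URL and address are
# hypothetical; MessagingHandler and event.container.create_sender() come from the
# standard proton API.
#
#     import asyncio
#     from proton.handlers import MessagingHandler
#
#     class Hello(MessagingHandler):
#         def on_start(self, event):
#             event.container.create_sender("amqp://localhost:5672/examples")
#
#     loop = asyncio.get_event_loop()
#     Container(Hello()).run()   # start() plus an initial touch(); I/O is serviced by the loop
#     loop.run_forever()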
| 34.723301
| 101
| 0.635677
|
9c6d027b05857838c2ccb545f3e6ed70c1c8697e
| 14,234
|
py
|
Python
|
homeassistant/components/wled/light.py
|
fanta759/core
|
fcdb54d8780900fb85e6c20d5382cfd13b69a0b3
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/wled/light.py
|
fanta759/core
|
fcdb54d8780900fb85e6c20d5382cfd13b69a0b3
|
[
"Apache-2.0"
] | 52
|
2020-07-14T14:12:26.000Z
|
2022-03-31T06:24:02.000Z
|
homeassistant/components/wled/light.py
|
fanta759/core
|
fcdb54d8780900fb85e6c20d5382cfd13b69a0b3
|
[
"Apache-2.0"
] | 1
|
2018-08-03T20:06:38.000Z
|
2018-08-03T20:06:38.000Z
|
"""Support for LED lights."""
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_registry import (
async_get_registry as async_get_entity_registry,
)
from homeassistant.helpers.typing import HomeAssistantType
import homeassistant.util.color as color_util
from . import WLEDDataUpdateCoordinator, WLEDDeviceEntity, wled_exception_handler
from .const import (
ATTR_COLOR_PRIMARY,
ATTR_INTENSITY,
ATTR_ON,
ATTR_PALETTE,
ATTR_PLAYLIST,
ATTR_PRESET,
ATTR_REVERSE,
ATTR_SEGMENT_ID,
ATTR_SPEED,
DOMAIN,
SERVICE_EFFECT,
)
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up WLED light based on a config entry."""
coordinator: WLEDDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_EFFECT,
{
vol.Optional(ATTR_EFFECT): vol.Any(cv.positive_int, cv.string),
vol.Optional(ATTR_INTENSITY): vol.All(
vol.Coerce(int), vol.Range(min=0, max=255)
),
vol.Optional(ATTR_PALETTE): vol.Any(cv.positive_int, cv.string),
vol.Optional(ATTR_REVERSE): cv.boolean,
vol.Optional(ATTR_SPEED): vol.All(
vol.Coerce(int), vol.Range(min=0, max=255)
),
},
"async_effect",
)
update_segments = partial(
async_update_segments, entry, coordinator, {}, async_add_entities
)
coordinator.async_add_listener(update_segments)
update_segments()
class WLEDMasterLight(LightEntity, WLEDDeviceEntity):
"""Defines a WLED master light."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator):
"""Initialize WLED master light."""
super().__init__(
entry_id=entry_id,
coordinator=coordinator,
name=f"{coordinator.data.info.name} Master",
icon="mdi:led-strip-variant",
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return f"{self.coordinator.data.info.mac_address}"
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light between 1..255."""
return self.coordinator.data.state.brightness
@property
def is_on(self) -> bool:
"""Return the state of the light."""
return bool(self.coordinator.data.state.on)
@wled_exception_handler
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the light."""
data = {ATTR_ON: False}
if ATTR_TRANSITION in kwargs:
# WLED uses 100ms per unit, so 10 = 1 second.
data[ATTR_TRANSITION] = round(kwargs[ATTR_TRANSITION] * 10)
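            # Example (added): a 2.5 s transition maps to round(2.5 * 10) == 25 units.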
await self.coordinator.wled.master(**data)
@wled_exception_handler
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the light."""
data = {ATTR_ON: True}
if ATTR_TRANSITION in kwargs:
# WLED uses 100ms per unit, so 10 = 1 second.
data[ATTR_TRANSITION] = round(kwargs[ATTR_TRANSITION] * 10)
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
await self.coordinator.wled.master(**data)
class WLEDSegmentLight(LightEntity, WLEDDeviceEntity):
"""Defines a WLED light based on a segment."""
def __init__(
self, entry_id: str, coordinator: WLEDDataUpdateCoordinator, segment: int
):
"""Initialize WLED segment light."""
self._rgbw = coordinator.data.info.leds.rgbw
self._segment = segment
# If this is the one and only segment, use a simpler name
name = f"{coordinator.data.info.name} Segment {self._segment}"
if len(coordinator.data.state.segments) == 1:
name = coordinator.data.info.name
super().__init__(
entry_id=entry_id,
coordinator=coordinator,
name=name,
icon="mdi:led-strip-variant",
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return f"{self.coordinator.data.info.mac_address}_{self._segment}"
@property
def available(self) -> bool:
"""Return True if entity is available."""
try:
self.coordinator.data.state.segments[self._segment]
except IndexError:
return False
return super().available
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
playlist = self.coordinator.data.state.playlist
if playlist == -1:
playlist = None
preset = self.coordinator.data.state.preset
if preset == -1:
preset = None
segment = self.coordinator.data.state.segments[self._segment]
return {
ATTR_INTENSITY: segment.intensity,
ATTR_PALETTE: segment.palette.name,
ATTR_PLAYLIST: playlist,
ATTR_PRESET: preset,
ATTR_REVERSE: segment.reverse,
ATTR_SPEED: segment.speed,
}
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
"""Return the hue and saturation color value [float, float]."""
color = self.coordinator.data.state.segments[self._segment].color_primary
return color_util.color_RGB_to_hs(*color[:3])
@property
def effect(self) -> Optional[str]:
"""Return the current effect of the light."""
return self.coordinator.data.state.segments[self._segment].effect.name
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light between 1..255."""
state = self.coordinator.data.state
# If this is the one and only segment, calculate brightness based
# on the master and segment brightness
if len(state.segments) == 1:
return int(
(state.segments[self._segment].brightness * state.brightness) / 255
)
return state.segments[self._segment].brightness
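        # Example (added): with master brightness 128 and segment brightness 200,
        # the reported value is int((200 * 128) / 255) == 100.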
@property
def white_value(self) -> Optional[int]:
"""Return the white value of this light between 0..255."""
color = self.coordinator.data.state.segments[self._segment].color_primary
return color[-1] if self._rgbw else None
@property
def supported_features(self) -> int:
"""Flag supported features."""
flags = (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_TRANSITION
)
if self._rgbw:
flags |= SUPPORT_WHITE_VALUE
return flags
@property
def effect_list(self) -> List[str]:
"""Return the list of supported effects."""
return [effect.name for effect in self.coordinator.data.effects]
@property
def is_on(self) -> bool:
"""Return the state of the light."""
state = self.coordinator.data.state
# If there is a single segment, take master into account
if len(state.segments) == 1 and not state.on:
return False
return bool(state.segments[self._segment].on)
@wled_exception_handler
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the light."""
data = {ATTR_ON: False}
if ATTR_TRANSITION in kwargs:
# WLED uses 100ms per unit, so 10 = 1 second.
data[ATTR_TRANSITION] = round(kwargs[ATTR_TRANSITION] * 10)
# If there is a single segment, control via the master
if len(self.coordinator.data.state.segments) == 1:
await self.coordinator.wled.master(**data)
return
data[ATTR_SEGMENT_ID] = self._segment
await self.coordinator.wled.segment(**data)
@wled_exception_handler
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the light."""
data = {ATTR_ON: True, ATTR_SEGMENT_ID: self._segment}
if ATTR_COLOR_TEMP in kwargs:
mireds = color_util.color_temperature_kelvin_to_mired(
kwargs[ATTR_COLOR_TEMP]
)
data[ATTR_COLOR_PRIMARY] = tuple(
map(int, color_util.color_temperature_to_rgb(mireds))
)
if ATTR_HS_COLOR in kwargs:
hue, sat = kwargs[ATTR_HS_COLOR]
data[ATTR_COLOR_PRIMARY] = color_util.color_hsv_to_RGB(hue, sat, 100)
if ATTR_TRANSITION in kwargs:
# WLED uses 100ms per unit, so 10 = 1 second.
data[ATTR_TRANSITION] = round(kwargs[ATTR_TRANSITION] * 10)
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
if ATTR_EFFECT in kwargs:
data[ATTR_EFFECT] = kwargs[ATTR_EFFECT]
# Support for RGBW strips, adds white value
if self._rgbw and any(
x in (ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_WHITE_VALUE) for x in kwargs
):
# WLED cannot just accept a white value, it needs the color.
            # We use the last known color in case just the white value changes.
if all(x not in (ATTR_COLOR_TEMP, ATTR_HS_COLOR) for x in kwargs):
hue, sat = self.hs_color
data[ATTR_COLOR_PRIMARY] = color_util.color_hsv_to_RGB(hue, sat, 100)
# On a RGBW strip, when the color is pure white, disable the RGB LEDs in
# WLED by setting RGB to 0,0,0
if data[ATTR_COLOR_PRIMARY] == (255, 255, 255):
data[ATTR_COLOR_PRIMARY] = (0, 0, 0)
# Add requested or last known white value
if ATTR_WHITE_VALUE in kwargs:
data[ATTR_COLOR_PRIMARY] += (kwargs[ATTR_WHITE_VALUE],)
else:
data[ATTR_COLOR_PRIMARY] += (self.white_value,)
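        # Example (added, hedged): with a last-known red primary (255, 0, 0) and a
        # requested white value of 200, the payload becomes (255, 0, 0, 200); a pure
        # white primary is first zeroed to (0, 0, 0) so only the white LEDs are used.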
# When only 1 segment is present, switch along the master, and use
# the master for power/brightness control.
if len(self.coordinator.data.state.segments) == 1:
master_data = {ATTR_ON: True}
if ATTR_BRIGHTNESS in data:
master_data[ATTR_BRIGHTNESS] = data[ATTR_BRIGHTNESS]
data[ATTR_BRIGHTNESS] = 255
if ATTR_TRANSITION in data:
master_data[ATTR_TRANSITION] = data[ATTR_TRANSITION]
del data[ATTR_TRANSITION]
await self.coordinator.wled.segment(**data)
await self.coordinator.wled.master(**master_data)
return
await self.coordinator.wled.segment(**data)
@wled_exception_handler
async def async_effect(
self,
effect: Optional[Union[int, str]] = None,
intensity: Optional[int] = None,
palette: Optional[Union[int, str]] = None,
reverse: Optional[bool] = None,
speed: Optional[int] = None,
) -> None:
"""Set the effect of a WLED light."""
data = {ATTR_SEGMENT_ID: self._segment}
if effect is not None:
data[ATTR_EFFECT] = effect
if intensity is not None:
data[ATTR_INTENSITY] = intensity
if palette is not None:
data[ATTR_PALETTE] = palette
if reverse is not None:
data[ATTR_REVERSE] = reverse
if speed is not None:
data[ATTR_SPEED] = speed
await self.coordinator.wled.segment(**data)
@callback
def async_update_segments(
entry: ConfigEntry,
coordinator: WLEDDataUpdateCoordinator,
current: Dict[int, WLEDSegmentLight],
async_add_entities,
) -> None:
"""Update segments."""
segment_ids = {light.segment_id for light in coordinator.data.state.segments}
current_ids = set(current)
# Discard master (if present)
current_ids.discard(-1)
# Process new segments, add them to Home Assistant
new_entities = []
for segment_id in segment_ids - current_ids:
current[segment_id] = WLEDSegmentLight(entry.entry_id, coordinator, segment_id)
new_entities.append(current[segment_id])
# More than 1 segment now? Add master controls
if len(current_ids) < 2 and len(segment_ids) > 1:
current[-1] = WLEDMasterLight(entry.entry_id, coordinator)
new_entities.append(current[-1])
if new_entities:
async_add_entities(new_entities)
# Process deleted segments, remove them from Home Assistant
for segment_id in current_ids - segment_ids:
coordinator.hass.async_create_task(
async_remove_entity(segment_id, coordinator, current)
)
# Remove master if there is only 1 segment left
if len(current_ids) > 1 and len(segment_ids) < 2:
coordinator.hass.async_create_task(
async_remove_entity(-1, coordinator, current)
)
async def async_remove_entity(
index: int,
coordinator: WLEDDataUpdateCoordinator,
current: Dict[int, WLEDSegmentLight],
) -> None:
"""Remove WLED segment light from Home Assistant."""
entity = current[index]
await entity.async_remove()
registry = await async_get_entity_registry(coordinator.hass)
if entity.entity_id in registry.entities:
registry.async_remove(entity.entity_id)
del current[index]
| 33.179487
| 87
| 0.63805
|
141e1075fc2bb679e4853866a1b4b2a915f781bc
| 1,323
|
py
|
Python
|
flaim/users/views.py
|
bfssi-forest-dussault/flaime
|
7c898f07e0d7c16b63a59d2c0c56290e32a2df47
|
[
"MIT"
] | null | null | null |
flaim/users/views.py
|
bfssi-forest-dussault/flaime
|
7c898f07e0d7c16b63a59d2c0c56290e32a2df47
|
[
"MIT"
] | 11
|
2020-11-09T19:55:58.000Z
|
2022-02-13T19:43:03.000Z
|
flaim/users/views.py
|
BFSSI-Bioinformatics-Lab/flaime
|
1feedf3974114c16724bfbd235ce4637983a3f5e
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, RedirectView, UpdateView
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
def form_valid(self, form):
messages.add_message(
self.request, messages.INFO, _("Infos successfully updated")
)
return super().form_valid(form)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return '/'
# return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
| 25.442308
| 89
| 0.734694
|
eafa165ac0da2b150f3e2e7b21353f3b78d03a2e
| 9,364
|
py
|
Python
|
pyglet-hg/tools/epydoc/epydoc/__init__.py
|
sangh/LaserShow
|
abc95e465e3455dc220cc602dd58358c84666f29
|
[
"BSD-3-Clause"
] | 2
|
2017-05-10T08:27:22.000Z
|
2019-10-05T14:55:00.000Z
|
tools/epydoc/epydoc/__init__.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | null | null | null |
tools/epydoc/epydoc/__init__.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | null | null | null |
# epydoc
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: __init__.py 1558 2007-02-27 05:28:35Z edloper $
"""
Automatic Python reference documentation generator. Epydoc processes
Python modules and docstrings to generate formatted API documentation,
in the form of HTML pages. Epydoc can be used via a command-line
interface (`epydoc.cli`) and a graphical interface (`epydoc.gui`).
Both interfaces let the user specify a set of modules or other objects
to document, and produce API documentation using the following steps:
1. Extract basic information about the specified objects, and objects
that are related to them (such as the values defined by a module).
This can be done via introspection, parsing, or both:
* *Introspection* imports the objects, and examines them directly
using Python's introspection mechanisms.
* *Parsing* reads the Python source files that define the objects,
and extracts information from those files.
2. Combine and process that information.
* **Merging**: Merge the information obtained from introspection &
parsing each object into a single structure.
* **Linking**: Replace any \"pointers\" that were created for
imported variables with the documentation that they point to.
* **Naming**: Assign unique *canonical names* to each of the
specified objects, and any related objects.
* **Docstrings**: Parse the docstrings of each of the specified
objects.
* **Inheritance**: Add variables to classes for any values that
they inherit from their base classes.
3. Generate output. Output can be generated in a variety of formats:
* An HTML webpage.
* A LaTeX document (which can be rendered as a PDF file)
* A plaintext description.
.. digraph:: Overview of epydoc's architecture
:caption: The boxes represent steps in epydoc's processing chain.
Arrows are annotated with the data classes used to
communicate between steps. The lines along the right
side mark what portions of the processing chain are
initiated by build_doc_index() and cli(). Click on
any item to see its documentation.
/*
Python module or value * *
/ \ | |
V V | |
introspect_docs() parse_docs() | |
\ / | |
V V | |
merge_docs() | |
| build_doc_index() cli()
V | |
link_imports() | |
| | |
V | |
assign_canonical_names() | |
| | |
V | |
parse_docstrings() | |
| | |
V | |
inherit_docs() * |
/ | \ |
V V V |
HTMLWriter LaTeXWriter PlaintextWriter *
*/
ranksep = 0.1;
node [shape="box", height="0", width="0"]
{ /* Task nodes */
node [fontcolor=\"#000060\"]
introspect [label="Introspect value:\\nintrospect_docs()",
href="<docintrospecter.introspect_docs>"]
parse [label="Parse source code:\\nparse_docs()",
href="<docparser.parse_docs>"]
merge [label="Merge introspected & parsed docs:\\nmerge_docs()",
href="<docbuilder.merge_docs>", width="2.5"]
link [label="Link imports:\\nlink_imports()",
href="<docbuilder.link_imports>", width="2.5"]
name [label="Assign names:\\nassign_canonical_names()",
href="<docbuilder.assign_canonical_names>", width="2.5"]
docstrings [label="Parse docstrings:\\nparse_docstring()",
href="<docstringparser.parse_docstring>", width="2.5"]
inheritance [label="Inherit docs from bases:\\ninherit_docs()",
href="<docbuilder.inherit_docs>", width="2.5"]
write_html [label="Write HTML output:\\nHTMLWriter",
href="<docwriter.html>"]
write_latex [label="Write LaTeX output:\\nLaTeXWriter",
href="<docwriter.latex>"]
write_text [label="Write text output:\\nPlaintextWriter",
href="<docwriter.plaintext>"]
}
{ /* Input & Output nodes */
node [fontcolor=\"#602000\", shape="plaintext"]
input [label="Python module or value"]
output [label="DocIndex", href="<apidoc.DocIndex>"]
}
{ /* Graph edges */
edge [fontcolor=\"#602000\"]
input -> introspect
introspect -> merge [label="APIDoc", href="<apidoc.APIDoc>"]
input -> parse
parse -> merge [label="APIDoc", href="<apidoc.APIDoc>"]
merge -> link [label=" DocIndex", href="<apidoc.DocIndex>"]
link -> name [label=" DocIndex", href="<apidoc.DocIndex>"]
name -> docstrings [label=" DocIndex", href="<apidoc.DocIndex>"]
docstrings -> inheritance [label=" DocIndex", href="<apidoc.DocIndex>"]
inheritance -> output
output -> write_html
output -> write_latex
output -> write_text
}
{ /* Task collections */
node [shape="circle",label="",width=.1,height=.1]
edge [fontcolor="black", dir="none", fontcolor=\"#000060\"]
l3 -> l4 [label=" epydoc.\\l docbuilder.\\l build_doc_index()",
href="<docbuilder.build_doc_index>"]
l1 -> l2 [label=" epydoc.\\l cli()", href="<cli>"]
}
{ rank=same; l1 l3 input }
{ rank=same; l2 write_html }
{ rank=same; l4 output }
Package Organization
====================
The epydoc package contains the following subpackages and modules:
.. packagetree::
:style: UML
The user interfaces are provided by the `gui` and `cli` modules.
The `apidoc` module defines the basic data types used to record
information about Python objects. The programmatic interface to
epydoc is provided by `docbuilder`. Docstring markup parsing is
handled by the `markup` package, and output generation is handled by
the `docwriter` package. See the submodule list for more
information about the submodules and subpackages.
:group User Interface: gui, cli
:group Basic Data Types: apidoc
:group Documentation Generation: docbuilder, docintrospecter, docparser
:group Docstring Processing: docstringparser, markup
:group Output Generation: docwriter
:group Completeness Checking: checker
:group Miscellaneous: log, util, test, compat
:author: `Edward Loper <edloper@gradient.cis.upenn.edu>`__
:requires: Python 2.3+
:version: 3.0 beta 1
:see: `The epydoc webpage <http://epydoc.sourceforge.net>`__
:see: `The epytext markup language
manual <http://epydoc.sourceforge.net/epytext.html>`__
:todo: Create a better default top_page than trees.html.
:todo: Fix trees.html to work when documenting non-top-level
modules/packages
:todo: Implement @include
:todo: Optimize epytext
:todo: More doctests
:todo: When introspecting, limit how much introspection you do (eg,
don't construct docs for imported modules' vars if it's
not necessary)
:bug: UserDict.* is interpreted as imported .. why??
:license: IBM Open Source License
:copyright: |copy| 2006 Edward Loper
:newfield contributor: Contributor, Contributors (Alphabetical Order)
:contributor: `Glyph Lefkowitz <mailto:glyph@twistedmatrix.com>`__
:contributor: `Edward Loper <mailto:edloper@gradient.cis.upenn.edu>`__
:contributor: `Bruce Mitchener <mailto:bruce@cubik.org>`__
:contributor: `Jeff O'Halloran <mailto:jeff@ohalloran.ca>`__
:contributor: `Simon Pamies <mailto:spamies@bipbap.de>`__
:contributor: `Christian Reis <mailto:kiko@async.com.br>`__
:contributor: `Daniele Varrazzo <mailto:daniele.varrazzo@gmail.com>`__
.. |copy| unicode:: 0xA9 .. copyright sign
"""
__docformat__ = 'restructuredtext en'
__version__ = '3.0beta1'
"""The version of epydoc"""
__author__ = 'Edward Loper <edloper@gradient.cis.upenn.edu>'
"""The primary author of eypdoc"""
__url__ = 'http://epydoc.sourceforge.net'
"""The URL for epydoc's homepage"""
__license__ = 'IBM Open Source License'
"""The license governing the use and distribution of epydoc"""
# [xx] this should probably be a private variable:
DEBUG = False
"""True if debugging is turned on."""
# Changes needed for docs:
# - document the method for deciding what's public/private
# - epytext: fields are defined slightly differently (@group)
# - new fields
# - document __extra_epydoc_fields__ and @newfield
# - Add a faq?
# - @type a,b,c: ...
# - new command line option: --command-line-order
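# Added sketch (hedged, not part of epydoc): the processing chain described in the
# docstring, driven through the programmatic interface it names. The package name and
# output directory are hypothetical, and the HTMLWriter constructor/write() call is an
# assumption based on the docwriter.html module rather than something verified here.
#
#     from epydoc.docbuilder import build_doc_index
#     from epydoc.docwriter.html import HTMLWriter
#
#     docindex = build_doc_index(['mypackage'])  # introspect/parse -> merge -> ... -> inherit
#     HTMLWriter(docindex).write('api-docs/')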
| 41.070175
| 76
| 0.595258
|
4a9adcf48382d0cc69399e83c7fe685435c5031d
| 6,541
|
py
|
Python
|
tensorflow/contrib/session_bundle/gc.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/contrib/session_bundle/gc.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/contrib/session_bundle/gc.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print(every_fifth(path_list)) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print(largest_three(all_paths)) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print(both(all_paths)) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.rmtree(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
Path = collections.namedtuple('Path', 'path export_version')
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def largest_export_versions(n):
"""Creates a filter that keeps the largest n export versions.
Args:
n: number of versions to keep.
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
heap = []
for idx, path in enumerate(paths):
if path.export_version is not None:
heapq.heappush(heap, (path.export_version, idx))
keepers = [paths[i] for _, i in heapq.nlargest(n, heap)]
return sorted(keepers)
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def one_of_every_n_export_versions(n):
r"""Creates a filter that keeps one of every n export versions.
Args:
n: interval size.
Returns:
A filter function that keeps exactly one path from each interval
[0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
interval the largest is kept.
"""
def keep(paths):
keeper_map = {} # map from interval to largest path seen in that interval
for p in paths:
if p.export_version is None:
# Skip missing export_versions.
continue
# Find the interval (with a special case to map export_version = 0 to
      # interval 0).
interval = math.floor(
(p.export_version - 1) / n) if p.export_version else 0
existing = keeper_map.get(interval, None)
if (not existing) or (existing.export_version < p.export_version):
keeper_map[interval] = p
return sorted(keeper_map.values())
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def mod_export_version(n):
"""Creates a filter that keeps every export that is a multiple of n.
Args:
n: step size.
Returns:
A filter function that keeps paths where export_version % n == 0.
"""
def keep(paths):
keepers = []
for p in paths:
if p.export_version % n == 0:
keepers.append(p)
return sorted(keepers)
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def union(lf, rf):
"""Creates a filter that keeps the union of two filters.
Args:
lf: first filter
rf: second filter
Returns:
A filter function that keeps the n largest paths.
"""
def keep(paths):
l = set(lf(paths))
r = set(rf(paths))
return sorted(list(l|r))
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def negation(f):
"""Negate a filter.
Args:
f: filter function to invert
Returns:
A filter function that returns the negation of f.
"""
def keep(paths):
l = set(paths)
r = set(f(paths))
return sorted(list(l-r))
return keep
@deprecated('2017-06-30',
'No longer supported. Switch to SavedModel immediately.')
def get_paths(base_dir, parser):
"""Gets a list of Paths in a given directory.
Args:
base_dir: directory.
parser: a function which gets the raw Path and can augment it with
information such as the export_version, or ignore the path by returning
None. An example parser may extract the export version from a path
such as "/tmp/exports/100" an another may extract from a full file
name such as "/tmp/checkpoint-99.out".
Returns:
A list of Paths contained in the base directory with the parsing function
applied.
By default the following fields are populated,
- Path.path
The parsing function is responsible for populating,
- Path.export_version
"""
raw_paths = gfile.ListDirectory(base_dir)
paths = []
for r in raw_paths:
p = parser(Path(os.path.join(base_dir, r), None))
if p:
paths.append(p)
return sorted(paths)
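# Added illustration (hedged, not part of the module), assuming paths whose parser
# extracted export versions 0..9 as in the module docstring:
#   one_of_every_n_export_versions(3) keeps one path per interval [0, 3], (3, 6], (6, 9],
#     i.e. export versions 3, 6 and 9 (the largest within each interval).
#   union(mod_export_version(5), largest_export_versions(2)) keeps versions 0, 5, 8 and 9;
#   negation() of that combined filter would select the remaining versions for deletion.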
| 30.142857
| 80
| 0.668552
|
ee5567d49c2618fd75068e0e77a8c17500c82e9a
| 534
|
py
|
Python
|
tests/contrib/contrib_template_test.py
|
isabella232/uis-rnn
|
91764ceaad832be651f3d64a809a183e133154d6
|
[
"Apache-2.0"
] | 1,454
|
2018-10-23T09:37:57.000Z
|
2022-03-17T12:53:24.000Z
|
tests/contrib/contrib_template_test.py
|
isabella232/uis-rnn
|
91764ceaad832be651f3d64a809a183e133154d6
|
[
"Apache-2.0"
] | 87
|
2018-11-14T15:35:15.000Z
|
2021-07-26T05:08:34.000Z
|
tests/contrib/contrib_template_test.py
|
isabella232/uis-rnn
|
91764ceaad832be651f3d64a809a183e133154d6
|
[
"Apache-2.0"
] | 325
|
2018-10-30T21:36:42.000Z
|
2022-03-29T12:43:46.000Z
|
# Contributor information:
# Name: First Last
# GitHub account: username
# Email: username@abcd.edu
# Organization: ABCD University
"""This is a template for community contributions."""
import unittest
from uisrnn.contrib import contrib_template
class TestExampleFunction(unittest.TestCase):
"""Tests for example_function()."""
def test_example_function(self):
"""Test the return value of example_function()."""
self.assertTrue(contrib_template.example_function())
if __name__ == '__main__':
unittest.main()
| 26.7
| 56
| 0.747191
|
dcbfc6c46c434efb2bb9162515670ccb48b6d216
| 1,147
|
py
|
Python
|
holidata/holidays/hr-HR.py
|
xeals/holidata
|
c99a56b63b1cb9dc5f4f79f3de83ba3865215250
|
[
"MIT"
] | 32
|
2019-04-12T08:01:34.000Z
|
2022-02-28T04:41:50.000Z
|
holidata/holidays/hr-HR.py
|
xeals/holidata
|
c99a56b63b1cb9dc5f4f79f3de83ba3865215250
|
[
"MIT"
] | 74
|
2019-07-09T16:35:20.000Z
|
2022-03-09T16:41:34.000Z
|
holidata/holidays/hr-HR.py
|
xeals/holidata
|
c99a56b63b1cb9dc5f4f79f3de83ba3865215250
|
[
"MIT"
] | 20
|
2019-01-28T07:41:02.000Z
|
2022-02-16T02:38:57.000Z
|
# coding=utf-8
from dateutil.easter import EASTER_WESTERN
from holidata.utils import SmartDayArrow
from .holidays import Locale, Holiday
class hr_HR(Locale):
"""
01-01: [NF] Nova Godina
01-06: [NRF] Sveta tri kralja
05-01: [NF] Praznik rada
05-30: [NF] Dan državnosti
06-22: [NF] Dan antifašističke borbe
08-05: [NF] Dan pobjede i domovinske zahvalnosti i Dan hrvatskih branitelja
08-15: [NRF] Velika Gospa
11-01: [NRF] Dan svih svetih
12-25: [NRF] Božić
12-26: [NRF] Sveti Stjepan
Easter: [NRV] Uskrs
1 day after Easter: [NRV] Uskršnji ponedjeljak
60 days after Easter: [NRV] Tijelovo
"""
locale = "hr-HR"
easter_type = EASTER_WESTERN
def holiday_dan_sjecanja_na_zrtve_domovinskog_rata_i_dan_sjecanja_na_zrtvu_vukovara_i_skabrnje(self):
if self.year >= 2020:
return [Holiday(
self.locale,
"",
SmartDayArrow(self.year, 11, 18),
"Dan sjećanja na žrtve Domovinskog rata i Dan sjećanja na žrtvu Vukovara i Škabrnje",
"NF"
)]
else:
return []
| 29.410256
| 105
| 0.624237
|
2ecb5499a1710a5d1fa06edf17ad1955579ee5d1
| 7,008
|
py
|
Python
|
mmsr/train.py
|
yumingj/C2-Matching
|
fa171ca6707c6f16a5d04194ce866ea70bb21d2b
|
[
"Apache-2.0"
] | 111
|
2021-05-31T17:15:41.000Z
|
2022-03-30T03:06:31.000Z
|
mmsr/train.py
|
yumingj/C2-Matching
|
fa171ca6707c6f16a5d04194ce866ea70bb21d2b
|
[
"Apache-2.0"
] | 20
|
2021-06-02T03:03:25.000Z
|
2022-03-28T03:49:08.000Z
|
mmsr/train.py
|
yumingj/C2-Matching
|
fa171ca6707c6f16a5d04194ce866ea70bb21d2b
|
[
"Apache-2.0"
] | 21
|
2021-06-04T05:49:41.000Z
|
2022-03-18T12:45:24.000Z
|
import argparse
import logging
import math
import os.path as osp
import random
import time
import torch
from mmcv.runner import get_time_str, init_dist
from mmsr.data import create_dataloader, create_dataset
from mmsr.data.data_sampler import DistIterSampler
from mmsr.models import create_model
from mmsr.utils import (MessageLogger, get_root_logger, init_tb_logger,
make_exp_dirs, set_random_seed)
from mmsr.utils.options import dict2str, dict_to_nonedict, parse
from mmsr.utils.util import check_resume
def main():
# options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, help='Path to option YAML file.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
opt = parse(args.opt, is_train=True)
# distributed training settings
if args.launcher == 'none': # disabled distributed training
opt['dist'] = False
rank = -1
print('Disabled distributed training.', flush=True)
else:
opt['dist'] = True
if args.launcher == 'slurm' and 'dist_params' in opt:
init_dist(args.launcher, **opt['dist_params'])
else:
init_dist(args.launcher)
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
# load resume states if exists
if opt['path'].get('resume_state', None):
device_id = torch.cuda.current_device()
resume_state = torch.load(
opt['path']['resume_state'],
map_location=lambda storage, loc: storage.cuda(device_id))
check_resume(opt, resume_state['iter'])
else:
resume_state = None
# mkdir and loggers
if resume_state is None:
make_exp_dirs(opt)
log_file = osp.join(opt['path']['log'],
f"train_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(
logger_name='base', log_level=logging.INFO, log_file=log_file)
logger.info(dict2str(opt))
# initialize tensorboard logger
tb_logger = None
if opt['use_tb_logger'] and 'debug' not in opt['name']:
tb_logger = init_tb_logger(log_dir='./tb_logger/' + opt['name'])
# convert to NoneDict, which returns None for missing keys
opt = dict_to_nonedict(opt)
# random seed
seed = opt['train']['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
logger.info(f'Random seed: {seed}')
set_random_seed(seed)
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# create train and val dataloaders
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
            # dataset_enlarge_ratio: enlarge the size of datasets for each epoch
dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
train_set = create_dataset(dataset_opt)
train_size = int(
math.ceil(len(train_set) / dataset_opt['batch_size']))
total_iters = int(opt['train']['niter'])
total_epochs = int(math.ceil(total_iters / train_size))
if opt['dist']:
train_sampler = DistIterSampler(train_set, world_size, rank,
dataset_enlarge_ratio)
total_epochs = total_iters / (
train_size * dataset_enlarge_ratio)
total_epochs = int(math.ceil(total_epochs))
else:
train_sampler = None
train_loader = create_dataloader(train_set, dataset_opt, opt,
train_sampler)
logger.info(
f'Number of train images: {len(train_set)}, iters: {train_size}'
)
logger.info(
f'Total epochs needed: {total_epochs} for iters {total_iters}')
elif phase == 'val':
val_set = create_dataset(dataset_opt)
val_loader = create_dataloader(val_set, dataset_opt, opt, None)
logger.info(
f"Number of val images/folders in {dataset_opt['name']}: "
f'{len(val_set)}')
else:
raise NotImplementedError(f'Phase {phase} is not recognized.')
assert train_loader is not None
# create model
model = create_model(opt)
# resume training
if resume_state:
logger.info(f"Resuming training from epoch: {resume_state['epoch']}, "
f"iter: {resume_state['iter']}.")
start_epoch = resume_state['epoch']
current_iter = resume_state['iter']
model.resume_training(resume_state) # handle optimizers and schedulers
else:
current_iter = 0
start_epoch = 0
# create message logger (formatted outputs)
msg_logger = MessageLogger(opt, current_iter, tb_logger)
# training
logger.info(
f'Start training from epoch: {start_epoch}, iter: {current_iter}')
data_time, iter_time = 0, 0
for epoch in range(start_epoch, total_epochs + 1):
if opt['dist']:
train_sampler.set_epoch(epoch)
for _, train_data in enumerate(train_loader):
data_time = time.time() - data_time
current_iter += 1
if current_iter > total_iters:
break
# update learning rate
model.update_learning_rate(
current_iter, warmup_iter=opt['train']['warmup_iter'])
# training
model.feed_data(train_data)
model.optimize_parameters(current_iter)
iter_time = time.time() - iter_time
# log
if current_iter % opt['logger']['print_freq'] == 0:
log_vars = {'epoch': epoch, 'iter': current_iter}
log_vars.update({'lrs': model.get_current_learning_rate()})
log_vars.update({'time': iter_time, 'data_time': data_time})
log_vars.update(model.get_current_log())
msg_logger(log_vars)
# validation
if opt['datasets'][
'val'] and current_iter % opt['val']['val_freq'] == 0:
model.validation(val_loader, current_iter, tb_logger,
opt['val']['save_img'])
# save models and training states
if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
logger.info('Saving models and training states.')
model.save(epoch, current_iter)
data_time = time.time()
iter_time = time.time()
# end of iter
# end of epoch
logger.info('End of training.')
logger.info('Saving the latest model.')
model.save(epoch=-1, current_iter=-1) # -1 for the latest
if tb_logger:
tb_logger.close()
if __name__ == '__main__':
main()
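# Added usage note (hedged): the option file paths are hypothetical.
#   Single-process run:
#       python mmsr/train.py -opt options/train/example.yml --launcher none
#   Distributed run on 4 GPUs via PyTorch's launcher (which supplies --local_rank):
#       python -m torch.distributed.launch --nproc_per_node=4 mmsr/train.py \
#           -opt options/train/example.yml --launcher pytorch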
| 36.884211
| 80
| 0.602169
|
3e89f0d720db88316524dfd76c357c7cbdcea5b7
| 1,033
|
py
|
Python
|
main/models.py
|
bensteinberg/dostuff
|
bb71c73674160906b557967f1a8d3f18507dc943
|
[
"MIT"
] | 3
|
2021-08-10T17:58:23.000Z
|
2021-11-17T09:52:38.000Z
|
main/models.py
|
bensteinberg/dostuff
|
bb71c73674160906b557967f1a8d3f18507dc943
|
[
"MIT"
] | 11
|
2020-02-26T21:12:55.000Z
|
2021-06-02T16:10:07.000Z
|
main/models.py
|
harvard-lil/dostuff
|
8100b111f4042aefe94ea09239e1afbed3e5babc
|
[
"MIT"
] | 1
|
2019-01-18T21:36:41.000Z
|
2019-01-18T21:36:41.000Z
|
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.crypto import get_random_string
from jsonfield import JSONField
from rest_framework.authtoken.models import Token
class Event(models.Model):
data = JSONField()
timestamp = models.DateTimeField(auto_now_add=True)
created_by = models.ForeignKey(User, on_delete=models.CASCADE)
room_name = models.CharField(max_length=255, default="display")
class Meta:
ordering = ['-timestamp']
# Create API tokens for new users
# via http://www.django-rest-framework.org/api-guide/authentication/#by-using-signals
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(
user=instance,
key=get_random_string(4, allowed_chars="23456789ABCDEFGHJKLMNPQRSTUVWXYZ"),
)
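        # Added note: get_random_string(4, ...) draws from a 32-character alphabet
        # (digits 2-9 plus A-Z without I and O), giving 32**4 == 1,048,576 possible keys.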
| 35.62069
| 87
| 0.759923
|
1530c0a05984a874484c59ee7615ff10d594bf3c
| 8,553
|
py
|
Python
|
lib/galaxy_test/base/rules_test_data.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy_test/base/rules_test_data.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | 6
|
2021-11-11T20:57:49.000Z
|
2021-12-10T15:30:33.000Z
|
lib/galaxy_test/base/rules_test_data.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
def check_example_1(hdca, dataset_populator):
assert hdca["collection_type"] == "list"
assert hdca["element_count"] == 2
first_dce = hdca["elements"][0]
first_hda = first_dce["object"]
assert first_hda["hid"] > 3
def check_example_2(hdca, dataset_populator):
assert hdca["collection_type"] == "list:list"
assert hdca["element_count"] == 2
first_collection_level = hdca["elements"][0]
assert first_collection_level["element_type"] == "dataset_collection"
second_collection_level = first_collection_level["object"]
assert second_collection_level["collection_type"] == "list"
assert second_collection_level["elements"][0]["element_type"] == "hda"
def check_example_3(hdca, dataset_populator):
assert hdca["collection_type"] == "list"
assert hdca["element_count"] == 2
first_element = hdca["elements"][0]
assert first_element["element_identifier"] == "test0forward"
def check_example_4(hdca, dataset_populator):
assert hdca["collection_type"] == "list:list"
assert hdca["element_count"] == 2
first_collection_level = hdca["elements"][0]
assert first_collection_level["element_identifier"] == "single", hdca
assert first_collection_level["element_type"] == "dataset_collection"
second_collection_level = first_collection_level["object"]
assert "elements" in second_collection_level, hdca
assert len(second_collection_level["elements"]) == 1, hdca
i1_element = second_collection_level["elements"][0]
assert "object" in i1_element, hdca
assert "element_identifier" in i1_element
assert i1_element["element_identifier"] == "i1", hdca
assert len(i1_element["object"]["tags"]) == 0
def check_example_5(hdca, dataset_populator):
assert hdca["collection_type"] == "list:list"
assert hdca["element_count"] == 2
first_collection_level = hdca["elements"][0]
assert first_collection_level["element_identifier"] == "single", hdca
assert first_collection_level["element_type"] == "dataset_collection"
second_collection_level = first_collection_level["object"]
assert "elements" in second_collection_level, hdca
assert len(second_collection_level["elements"]) == 1, hdca
i1_element = second_collection_level["elements"][0]
assert "object" in i1_element, hdca
assert "element_identifier" in i1_element
assert i1_element["element_identifier"] == "i1", hdca
tags = i1_element["object"]["tags"]
assert len(tags) > 0
assert "group:single" in tags, tags
assert "i1" in tags, tags
def check_example_6(hdca, dataset_populator):
assert hdca["collection_type"] == "list"
assert hdca["element_count"] == 3
i1_element = hdca["elements"][0]
assert "object" in i1_element, hdca
assert "element_identifier" in i1_element
assert i1_element["element_identifier"] == "i1", hdca
tags = i1_element["object"]["tags"]
assert len(tags) == 2
assert "random" in tags
assert "group:type:single" in tags
EXAMPLE_1 = {
"rules": {
"rules": [
{
"type": "add_column_metadata",
"value": "identifier0",
}
],
"mapping": [
{
"type": "list_identifiers",
"columns": [0],
}
],
},
"test_data": {
"type": "list",
"elements": [
{
"identifier": "i1",
"contents": "0",
"class": "File",
},
{
"identifier": "i2",
"contents": "1",
"class": "File",
},
],
},
"check": check_example_1,
"output_hid": 6,
}
EXAMPLE_2 = {
"rules": {
"rules": [
{
"type": "add_column_metadata",
"value": "identifier0",
},
{
"type": "add_column_metadata",
"value": "identifier0",
},
],
"mapping": [
{
"type": "list_identifiers",
"columns": [0, 1],
}
],
},
"test_data": {
"type": "list",
"elements": [
{
"identifier": "i1",
"contents": "0",
"class": "File",
},
{
"identifier": "i2",
"contents": "1",
"class": "File",
},
],
},
"check": check_example_2,
"output_hid": 6,
}
# Flatten
EXAMPLE_3 = {
"rules": {
"rules": [
{
"type": "add_column_metadata",
"value": "identifier0",
},
{
"type": "add_column_metadata",
"value": "identifier1",
},
{
"type": "add_column_concatenate",
"target_column_0": 0,
"target_column_1": 1,
},
],
"mapping": [
{
"type": "list_identifiers",
"columns": [2],
}
],
},
"test_data": {
"type": "list:paired",
"elements": [
{
"identifier": "test0",
"elements": [
{"identifier": "forward", "class": "File", "contents": "TestData123"},
{"identifier": "reverse", "class": "File", "contents": "TestData123"},
],
}
],
},
"check": check_example_3,
"output_hid": 6,
}
# Nesting with group tags.
EXAMPLE_4 = {
"rules": {
"rules": [
{
"type": "add_column_metadata",
"value": "identifier0",
},
{"type": "add_column_group_tag_value", "value": "type", "default_value": "unused"},
],
"mapping": [
{
"type": "list_identifiers",
"columns": [1, 0],
}
],
},
"test_data": {
"type": "list",
"elements": [
{"identifier": "i1", "contents": "0", "class": "File", "tags": ["random", "group:type:single"]},
{"identifier": "i2", "contents": "1", "class": "File", "tags": ["random", "group:type:paired"]},
{"identifier": "i3", "contents": "2", "class": "File", "tags": ["random", "group:type:paired"]},
],
},
"check": check_example_4,
"output_hid": 8,
}
EXAMPLE_5 = {
"rules": {
"rules": [
{
"type": "add_column_metadata",
"value": "identifier0",
},
{"type": "add_column_group_tag_value", "value": "type", "default_value": "unused"},
],
"mapping": [
{
"type": "list_identifiers",
"columns": [1, 0],
},
{
"type": "group_tags",
"columns": [1],
},
{
"type": "tags",
"columns": [0],
},
],
},
"test_data": {
"type": "list",
"elements": [
{"identifier": "i1", "contents": "0", "class": "File", "tags": ["random", "group:type:single"]},
{"identifier": "i2", "contents": "1", "class": "File", "tags": ["random", "group:type:paired"]},
{"identifier": "i3", "contents": "2", "class": "File", "tags": ["random", "group:type:paired"]},
],
},
"check": check_example_5,
"output_hid": 8,
}
EXAMPLE_6 = {
"rules": {
"rules": [
{
"type": "add_column_metadata",
"value": "identifier0",
},
{
"type": "add_column_metadata",
"value": "tags",
},
],
"mapping": [
{
"type": "list_identifiers",
"columns": [0],
},
{
"type": "tags",
"columns": [1],
},
],
},
"test_data": {
"type": "list",
"elements": [
{"identifier": "i1", "contents": "0", "class": "File", "tags": ["random", "group:type:single"]},
{"identifier": "i2", "contents": "1", "class": "File", "tags": ["random", "group:type:paired"]},
{"identifier": "i3", "contents": "2", "class": "File", "tags": ["random", "group:type:paired"]},
],
},
"check": check_example_6,
"output_hid": 8,
}
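# Added summary comment (hedged): each EXAMPLE_* dict bundles the rule definitions
# ("rules" add metadata/tag columns, "mapping" turns columns into list identifiers,
# tags or group tags), the input collection under "test_data", the check_example_*
# callback that asserts on the resulting HDCA, and the expected "output_hid".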
| 29.493103
| 108
| 0.483573
|
eac726bc73cb1fc99d01c9af90bcc4d84c6d8437
| 2,880
|
py
|
Python
|
setup.py
|
inkwork/fb-reporter
|
0b153f248e49ffd994cba5f3d92377df7dc17b8f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
inkwork/fb-reporter
|
0b153f248e49ffd994cba5f3d92377df7dc17b8f
|
[
"Apache-2.0"
] | 10
|
2019-02-01T15:05:39.000Z
|
2019-02-06T18:02:54.000Z
|
setup.py
|
inkwork/fb-reporter
|
0b153f248e49ffd994cba5f3d92377df7dc17b8f
|
[
"Apache-2.0"
] | null | null | null |
import io
import os
import sys
from setuptools import setup, find_packages
# Package meta-data.
NAME = "fontelemetry"
DESCRIPTION = "A library and toolset for typeface software development reporting."
LICENSE = "Apache License v2.0"
URL = "https://github.com/googlefonts/fontelemetry"
EMAIL = "dcrossland@google.com"
AUTHOR = "Fontelemetry Authors and Contributors"
REQUIRES_PYTHON = ">=3.6.0"
INSTALL_REQUIRES = ["fontTools==3.35.0",
"fs<3,>=2.1.1",
"glyphsLib==3.1.4",
"iPython==7.2.0",
"pandas==0.23.4",
"plotly==3.6.0",
"notebook==6.4.10",
"toml==0.10.0"]
# Optional packages
EXTRAS_REQUIRES = {
# for developer installs
"dev": ["wheel", "setuptools", "twine"]
}
this_file_path = os.path.abspath(os.path.dirname(__file__))
# Version
main_namespace = {}
version_fp = os.path.join(this_file_path, "Lib", "fontelemetry", "__init__.py")
try:
with io.open(version_fp) as v:
exec(v.read(), main_namespace)
except IOError as version_e:
sys.stderr.write(
"[ERROR] setup.py: Failed to read the version data for the version definition: {}".format(
str(version_e)
)
)
raise version_e
# Use repository Markdown README.md for PyPI long description
try:
with io.open("README.md", encoding="utf-8") as f:
readme = f.read()
except IOError as readme_e:
sys.stderr.write(
"[ERROR] setup.py: Failed to read the README.md file for the long description definition: {}".format(
str(readme_e)
)
)
raise readme_e
setup(
name=NAME,
version=main_namespace["__version__"],
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
license=LICENSE,
platforms=["Any"],
long_description=readme,
long_description_content_type="text/markdown",
package_dir={"": "Lib"},
packages=find_packages("Lib"),
include_package_data=True,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRES,
python_requires=REQUIRES_PYTHON,
entry_points={"console_scripts": ["fontelemetry = fontelemetry.__main__:main"]},
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Jupyter",
"Environment :: Console",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Text Processing :: Fonts",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Graphics Conversion",
],
)
| 31.304348
| 109
| 0.627431
|