# ===== File: URI 1017.py (repo: Azefalo/Cluble-de-Programacao-UTFPR, license: MIT) =====
# https://www.beecrowd.com.br/judge/en/problems/view/1017
car_efficiency = 12 # Km/L
time = int(input())
average_speed = int(input())
liters = (time * average_speed) / car_efficiency
print(f"{liters:.3f}") | 26.75 | 58 | 0.686916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.373832 |
f04ce64c795e8f616352eaaa159edec4673a3240 | 1,432 | py | Python | src/cpp/convert.py | shindavid/splendor | b51b0408967627dbd61f60f57031d1fe21aa9d8f | [
"MIT"
] | 1 | 2017-11-02T18:32:51.000Z | 2017-11-02T18:32:51.000Z | src/cpp/convert.py | shindavid/splendor | b51b0408967627dbd61f60f57031d1fe21aa9d8f | [
"MIT"
] | 1 | 2018-07-05T09:07:40.000Z | 2018-07-05T09:07:40.000Z | src/cpp/convert.py | shindavid/splendor | b51b0408967627dbd61f60f57031d1fe21aa9d8f | [
"MIT"
] | null | null | null | filename = '../py/cards.py'
f = open(filename)
color_map = {
'W' : 'eWhite',
'U' : 'eBlue',
'G' : 'eGreen',
'R' : 'eRed',
'B' : 'eBlack',
'J' : 'eGold',
}
color_index_map = {
'W' : 0,
'U' : 1,
'G' : 2,
'R' : 3,
'B' : 4
}
def convert(cost_str):
cost_array = [0,0,0,0,0]
tokens = [x.strip() for x in cost_str.split(',')]
for token in tokens:
subtokens = token.split(':')
color_index = color_index_map[subtokens[0]]
count = int(subtokens[1])
cost_array[color_index] = count
return ', '.join([str(x) for x in cost_array])
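
# Illustrative example (added; not in the original script): with the slot
# order [W, U, G, R, B] above, a cost string like 'W:1, U:2' becomes the
# C++ initializer fragment '1, 2, 0, 0, 0':
#
#   convert('W:1, U:2')  ->  '1, 2, 0, 0, 0'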
ID = 0
first = True
# Emit C++ initializers for the cards; the first matching line (the
# _add_card definition itself) is skipped.
for line in f:
    if line.count('_add_card'):
        if first:
            first = False
            continue
        lp = line.find('(')
        rp = line.find(')')
        lb = line.find('{')
        rb = line.find('}')
        cost_str = line[lb+1:rb]
        tokens = line[lp+1:rp].split(',')
        level = int(tokens[0].strip()) - 1
        points = int(tokens[1].strip())
        color = color_map[tokens[2].strip()]
        print(' {%2d, {%s}, %s, %s, %s},' % (ID, convert(cost_str), points, level, color))
        ID += 1

ID = 0
f = open(filename)
first = True
# Emit C++ initializers for the nobles, with the same skip-the-first-match convention.
for line in f:
    if line.count('_add_noble'):
        if first:
            first = False
            continue
        lp = line.find('(')
        rp = line.find(')')
        lb = line.find('{')
        rb = line.find('}')
        cost_str = line[lb+1:rb]
        print(' {%s, 3, {%s}},' % (ID, convert(cost_str)))
        ID += 1

# ===== File: code_examples/projections/cyl.py (repo: ezcitron/BasemapTutorial, license: CC0-1.0) =====
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
map = Basemap(projection='cyl')
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
map.drawcoastlines()
plt.show()
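
# Variation (added note): other projections can be swapped in through the
# `projection` argument, e.g. Basemap(projection='merc', llcrnrlat=-80,
# urcrnrlat=80, llcrnrlon=-180, urcrnrlon=180) for a Mercator map.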

# ===== File: applications/ShapeOptimizationApplication/python_scripts/analyzer_internal.py (repo: AndreaVoltan/MyKratos7.0, license: BSD-4-Clause) =====
# ==============================================================================
# KratosShapeOptimizationApplication
#
# License: BSD License
# license: ShapeOptimizationApplication/license.txt
#
# Main authors: Baumgaertner Daniel, https://github.com/dbaumgaertner
# Geiser Armin, https://github.com/armingeiser
#
# ==============================================================================
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Kratos Core and Apps
from KratosMultiphysics import *
from KratosMultiphysics.StructuralMechanicsApplication import *
# Additional imports
import response_function_factory
import time as timer
# ==============================================================================
class KratosInternalAnalyzer( (__import__("analyzer_base")).AnalyzerBaseClass ):
# --------------------------------------------------------------------------
def __init__( self, optimization_settings, model_part_controller ):
self.model_part_controller = model_part_controller
self.response_function_list = response_function_factory.CreateListOfResponseFunctions(optimization_settings, self.model_part_controller.GetModel())
# --------------------------------------------------------------------------
def InitializeBeforeOptimizationLoop( self ):
for response in self.response_function_list.values():
response.Initialize()
# --------------------------------------------------------------------------
def AnalyzeDesignAndReportToCommunicator( self, currentDesign, optimizationIteration, communicator ):
optimization_model_part = self.model_part_controller.GetOptimizationModelPart()
time_before_analysis = optimization_model_part.ProcessInfo.GetValue(TIME)
step_before_analysis = optimization_model_part.ProcessInfo.GetValue(STEP)
delta_time_before_analysis = optimization_model_part.ProcessInfo.GetValue(DELTA_TIME)
for identifier, response in self.response_function_list.items():
# Reset step/time iterators such that they match the optimization iteration after calling CalculateValue (which internally calls CloneTimeStep)
optimization_model_part.ProcessInfo.SetValue(STEP, step_before_analysis-1)
optimization_model_part.ProcessInfo.SetValue(TIME, time_before_analysis-1)
optimization_model_part.ProcessInfo.SetValue(DELTA_TIME, 0)
response.InitializeSolutionStep()
# response values
if communicator.isRequestingValueOf(identifier):
response.CalculateValue()
communicator.reportValue(identifier, response.GetValue())
# response gradients
if communicator.isRequestingGradientOf(identifier):
response.CalculateGradient()
communicator.reportGradient(identifier, response.GetShapeGradient())
response.FinalizeSolutionStep()
# Clear results or modifications on model part
optimization_model_part.ProcessInfo.SetValue(STEP, step_before_analysis)
optimization_model_part.ProcessInfo.SetValue(TIME, time_before_analysis)
optimization_model_part.ProcessInfo.SetValue(DELTA_TIME, delta_time_before_analysis)
self.model_part_controller.SetMeshToReferenceMesh()
self.model_part_controller.SetDeformationVariablesToZero()
# --------------------------------------------------------------------------
def FinalizeAfterOptimizationLoop( self ):
for response in self.response_function_list.values():
response.Finalize()
# ==============================================================================

# ===== File: tests/frontend/detector/test_fast.py (repo: swershrimpy/gtsfm, license: Apache-2.0) =====
"""Tests for frontend's FAST detector class.
Authors: Ayush Baid
"""
import unittest
import tests.frontend.detector.test_detector_base as test_detector_base
from gtsfm.frontend.detector.fast import Fast
class TestFast(test_detector_base.TestDetectorBase):
"""Test class for FAST detector class in frontend.
All unit test functions defined in TestDetectorBase are run automatically.
"""
def setUp(self):
super().setUp()
self.detector = Fast()
if __name__ == "__main__":
unittest.main()

# ===== File: problem/13_Roman_to_Integer.py (repo: YoungYoung619/leetcode, license: Apache-2.0) =====
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description:
Author: Team Li
"""
"""
13. Roman to Integer
Easy
Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.
Symbol Value
I 1
V 5
X 10
L 50
C 100
D 500
M 1000
For example, two is written as II in Roman numeral, just two one's added together. Twelve is written as, XII, which is simply X + II. The number twenty seven is written as XXVII, which is XX + V + II.
Roman numerals are usually written largest to smallest from left to right. However, the numeral for four is not IIII. Instead, the number four is written as IV. Because the one is before the five we subtract it making four. The same principle applies to the number nine, which is written as IX. There are six instances where subtraction is used:
I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.
Given a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.
Example 1:
Input: "III"
Output: 3
Example 2:
Input: "IV"
Output: 4
Example 3:
Input: "IX"
Output: 9
Example 4:
Input: "LVIII"
Output: 58
Explanation: L = 50, V = 5, III = 3.
Example 5:
Input: "MCMXCIV"
Output: 1994
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
"""
def romanToInt(s):
"""
:type s: str
:rtype: int
"""
c2num_normal = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
c2num_sp = {'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900}
def sub_roman2int(s_index, num):
if s_index == len(s):
return num
for sp in list(c2num_sp.keys()):
if s.startswith(sp, s_index):
num += c2num_sp[sp]
return sub_roman2int(s_index + 2, num)
num += c2num_normal[s[s_index]]
return sub_roman2int(s_index + 1, num)
    return sub_roman2int(0, 0)
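
# Quick sanity check against the examples quoted above (added for
# illustration; not part of the original solution):
if __name__ == '__main__':
    for numeral, expected in [('III', 3), ('IV', 4), ('IX', 9), ('LVIII', 58), ('MCMXCIV', 1994)]:
        assert romanToInt(numeral) == expected, (numeral, expected)
    print('all Roman-numeral examples pass')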

# ===== File: tests/test_cli.py (repo: KoichiYasuoka/pynlpir, license: MIT) =====
"""Unit tests for pynlpir's cli.py file."""
import os
import shutil
import stat
import unittest
try:
from urllib.error import URLError
from urllib.request import urlopen
except ImportError:
from urllib2 import URLError, urlopen
from click.testing import CliRunner
from pynlpir import cli
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
LICENSE_FILE = os.path.join(TEST_DIR, 'data', 'NLPIR.user')
def can_reach_github():
"""Check if we can reach GitHub's website."""
try:
urlopen('http://github.com')
return True
except URLError:
return False
@unittest.skipIf(can_reach_github() is False, 'Unable to reach GitHub')
class TestCLI(unittest.TestCase):
"""Unit tests for the PyNLPIR CLI."""
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
self.runner = None
def test_initial_license_download(self):
"""Tests that an initial license download works correctly."""
with self.runner.isolated_filesystem():
result = self.runner.invoke(cli.cli, ('update', '-d.'))
self.assertEqual(0, result.exit_code)
self.assertEqual('License updated.\n', result.output)
def test_license_update(self):
"Test that a regular license update works correctly."""
with self.runner.isolated_filesystem():
shutil.copyfile(LICENSE_FILE, os.path.basename(LICENSE_FILE))
result = self.runner.invoke(cli.cli, ('update', '-d.'))
self.assertEqual(0, result.exit_code)
self.assertEqual('License updated.\n', result.output)
result = self.runner.invoke(cli.cli, ('update', '-d.'))
self.assertEqual(0, result.exit_code)
self.assertEqual('Your license is already up-to-date.\n',
result.output)
def test_license_write_fail(self):
"""Test tha writing a license file fails appropriately."""
with self.runner.isolated_filesystem():
cwd = os.getcwd()
os.chmod(cwd, stat.S_IREAD)
with self.assertRaises((IOError, OSError)):
cli.update_license_file(cwd)

# ===== File: src/old/parsemod/cft_expr.py (repo: TeaCondemns/cofty, licenses: Apache-2.0, MIT) =====
from cft_namehandler import NameHandler, get_value_returned_type, get_local_name, get_abs_composed_name
from parsemod.cft_name import is_name, is_kw, compose_name
from parsemod.cft_syntaxtree_values import str_type
from parsemod.cft_others import extract_tokens
from cft_errors_handler import ErrorsHandler
from compile.cft_compile import get_num_type
from py_utils import isnotfinished
from lexermod.cft_token import *
import parsemod.cft_ops as ops
from copy import deepcopy
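
# The helpers below classify a token stream (type expression, value expression,
# name-call expression) and, when it parses, build the dict-based syntax-tree
# node for it, reporting any problems through the shared ErrorsHandler.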
def _is_type_expression(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0
) -> bool:
tokens = extract_tokens(tokens, i)
if tokens is None:
return False
if is_name(tokens, errors_handler, path, namehandler, debug_info=_is_type_expression.__name__):
composed_name = compose_name(tokens)
if namehandler.has_globalname(composed_name):
if namehandler.isinstance(composed_name, '$struct'):
return True
errors_handler.final_push_segment(
path,
f'TypeError: name `{get_local_name(composed_name)}` is not a type',
tokens[-1],
fill=True
)
errors_handler.final_push_segment(
path,
f'NameError: name `{get_local_name(composed_name)}` is not defined',
tokens[-1],
fill=True
)
return False
errors_handler.final_push_segment(
path,
f'SyntaxError: invalid syntax',
tokens[-1],
fill=True
)
return False
def _is_name_call_expression(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0,
without_tail=False # if True tokens after call name expression are not taken into account
):
tokens = tokens[i:]
parenthesis_index = -1
for k in range(len(tokens)):
if tokens[k].type == TokenTypes.PARENTHESIS:
parenthesis_index = k
break
if parenthesis_index == -1 or not is_name(
tokens[:parenthesis_index], errors_handler, path, namehandler, debug_info=_is_name_call_expression.__name__
) or (
without_tail and len(tokens) > (parenthesis_index + 1)
):
return False
return True
def _is_value_expression(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0
) -> bool:
"""<expr>"""
tokens = extract_tokens(tokens, i)
if tokens is None:
return False
if len(tokens) == 1:
if tokens[0].type in (TokenTypes.NUMBER, TokenTypes.STRING) or is_name(
tokens[0], errors_handler, path, namehandler, debug_info=_is_value_expression.__name__
) or is_kw(tokens[0], ('True', 'False')):
return True
if tokens[0].type == TokenTypes.TUPLE:
for item in tokens[0].value:
if not _is_value_expression(item, errors_handler, path, namehandler):
return False
return True
if tokens[0].type in (TokenTypes.PARENTHESIS, TokenTypes.SQUARE_BRACKETS, TokenTypes.CURLY_BRACES):
return not tokens[0].value or _is_value_expression(tokens[0].value, errors_handler, path, namehandler)
elif ops.is_op(tokens[0], source=ops.LOPS) and _is_value_expression(tokens, errors_handler, path, namehandler, 1):
# LOPS check
return True
else:
iop = -1
for k in range(len(tokens)):
if ops.is_op(tokens[k], source=ops.MIDDLE_OPS):
iop = k
break
if iop != -1:
if (_is_name_call_expression(
tokens[:iop], errors_handler, path, namehandler, without_tail=True
) or _is_value_expression(
tokens[:iop], errors_handler, path, namehandler
)) and _is_value_expression(tokens, errors_handler, path, namehandler, iop + 1):
return True
elif _is_name_call_expression(tokens, errors_handler, path, namehandler, without_tail=True):
# calling name expression check
return True
return False
def _generate_name_call_expression(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler
):
parenthesis_index = 1
while parenthesis_index < len(tokens):
if tokens[parenthesis_index].type == TokenTypes.PARENTHESIS:
break
parenthesis_index += 1
name = compose_name(tokens[:parenthesis_index])
if not namehandler.isinstance(name, ('fn', '$struct')):
errors_handler.final_push_segment(
path,
f'NameError: name `{get_local_name(name)}` is not a function or structure',
tokens[parenthesis_index - 1],
fill=True
)
return {}
args_tokens = []
if tokens[parenthesis_index].value:
if tokens[parenthesis_index].value[0].type == TokenTypes.TUPLE:
args_tokens = tokens[parenthesis_index].value[0].value
if not args_tokens[-1]:
del args_tokens[-1]
else:
args_tokens = [tokens[parenthesis_index].value]
namehandler_obj = deepcopy(namehandler.get_current_body(name))
if namehandler_obj['type'] == '$struct':
args_dict = namehandler_obj['value']
expected_kwargs = list(args_dict.keys())
max_args = positional_args = len(expected_kwargs)
returned_type = get_abs_composed_name(namehandler_obj)
else:
args_dict = namehandler_obj['args']
expected_kwargs = list(args_dict.keys())
max_args = namehandler_obj['max-args']
positional_args = namehandler_obj['positional-args']
returned_type = namehandler_obj['returned-type']
required_positional_args = len(args_tokens)
if required_positional_args > max_args:
errors_handler.final_push_segment(
path,
f'TypeError: {get_local_name(name)}() takes {max_args} positional arguments '
f'but {required_positional_args} was given',
tokens[parenthesis_index],
fill=True
)
return {}
if required_positional_args < positional_args:
missing = positional_args - required_positional_args
missed_args = expected_kwargs[required_positional_args: positional_args]
error_tail = f'`{missed_args[-1]}`'
if missing > 1:
error_tail = f'`{missed_args[-2]}` and ' + error_tail
if missing > 2:
for missed_arg in missed_args[:-2][::-1]:
error_tail = f'`{missed_arg}`, ' + error_tail
error_tail = ('' if missing == 1 else 's') + ': ' + error_tail
errors_handler.final_push_segment(
path,
f'TypeError: {get_local_name(name)}() missing {missing} required positional argument' + error_tail,
tokens[parenthesis_index],
fill=True
)
return {}
args = []
for i in range(len(args_tokens)):
arg_tokens = args_tokens[i]
if not arg_tokens:
break
arg = _generate_expression_syntax_object(arg_tokens, errors_handler, path, namehandler)
if errors_handler.has_errors():
return {}
del arg['$tokens-len']
expected_type = args_dict[expected_kwargs[i]]
expected_type = expected_type['type'] if expected_type['value'] is None \
else get_value_returned_type(expected_type['value'])
if arg['returned-type'] != '$undefined' and get_value_returned_type(arg) != expected_type:
errors_handler.final_push_segment(
path,
f'TypeError: expected type `{expected_type}`, got `{get_value_returned_type(arg)}`',
arg_tokens[0],
fill=True
)
args.append(arg)
if namehandler_obj['type'] == '$struct':
fields = namehandler_obj['value']
k = 0
for key in fields:
fields[key]['value'] = args[k]
del fields[key]['*parent']
k += 1
return {
'type': '$init-cls',
'called-name': name,
'fields': fields,
'returned-type': returned_type,
'$has-effect': True,
'$constant-expr': False
}
else:
return {
'type': '$call-name',
'called-name': name,
'args': args,
'returned-type': returned_type,
'$has-effect': True, # temp value
'$constant-expr': False # temp value
}
def _generate_expression_syntax_object(
tokens: list[Token] | Token,
errors_handler: ErrorsHandler,
path: str,
namehandler: NameHandler,
i: int = 0,
right_i: int = 0,
expected_type: dict | str = ...,
effect_checker=False
):
tokens = extract_tokens(tokens, i)
tokens = tokens[:len(tokens) - right_i]
res = {
'$tokens-len': len(tokens), # temp value
'$has-effect': False, # temp value,
'$constant-expr': True # temp value
}
if len(tokens) == 1:
res['returned-type'] = '$self' # it is necessary to refer to key 'type'
token = tokens[0]
if token.type == TokenTypes.STRING:
# includes strings like `'Hello World'` or `"Hello World"`, and chars like `c'H'` or `c"H"`
_index = token.value.index(token.value[-1])
res.update({
# `c` is prefix before quotes that's means is char, not string
'type': str_type if 'c' not in token.value[:_index].lower() else ['$', 'char'],
'value': token.value[_index + 1:-1]
})
elif token.type == TokenTypes.NUMBER:
# includes any number format like integer or decimal
res.update({
'type': ['$', get_num_type(token.value)],
'value': token.value
})
elif token.type == TokenTypes.NAME:
res['value'] = token.value
if token.value in ('True', 'False'):
res['type'] = ['$', 'bool']
elif not namehandler.has_globalname(token.value):
errors_handler.final_push_segment(
path,
f'NameError: name `{token.value}` is not defined',
tokens[0],
fill=True
)
return {}
else:
res.update({
'type': 'name',
'$constant-expr': False
})
_obj = namehandler.get_current_body(token.value)
if namehandler.isinstance(token.value, 'fn'):
res['returned-type'] = _obj['returned-type']
else:
res['returned-type'] = _obj['type']
elif token.type == TokenTypes.TUPLE:
# <expression>, <expression>
isnotfinished()
res.update({
'type': 'tuple',
'value': []
})
for item in token.value:
res['value'].append(_generate_expression_syntax_object(item, errors_handler, path, namehandler))
del res['value'][-1]['$tokens-len']
elif token.type in (TokenTypes.PARENTHESIS, TokenTypes.SQUARE_BRACKETS, TokenTypes.CURLY_BRACES):
isnotfinished()
if not token.value:
res.update({
'type': {
TokenTypes.PARENTHESIS: 'tuple',
TokenTypes.SQUARE_BRACKETS: 'list',
TokenTypes.CURLY_BRACES: 'dict'
}[token.type],
'value': []
})
else:
res = _generate_expression_syntax_object(token.value, errors_handler, path, namehandler)
if token.type != TokenTypes.PARENTHESIS:
res['type'] = 'list' if token.type == TokenTypes.SQUARE_BRACKETS else 'set'
if res['type'] != 'tuple':
res['value'] = [res['value']]
elif _is_name_call_expression(tokens, errors_handler, path, namehandler, without_tail=True):
res.update(
_generate_name_call_expression(tokens, errors_handler, path, namehandler)
)
else:
res.update(ops.generate_op_expression(
tokens,
errors_handler,
path,
namehandler,
_generate_expression_syntax_object,
_is_name_call_expression,
_generate_name_call_expression
))
if errors_handler.has_errors():
return {}
if get_value_returned_type(res) == '$undefined':
errors_handler.final_push_segment(
path,
'unpredictable behavior (it is impossible to calculate the return type)',
tokens[0],
type=ErrorsHandler.WARNING
)
elif expected_type is not ... and get_value_returned_type(res) != expected_type:
errors_handler.final_push_segment(
path,
f'TypeError: expected type `{expected_type}`, got `{get_value_returned_type(res)}`',
tokens[0],
fill=True
)
return {}
if not effect_checker:
del res['$has-effect']
return res
__all__ = (
'_is_value_expression',
'_generate_expression_syntax_object',
'_is_type_expression'
)

# ===== File: src/ychaos/cli/exceptions/__init__.py (repo: vanderh0ff/ychaos, license: Apache-2.0) =====
# Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
from abc import abstractmethod
from typing import Any, Dict
class YChaosCLIError(Exception):
exitcode = 1
def __init__(self, app, message, **kwargs):
self.app = app
self.message: str = message
self.attrs: Dict[str, Any] = kwargs
@abstractmethod
def handle(self) -> None:
"""
Handle is the method that is called during teardown to handle a particular type of error
Any subcommand can raise a sub class of YChaosCLIError and forget about the exception.
The main teardown module takes responsibility of calling the handle method
This can be used to print message of exception or handle the panic in a suitable way
"""
if self.app.is_debug_mode():
self.app.console.print_exception(extra_lines=2)
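
# Minimal usage sketch (added; the subclass below is hypothetical and not part
# of ychaos):
#
#   class ConfigNotFoundError(YChaosCLIError):
#       exitcode = 2
#
#       def handle(self) -> None:
#           super().handle()
#           self.app.console.print(self.message)
#
# A subcommand can then `raise ConfigNotFoundError(app, "config not found")`
# and rely on the teardown code to invoke handle().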

# ===== File: ibmsecurity/isam/base/fips.py (repo: zone-zero/ibmsecurity, license: Apache-2.0) =====
import logging
import ibmsecurity.utilities.tools
# restart_and_wait() below polls firmware info, so import that module explicitly
import ibmsecurity.isam.base.firmware
import time
logger = logging.getLogger(__name__)
requires_model = "Appliance"
def get(isamAppliance, check_mode=False, force=False):
"""
Retrieving the current FIPS Mode configuration
"""
return isamAppliance.invoke_get("Retrieving the current FIPS Mode configuration",
"/fips_cfg", requires_model=requires_model)
def set(isamAppliance, fipsEnabled=True, tlsv10Enabled=True, tlsv11Enabled=False, check_mode=False, force=False):
"""
Updating the FIPS Mode configuration
"""
obj = _check(isamAppliance, fipsEnabled, tlsv10Enabled, tlsv11Enabled)
if force is True or obj['value'] is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=obj['warnings'])
else:
return isamAppliance.invoke_put(
"Updating the FIPS Mode configuration",
"/fips_cfg",
{
"fipsEnabled": fipsEnabled,
"tlsv10Enabled": tlsv10Enabled,
"tlsv11Enabled": tlsv11Enabled
},
requires_model=requires_model
)
return isamAppliance.create_return_object(warnings=obj['warnings'])
def restart(isamAppliance, check_mode=False, force=False):
"""
Rebooting and enabling the FIPS Mode configuration
:param isamAppliance:
:param check_mode:
:param force:
:return:
"""
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put(
"Rebooting and enabling the FIPS Mode configuration",
"/fips_cfg/restart",
{}, requires_model=requires_model
)
def restart_and_wait(isamAppliance, wait_time=300, check_freq=5, check_mode=False, force=False):
"""
Restart after FIPS configuration changes
:param isamAppliance:
:param wait_time:
:param check_freq:
:param check_mode:
:param force:
:return:
"""
if isamAppliance.facts['model'] != "Appliance":
return isamAppliance.create_return_object(
warnings="API invoked requires model: {0}, appliance is of deployment model: {1}.".format(
requires_model, isamAppliance.facts['model']))
warnings = []
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
firmware = ibmsecurity.isam.base.firmware.get(isamAppliance, check_mode=check_mode, force=force)
ret_obj = restart(isamAppliance)
if ret_obj['rc'] == 0:
sec = 0
# Now check if it is up and running
while 1:
ret_obj = ibmsecurity.isam.base.firmware.get(isamAppliance, check_mode=check_mode, force=force,
ignore_error=True)
# check partition last_boot time
if ret_obj['rc'] == 0 and isinstance(ret_obj['data'], list) and len(ret_obj['data']) > 0 and \
(('last_boot' in ret_obj['data'][0] and ret_obj['data'][0]['last_boot'] != firmware['data'][0][
'last_boot'] and ret_obj['data'][0]['active'] == True) or (
'last_boot' in ret_obj['data'][1] and ret_obj['data'][1]['last_boot'] !=
firmware['data'][1]['last_boot'] and ret_obj['data'][1]['active'] == True)):
logger.info("Server is responding and has a different boot time!")
return isamAppliance.create_return_object(warnings=warnings)
else:
time.sleep(check_freq)
sec += check_freq
logger.debug(
"Server is not responding yet. Waited for {0} secs, next check in {1} secs.".format(sec,
check_freq))
if sec >= wait_time:
warnings.append(
"The FIPS restart not detected or completed, exiting... after {0} seconds".format(sec))
break
return isamAppliance.create_return_object(warnings=warnings)
def _check(isamAppliance, fipsEnabled, tlsv10Enabled, tlsv11Enabled):
obj = {'value': True, 'warnings': ""}
ret_obj = get(isamAppliance)
obj['warnings'] = ret_obj['warnings']
if ret_obj['data']['fipsEnabled'] != fipsEnabled:
logger.info("fipsEnabled change to {0}".format(fipsEnabled))
obj['value'] = False
return obj
if ret_obj['data']['tlsv10Enabled'] != tlsv10Enabled:
logger.info("TLS v1.0 change to {0}".format(tlsv10Enabled))
obj['value'] = False
return obj
if ret_obj['data']['tlsv11Enabled'] != tlsv11Enabled:
logger.info("TLS v1.1 change to {0}".format(tlsv11Enabled))
obj['value'] = False
return obj
return obj
def compare(isamAppliance1, isamAppliance2):
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
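
# Minimal usage sketch (added; assumes an authenticated ISAM appliance object
# `appliance` constructed elsewhere with ibmsecurity's application code):
#
#   ret = set(appliance, fipsEnabled=True, tlsv10Enabled=False, tlsv11Enabled=False)
#   if ret.get('changed'):
#       restart_and_wait(appliance, wait_time=600)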

# ===== File: user_details/give_default.py (repo: Shreyanshsachan/College-Predictor, license: MIT) =====
preference_list_of_user = []
def give(def_list):
    Def = def_list
    global preference_list_of_user
    preference_list_of_user = Def
    return Def

def give_to_model():
    return preference_list_of_user

# ===== File: face_recognition/project/type_hints.py (repo: dgr113/face-recognition, license: MIT) =====
# coding: utf-8
import numpy as np
import keras.utils
from pathlib import Path
from typing import Tuple, Sequence, Union, Hashable, Iterable, Mapping, Any
### COMMON TYPES
UNIVERSAL_PATH_TYPE = Union[Path, str]
UNIVERSAL_SOURCE_TYPE = Union[UNIVERSAL_PATH_TYPE, Mapping]
CHUNKED_DATA_TYPE = Tuple[Sequence[np.ndarray], Sequence[int]]
ONE_MORE_KEYS = Union[Hashable, Iterable[Hashable]]
VALIDATE_RESUTS_TYPE = Tuple[str, Union[Mapping, None]]
COORDS_TYPE = Tuple[int, int, int, int]
KEYS_OR_NONE_TYPE = Union[Sequence[Hashable], None]
### USER INPUT DATA TYPES
PERSONS_DATA_TYPE = Mapping[str, Mapping[str, str]]
CAMERA_DATA_TYPE = Mapping[str, Any]
FRAME_SHAPE_TYPE = Tuple[int, int, int]
### LEARN MODEL TYPES
MODEL_CONFIG_TYPE = Mapping[str, Union[str, int, list, None]]
TRAIN_DATA_TYPE = Sequence[np.ndarray]
TRAIN_LABELS_TYPE = Sequence[np.ndarray]
TRAIN_DATA_GEN_TYPE = Union[Tuple[TRAIN_DATA_TYPE, TRAIN_LABELS_TYPE], keras.utils.Sequence]
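
# Illustrative use of one of the aliases (added example; the (x1, y1, x2, y2)
# box convention is an assumption, not defined in this module):
#
#   def crop(frame: np.ndarray, box: COORDS_TYPE) -> np.ndarray:
#       x1, y1, x2, y2 = box
#       return frame[y1:y2, x1:x2]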

# ===== File: modules/augmentation.py (repo: AdamMiltonBarker/hias-all-oneapi-classifier, license: MIT) =====
#!/usr/bin/env python
""" HIAS AI Model Data Augmentation Class.
Provides data augmentation methods.
MIT License
Copyright (c) 2021 Asociación de Investigacion en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker - First version - 2021-5-2
"""
import cv2
import random
import numpy as np
from numpy.random import seed
from scipy import ndimage
from skimage import transform as tm
class augmentation():
""" HIAS AI Model Data Augmentation Class
Provides data augmentation methods.
"""
def __init__(self, helpers):
""" Initializes the class. """
self.helpers = helpers
self.seed = self.helpers.confs["data"]["seed"]
seed(self.seed)
self.helpers.logger.info(
"Augmentation class initialization complete.")
def grayscale(self, data):
""" Creates a grayscale copy. """
gray = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY)
return np.dstack([gray, gray, gray]).astype(np.float32)/255.
def equalize_hist(self, data):
""" Creates a histogram equalized copy. """
img_to_yuv = cv2.cvtColor(data, cv2.COLOR_BGR2YUV)
img_to_yuv[:, :, 0] = cv2.equalizeHist(img_to_yuv[:, :, 0])
hist_equalization_result = cv2.cvtColor(img_to_yuv, cv2.COLOR_YUV2BGR)
return hist_equalization_result.astype(np.float32)/255.
def reflection(self, data):
""" Creates a reflected copy. """
return cv2.flip(data, 0).astype(np.float32)/255., cv2.flip(data, 1).astype(np.float32)/255.
def gaussian(self, data):
""" Creates a gaussian blurred copy. """
return ndimage.gaussian_filter(
data, sigma=5.11).astype(np.float32)/255.
def translate(self, data):
""" Creates transformed copy. """
cols, rows, chs = data.shape
return cv2.warpAffine(
data, np.float32([[1, 0, 84], [0, 1, 56]]), (rows, cols),
borderMode=cv2.BORDER_CONSTANT,
borderValue=(144, 159, 162)).astype(np.float32)/255.
def rotation(self, data, label, tdata, tlabels):
""" Creates rotated copies. """
cols, rows, chs = data.shape
for i in range(0, self.helpers.confs["data"]["rotations"]):
# Seed needs to be set each time randint is called
random.seed(self.seed)
rand_deg = random.randint(-180, 180)
matrix = cv2.getRotationMatrix2D(
(cols/2, rows/2), rand_deg, 0.70)
rotated = cv2.warpAffine(
data, matrix, (rows, cols),
borderMode=cv2.BORDER_CONSTANT,
borderValue=(144, 159, 162))
rotated = rotated.astype(np.float32)/255.
tdata.append(rotated)
tlabels.append(label)
return tdata, tlabels
def shear(self, data):
""" Creates a histogram equalized copy. """
at = tm.AffineTransform(shear=0.5)
return tm.warp(data, inverse_map=at)
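
# Minimal usage sketch (added; assumes a `helpers` object exposing
# confs["data"]["seed"], confs["data"]["rotations"] and a logger, as used above):
#
#   aug = augmentation(helpers)
#   gray = aug.grayscale(bgr_image)              # uint8 BGR image -> float32 in [0, 1]
#   flipped_x, flipped_y = aug.reflection(bgr_image)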

# ===== File: pytglib/api/types/rich_text_phone_number.py (repo: iTeam-co/pytglib, license: MIT) =====
from ..utils import Object
class RichTextPhoneNumber(Object):
"""
A rich text phone number
Attributes:
ID (:obj:`str`): ``RichTextPhoneNumber``
Args:
text (:class:`telegram.api.types.RichText`):
Text
phone_number (:obj:`str`):
Phone number
Returns:
RichText
Raises:
:class:`telegram.Error`
"""
ID = "richTextPhoneNumber"
def __init__(self, text, phone_number, **kwargs):
self.text = text # RichText
self.phone_number = phone_number # str
@staticmethod
def read(q: dict, *args) -> "RichTextPhoneNumber":
text = Object.read(q.get('text'))
phone_number = q.get('phone_number')
return RichTextPhoneNumber(text, phone_number)

# ===== File: cne/__init__.py (repo: BartWojtowicz/cne, license: Apache-2.0) =====
from .cne import CNE
__version__ = "0.0.dev"

# ===== File: tests/core/scenario_finder/file_filters/test_file_filter.py (repo: nikitanovosibirsk/vedro, license: Apache-2.0) =====
from pytest import raises
from vedro._core._scenario_finder._file_filters import FileFilter
def test_file_filter():
with raises(Exception) as exc_info:
FileFilter()
assert exc_info.type is TypeError
assert "Can't instantiate abstract class FileFilter" in str(exc_info.value)

# ===== File: concept_disc/pubmed_dump.py (repo: nmonath/concept_discovery, license: MIT) =====
"""
Parse PubMed Dump
Ref:
https://www.nlm.nih.gov/databases/download/pubmed_medline.html
https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html
https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html#medlinecitation
"""
from collections import defaultdict
from concurrent import futures
import glob
import gzip
import multiprocessing
import os
from pathlib import Path
import re
from threading import Thread
from typing import Dict, Generator, List, Optional, Sequence, Set, Union
# noinspection PyPep8Naming
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
from .misc import PersistentObject
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
BASE_DIR = os.path.expanduser('~/Home/Projects/ConceptRecogn')
AB3P_DIR = os.path.join(BASE_DIR, 'Tools', 'Ab3P')
AB3P_CMD = './identify_abbr'
SPACES_PATT = re.compile(r'\s+')
SENTINEL = '_SENTINEL_'
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class PubmedDocument:
def __init__(self, pmid: str, title: str = None, abstract: str = None, is_english: bool = True):
self.pmid = pmid
self.title = title
self.abstract = abstract
self.is_english = is_english
return
def get_text(self):
txt = "\n".join([s for s in (self.title, self.abstract) if s])
if not txt:
txt = None
return txt
    def __str__(self):
        # "{}" rather than "{:s}" so a None title/abstract does not raise
        return "pmid = {}\ntitle = {}\nabstract = {}".format(self.pmid, self.title, self.abstract)
@classmethod
def from_xml(cls, pubmed_article: ET.Element):
assert pubmed_article.tag == "PubmedArticle"
pmid = pubmed_article.findtext("./MedlineCitation/PMID")
is_english = True
title = extract_subelem_text(pubmed_article.find("./MedlineCitation/Article/ArticleTitle"))
if not title or title == "Not Available":
title = extract_subelem_text(pubmed_article.find("./MedlineCitation/Article/ARTICLETITLE"))
if title:
title = title.strip()
if title.startswith("[") and title.endswith("]"):
title = title.strip("[]")
is_english = False
if title.endswith("(author's transl)"):
title = title[:-len("(author's transl)")].strip()
if title == "In Process Citation":
title = None
abstr = extract_subelem_text(pubmed_article.find("./MedlineCitation/Article/Abstract"))
return cls(pmid, title, abstr, is_english)
# /
class LazyPubmedDocument:
def __init__(self, pmid: str, article_xml: ET.Element, source: str = None):
assert pmid is not None
self.pmid = pmid.strip()
self.article_xml = article_xml
self.source = source
self._title = None
self._abstract = None
self._is_english = None
self._title_parsed = False
return
@property
def title(self):
self._parse_title_abstract()
return self._title
@property
def abstract(self):
self._parse_title_abstract()
return self._abstract
@property
def is_english(self):
self._parse_title_abstract()
return self._is_english
def get_text(self):
txt = "\n".join([s for s in (self.title, self.abstract) if s])
if not txt:
txt = None
return txt
def get_mesh_headings_xml(self) -> List[ET.Element]:
return self.article_xml.findall("./MedlineCitation/MeshHeadingList")
def get_supplemental_mesh_xml(self) -> List[ET.Element]:
"""
This info includes Supplemental Records on: Protocols, Diseases, Organisms
"""
return self.article_xml.findall("./MedlineCitation/SupplMeshList")
def get_chemicals_xml(self) -> List[ET.Element]:
return self.article_xml.findall("./MedlineCitation/ChemicalList")
def get_keywords_xml(self) -> List[ET.Element]:
return self.article_xml.findall("./MedlineCitation/KeywordList")
def _parse_title_abstract(self):
if self._title_parsed:
return
is_english = True
title = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/ArticleTitle"))
if not title or title == "Not Available":
title = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/ARTICLETITLE"))
if title:
title = title.strip()
if title.startswith("[") and title.endswith("]"):
title = title.strip("[]")
is_english = False
if title.endswith("(author's transl)"):
title = title[:-len("(author's transl)")].strip()
if title == "In Process Citation":
title = ""
self._title = title
self._is_english = is_english
self._abstract = extract_subelem_text(self.article_xml.find("./MedlineCitation/Article/Abstract"))
self._title_parsed = True
return
def to_xml(self) -> ET.Element:
"""
Output format as parsed by `Article`
"""
doc = ET.Element("Article", pmid=self.pmid)
if self.source:
doc.set("source", self.source)
ET.SubElement(doc, "Title").text = self.title
ET.SubElement(doc, "Abstract").text = self.abstract
for children in [self.get_mesh_headings_xml(),
self.get_supplemental_mesh_xml(),
self.get_chemicals_xml(),
self.get_keywords_xml()]:
if children:
doc.extend(children)
return doc
    def __str__(self):
        # "{}" rather than "{:s}" so a None title/abstract does not raise
        return "pmid = {}\ntitle = {}\nabstract = {}".format(self.pmid, self.title, self.abstract)
@classmethod
    def from_pubmed_xml(cls, pubmed_article: ET.Element, source: str = None):
assert pubmed_article.tag == "PubmedArticle"
pmid = pubmed_article.findtext("./MedlineCitation/PMID")
return cls(pmid, pubmed_article, source=source)
# /
class PubmedDumpIndex(PersistentObject):
def __init__(self):
super().__init__()
# Dir where all the dump files exist.
# dump_file_path = {base_dir}/{dump_file_name}
self.base_dir = None
# dump_file_name(str) -> List[pmid(str)]
self.dumpfile_index = dict()
# pmid(str) -> dump_file_name(str)
self.docid_index = None
return
def get_dump_file(self, pmid: str) -> Optional[str]:
"""
Returns absolute path (str) to file containing Doc with specified `pmid`,
or None if not found.
"""
if self.docid_index is None:
self._build_docid_index()
fname = self.docid_index.get(pmid)
if fname is not None:
return f"{self.base_dir}/{fname}"
return
def get_dump_files(self, pmids: Sequence[str]) -> Dict[str, str]:
if self.docid_index is None:
self._build_docid_index()
pmid_file_dict = {pmid_ : self.get_dump_file(pmid_) for pmid_ in pmids}
return pmid_file_dict
def get_doc(self, pmid: str) -> Optional[LazyPubmedDocument]:
dump_file = self.get_dump_file(pmid)
if dump_file is None:
return
for doc in lazy_parse_dump_file(dump_file):
if doc.pmid == pmid:
return doc
return
def get_docs(self, pmids: Sequence[str]) -> Generator[LazyPubmedDocument, None, None]:
"""
Generator yields LazyPubmedDocument for docs found for PMID in pmids.
Order may be different. Only found docs are returned.
"""
pmid_file_dict = self.get_dump_files(pmids)
file_pmids = defaultdict(set)
for pmid, fpath in pmid_file_dict.items():
file_pmids[fpath].add(pmid)
for dump_fpath, pmid_set in file_pmids.items():
n_pmids = len(pmid_set)
for doc in lazy_parse_dump_file(dump_fpath):
if doc.pmid in pmid_set:
yield doc
n_pmids -= 1
if n_pmids == 0:
break
return
def _build_docid_index(self):
self.docid_index = dict()
for fpath, pmids in self.dumpfile_index.items():
for pmid_ in pmids:
self.docid_index[pmid_] = fpath
return
@staticmethod
def build_save_index(pubmed_dump_files_or_patt: Union[str, List[str]],
output_file: str,
nprocs: int):
"""
Run `nprocs` processes to build an index into `pubmed_dump_files`,
and save it to `output_file`.
:param pubmed_dump_files_or_patt: Glob pattern or list of paths containing Pubmed-Dump
Assumes that all the files are in the same directory!
:param output_file: Where each index will be saved, as a Pickle file (*.pkl)"
:param nprocs:
"""
print("PubmedDumpIndex.build_save_index:")
print(" pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt)
print(" output_file =", output_file)
print(" nprocs =", nprocs)
output_file = os.path.expanduser(output_file)
output_dir = os.path.dirname(output_file)
output_dir = os.path.expanduser(output_dir)
if not Path(output_dir).exists():
print("Creating dir:", output_dir)
Path(output_dir).mkdir()
print('Starting {} processes ...'.format(nprocs), flush=True)
m = multiprocessing.Manager()
res_queue = m.Queue()
# Using a process pool to start the sub-processes. Allows gathering return values.
# With this method, Queue instance must be inherited by the sub-processes (e.g. as a global);
# passing queue as an arg results in RuntimeError.
with futures.ProcessPoolExecutor(max_workers=nprocs) as executor:
results = executor.map(PubmedDumpIndex.build_index_procr,
[pubmed_dump_files_or_patt] * nprocs,
[res_queue] * nprocs,
range(nprocs), [nprocs] * nprocs)
pmindex = PubmedDumpIndex()
# Put Queue consumer in a Thread
t = Thread(target=pmindex._gather_file_docids, args=(nprocs, res_queue), daemon=False)
t.start()
# Join the consumer Thread until it is done
t.join()
# Get return values ... possible if processes started using ProcessPoolExecutor
tot_docs_found = 0
for (proc_nbr, docs_found) in results:
print('... Sub-process {:d} found {:,d} docs'.format(proc_nbr, docs_found), flush=True)
tot_docs_found += docs_found
print('Total nbr docs written = {:,d}'.format(tot_docs_found))
pmindex.save(output_file)
return
@staticmethod
def build_index_procr(pubmed_dump_files_or_patt: Union[str, List[str]],
res_queue,
proc_nbr: int, nprocs: int):
assert 0 <= proc_nbr < nprocs
if isinstance(pubmed_dump_files_or_patt, List):
pubmed_dump_files = [os.path.expanduser(f) for f in pubmed_dump_files_or_patt]
else:
pubmed_dump_files = glob.glob(os.path.expanduser(pubmed_dump_files_or_patt))
# Ensure each process sees same ordering
pubmed_dump_files = sorted(pubmed_dump_files)
tot_docs_found = 0
# Process every `nprocs`-th file starting at index `proc_nbr`
for fi in range(proc_nbr, len(pubmed_dump_files), nprocs):
file_pmids = []
for doc in lazy_parse_dump_file(pubmed_dump_files[fi]):
file_pmids.append(doc.pmid)
res_queue.put(('add', proc_nbr, pubmed_dump_files[fi], file_pmids))
tot_docs_found += len(file_pmids)
res_queue.put((SENTINEL, proc_nbr))
return proc_nbr, tot_docs_found
def _gather_file_docids(self, nprocs: int, res_queue):
n_dump_files_processed = 0
while nprocs > 0:
qry_data = res_queue.get()
if qry_data[0] == SENTINEL:
nprocs -= 1
print('... Sub-process {} end recd.'.format(qry_data[1]), flush=True)
else:
n_dump_files_processed += 1
_, proc_nbr, pubmed_dump_file, file_pmids = qry_data
base_dir, file_name = os.path.split(pubmed_dump_file)
if self.base_dir is None:
self.base_dir = base_dir
self.dumpfile_index[file_name] = file_pmids
print("Nbr dump files processed = {:,d}".format(n_dump_files_processed), flush=True)
return
# /
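
# Minimal usage sketch (added; paths are hypothetical, and it assumes
# PersistentObject provides a load() matching the save() used above):
#
#   PubmedDumpIndex.build_save_index("~/pubmed/dump/*.xml.gz", "~/pubmed/index.pkl", nprocs=4)
#   index = PubmedDumpIndex.load("~/pubmed/index.pkl")
#   doc = index.get_doc("12345678")
#   if doc is not None:
#       print(doc.title)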
# -----------------------------------------------------------------------------
# Article - from PubMed or MeSH-Dump
# -----------------------------------------------------------------------------
class MeshHeading:
def __init__(self, uid: str, name: str, is_major: bool):
self.uid = uid
self.name = name
self.is_major = is_major
return
def __str__(self):
return "{:s}: {:s}{:s}".format(self.uid, self.name, " *" if self.is_major else "")
# /
class SupplMeshName:
def __init__(self, uid: str, name: str, suppl_type: str):
self.uid = uid
self.name = name
self.suppl_type = suppl_type
return
def __str__(self):
return "{:s}: {:s} [{:s}]".format(self.uid, self.name, self.suppl_type)
# /
class Qualifier(MeshHeading):
def __init__(self, uid: str, name: str, is_major: bool):
super().__init__(uid, name, is_major)
return
# /
class MainHeading(MeshHeading):
def __init__(self, uid: str, name: str, is_major: bool):
super().__init__(uid, name, is_major)
# Whether a Qualifier is marked as Major
self.is_qualified_major: bool = False
self.qualifiers: Set[Qualifier] = set()
return
def add_qualifier(self, qlfr: Qualifier):
self.qualifiers.add(qlfr)
if qlfr.is_major:
self.is_qualified_major = True
return
def __str__(self):
mystr = super().__str__()
if self.qualifiers:
mystr += " / " + ", ".join([str(qlfr) for qlfr in self.qualifiers])
return mystr
# /
class Keyword:
def __init__(self, name: str, is_major: bool):
self.name = name
self.is_major = is_major
return
def __str__(self):
return "{:s}{:s}".format(self.name, " *" if self.is_major else "")
# /
class Article:
def __init__(self, pmid: str, title: str, abstract: Optional[str]):
self.pmid = pmid
self.abstract = abstract
self.is_english = True
if title:
title = title.strip()
if title.startswith("[") and title.endswith("]"):
title = title.strip("[]")
self.is_english = False
if title.endswith("(author's transl)"):
title = title[:-len("(author's transl)")].strip()
if title == "In Process Citation":
title = None
self.title = title or ""
self.main_headings: List[MainHeading] = []
self.suppl_concept_records: List[SupplMeshName] = []
self.keywords: List[Keyword] = []
return
def to_xml(self, pubmed_format: bool = False) -> ET.Element:
"""
Get this article as an XML element.
:param pubmed_format: Use XML format as returned by PubMed API ... PubmedArticle/MedlineCitation
"""
def format_title():
return escape(self.title if self.is_english else "[" + self.title + "]")
def is_yn(flag: bool):
return "Y" if flag else "N"
if pubmed_format:
root = ET.Element("PubmedArticle")
medline = ET.SubElement(root, "MedlineCitation")
ET.SubElement(medline, "PMID").text = self.pmid
article = ET.SubElement(medline, "Article")
ET.SubElement(article, "ArticleTitle").text = format_title()
if self.abstract:
ET.SubElement(article, "Abstract").text = escape(self.abstract)
axml = medline
else:
root = ET.Element("Article", pmid=self.pmid)
ET.SubElement(root, "Title").text = format_title()
if self.abstract:
ET.SubElement(root, "Abstract").text = escape(self.abstract)
axml = root
if self.main_headings:
mhlist = ET.SubElement(axml, "MeshHeadingList")
for mhdg in self.main_headings:
mh_xml = ET.SubElement(mhlist, "MeshHeading")
mh_descr = ET.SubElement(mh_xml, "DescriptorName", UI=mhdg.uid, MajorTopicYN=is_yn(mhdg.is_major))
mh_descr.text = escape(mhdg.name)
for qlfr in mhdg.qualifiers:
q_xml = ET.SubElement(mh_xml, "QualifierName", UI=mhdg.uid, MajorTopicYN=is_yn(mhdg.is_major))
q_xml.text = escape(qlfr.name)
if self.suppl_concept_records:
scr_list = ET.SubElement(axml, "SupplMeshList")
for scr in self.suppl_concept_records:
scr_xml = ET.SubElement(scr_list, "SupplMeshName", UI=scr.uid, Type=escape(scr.suppl_type))
scr_xml.text = escape(scr.name)
if self.keywords:
kwd_list = ET.SubElement(axml, "KeywordList")
for kwd in self.keywords:
kwd_xml = ET.SubElement(kwd_list, "Keyword", MajorTopicYN=is_yn(kwd.is_major))
kwd_xml.text = escape(kwd.name)
return root
def get_major_headings(self):
return [hdg for hdg in self.main_headings if hdg.is_major or hdg.is_qualified_major]
@staticmethod
def from_xml_file(article_xml_file: str):
tree = ET.parse(article_xml_file)
return Article.from_xml_root(tree.getroot())
# noinspection PyTypeChecker
@staticmethod
def from_xml_root(root: ET.Element):
if root.tag == "Article":
pmid = root.get('pmid')
title = extract_subelem_text(root.find("./Title"))
abstr = extract_subelem_text(root.find("./Abstract"))
elif root.tag == "PubmedArticle":
# All the tags of interest are under './MedlineCitation'
root = root.find("./MedlineCitation")
pmid = root.findtext("./PMID")
title = extract_subelem_text(root.find("./Article/ArticleTitle"))
if not title or title == "Not Available":
title = extract_subelem_text(root.find("./Article/ARTICLETITLE"))
abstr = extract_subelem_text(root.find("./Article/Abstract"))
else:
raise NotImplementedError(f"Cannot parse root.tag = {root.tag}. Should be one of: Article, PubmedArticle")
article = Article(pmid, title, abstr)
for mh_elem in root.findall("./MeshHeadingList/MeshHeading"):
d_elem = mh_elem.find("./DescriptorName")
main_hdg = MainHeading(d_elem.get("UI"), d_elem.text, d_elem.get("MajorTopicYN", "N") == "Y")
article.main_headings.append(main_hdg)
for q_elem in mh_elem.findall("./QualifierName"):
main_hdg.add_qualifier(Qualifier(q_elem.get("UI"), q_elem.text, q_elem.get("MajorTopicYN", "N") == "Y"))
for sm_elem in root.findall("./SupplMeshList/SupplMeshName"):
scr = SupplMeshName(sm_elem.get("UI"), sm_elem.text, sm_elem.get("Type"))
article.suppl_concept_records.append(scr)
for kw_elem in root.findall("./KeywordList/Keyword"):
kwd = Keyword(kw_elem.text, kw_elem.get("MajorTopicYN", "N") == "Y")
article.keywords.append(kwd)
return article
# /
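# --- Hedged usage sketch (added for illustration; not part of the original
# module). It only uses names defined above; the PMID and MeSH values are
# made-up examples.
def _example_article_to_xml():
    art = Article("12345", "Example title", "Example abstract.")
    mh = MainHeading("D000001", "Calcimycin", is_major=False)
    mh.add_qualifier(Qualifier("Q000009", "adverse effects", is_major=True))
    art.main_headings.append(mh)
    # the major qualifier sets mh.is_qualified_major, so mh counts as major
    assert art.get_major_headings() == [mh]
    return ET.tostring(art.to_xml(pubmed_format=True))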
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def extract_subelem_text(xelem):
"""
Extracts and combines text from sub-elements of `xelem`.
:param xml.etree.ElementTree.Element xelem: xml.etree.ElementTree.Element.
:return: str
Special Cases
-------------
<title>GeneReviews<sup>®</sup></title> => 'GeneReviews ®'
R<sub>0</sub> => R0
<i>text</i> => text
<b>text</b> => text
<u>text</u> => text
will be extracted as 'GeneReviews ®'.
This is not strictly correct, but when tokenizing, will generate separate token for 'GeneReviews',
which is desirable.
"""
txt = None
if xelem is not None:
txt = ''
for subelem in xelem.iter():
if subelem.tag in ('abstract', 'title', 'p', 'sup', 'list-item'):
if txt and not txt.endswith(' '):
txt += ' '
elif subelem.tag == 'AbstractText':
if txt and not txt.endswith('\n'):
txt += '\n'
label = subelem.get("Label")
if label and label.upper() != "UNLABELLED":
txt += label + ":\n"
elif subelem.tag == "CopyrightInformation":
continue
if subelem.text:
txt += subelem.text
if subelem is not xelem and subelem.tag == 'title' and not txt.endswith(('. ', ': ')):
txt += ': '
if subelem.tail:
# Remove "\n" from subelem.tail
txt += re.sub(r"\s+", " ", subelem.tail)
if not txt:
txt = None
return clean_text(txt)
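# Illustrative sketch (added; not in the original source) of the special-case
# handling documented above:
def _example_extract_subelem_text():
    elem = ET.fromstring("<title>GeneReviews<sup>®</sup></title>")
    return extract_subelem_text(elem)  # -> 'GeneReviews ®'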
def clean_text(txt):
if txt is not None:
# Collapse multiple non-newline whitespaces to single BLANK
txt = re.sub(r'((?!\n)\s)+', ' ', txt.strip())
# Remove SPACE around newline
txt = re.sub(r' ?\n ?', '\n', txt)
# Collapse multiple newlines
txt = re.sub(r'\n+', '\n', txt)
# Remove SPACE preceding [,:.], IF there is also space after the punct.
txt = re.sub(r' ([,:.]) ', r'\1 ', txt)
return txt
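# Example (added for illustration):
#   clean_text("A  b ,  c\n\n d")  ->  "A b, c\nd"
# multiple blanks collapse, spaces around newlines vanish, blank lines merge,
# and the blank before ',' is folded into the punctuation.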
def parse_dump_file(pubmed_dump_file: str) -> List[PubmedDocument]:
is_gzipped = False
open_fn = open
if pubmed_dump_file.endswith(".gz"):
is_gzipped = True
open_fn = gzip.open
with open_fn(pubmed_dump_file) as f:
ftxt = f.read()
if is_gzipped:
# noinspection PyUnresolvedReferences
ftxt = ftxt.decode("UTF-8")
root = ET.fromstring(ftxt)
pubmed_docs = []
# Ignore elements "PubmedBookArticle"
for doc_root in root.iterfind("./PubmedArticle"):
doc = PubmedDocument.from_xml(doc_root)
pubmed_docs.append(doc)
return pubmed_docs
def lazy_parse_dump_file(pubmed_dump_file: str):
"""
Generator for LazyPubmedDocument
:param pubmed_dump_file:
"""
is_gzipped = False
open_fn = open
if pubmed_dump_file.endswith(".gz"):
is_gzipped = True
open_fn = gzip.open
with open_fn(pubmed_dump_file) as f:
ftxt = f.read()
if is_gzipped:
# noinspection PyUnresolvedReferences
ftxt = ftxt.decode("UTF-8")
root = ET.fromstring(ftxt)
# Ignore elements "PubmedBookArticle"
for doc_root in root.iterfind("./PubmedArticle"):
doc = LazyPubmedDocument.from_pubmed_xml(doc_root, source=pubmed_dump_file)
yield doc
return
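# Hedged usage sketch (added): stream documents one at a time instead of
# materializing the whole dump as parse_dump_file() does.
def _example_lazy_iteration(pubmed_dump_file: str):
    for doc in lazy_parse_dump_file(pubmed_dump_file):
        print(doc.pmid)  # LazyPubmedDocument exposes .pmid, as used below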
def extract_from_pubmed_dump(pubmed_dump_file: str,
output_dir: str,
pmids_file: str = None,
max_docs: int = 0,
verbose=False):
"""
Extracts Doc from PubMed dump, and writes it to `output_dir`.
:param pubmed_dump_file:
:param output_dir:
:param pmids_file:
:param max_docs:
:param verbose:
:return:
"""
pmids = None
if pmids_file is not None:
with open(os.path.expanduser(pmids_file)) as f:
pmids = set([line.strip() for line in f])
output_dir = os.path.expanduser(output_dir)
if not Path(output_dir).exists():
print("Creating dir:", output_dir)
Path(output_dir).mkdir()
if verbose:
print("Extracting from pubmed dump:", pubmed_dump_file, flush=True)
n_docs = 0
for doc in lazy_parse_dump_file(pubmed_dump_file):
if pmids and doc.pmid not in pmids:
continue
doc_file = f"{output_dir}/{doc.pmid}.xml"
ET.ElementTree(doc.to_xml()).write(doc_file, encoding="unicode", xml_declaration=True)
if verbose:
print(" ", doc.pmid, flush=True)
n_docs += 1
if 0 < max_docs <= n_docs:
break
return n_docs
def extract_proc_one(pubmed_dump_files_or_patt: Union[str, List[str]],
output_dir: str,
pmids_file: str,
proc_nbr: int,
nprocs: int):
"""
Called from `extract_from_pubmed_dump_mp`, does the tasks for one process (`proc_nbr`) out of `nprocs` processes.
:param pubmed_dump_files_or_patt:
:param output_dir:
:param pmids_file:
:param proc_nbr: in range [0, nprocs - 1]
:param nprocs: >= 1
:return: proc_nbr, Nbr docs written
"""
assert 0 <= proc_nbr < nprocs
if isinstance(pubmed_dump_files_or_patt, List):
pubmed_dump_files = [os.path.expanduser(f) for f in pubmed_dump_files_or_patt]
else:
print(f"extract_proc_one[{proc_nbr}]: pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt,
flush=True)
pubmed_dump_files = glob.glob(os.path.expanduser(pubmed_dump_files_or_patt))
print("extract_proc_one[{}]: nbr dump files = {:,d}".format(proc_nbr, len(pubmed_dump_files)), flush=True)
# Ensure each process sees same ordering
pubmed_dump_files = sorted(pubmed_dump_files)
tot_docs_found = 0
# Process every `nprocs`-th file starting at index `proc_nbr`
for fi in range(proc_nbr, len(pubmed_dump_files), nprocs):
tot_docs_found += extract_from_pubmed_dump(pubmed_dump_files[fi], output_dir, pmids_file, verbose=False)
return proc_nbr, tot_docs_found
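# Illustrative note (added): with nprocs=4, proc_nbr=1 handles the sorted dump
# files at indices 1, 5, 9, ... so the workers partition the files evenly and
# without overlap.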
def extract_from_pubmed_dump_mp(pubmed_dump_files_or_patt: Union[str, List[str]],
output_dir: str,
pmids_file: str,
nprocs: int):
"""
Run `nprocs` processes to extract docs of specified PMID.
:param pubmed_dump_files_or_patt: Glob pattern or list of paths containing Pubmed-Dump
:param output_dir: Where each doc will be written as a file: "{output_dir}/{pmid}.xml"
:param pmids_file: One PMID per line
:param nprocs:
"""
print("extract_from_pubmed_dump_mp:")
print(" pubmed_dump_files_or_patt =", pubmed_dump_files_or_patt)
print(" output_dir =", output_dir)
print(" pmids_file =", pmids_file)
output_dir = os.path.expanduser(output_dir)
if not Path(output_dir).exists():
print("Creating dir:", output_dir)
Path(output_dir).mkdir()
print('Starting {} processes ...'.format(nprocs), flush=True)
# Using a process pool to start the sub-processes. Allows gathering return values.
# With this method, Queue instance must be inherited by the sub-processes (e.g. as a global);
# passing queue as an arg results in RuntimeError.
with futures.ProcessPoolExecutor(max_workers=nprocs) as executor:
results = executor.map(extract_proc_one,
[pubmed_dump_files_or_patt] * nprocs,
[output_dir] * nprocs,
[pmids_file] * nprocs,
range(nprocs), [nprocs] * nprocs)
# Get return values ... possible if processes started using ProcessPoolExecutor
tot_docs_found = 0
for (proc_nbr, docs_found) in results:
print('... Subprocess {:d} found {:,d} docs'.format(proc_nbr, docs_found))
tot_docs_found += docs_found
print('Total nbr docs written = {:,d}'.format(tot_docs_found))
return
def build_index(pubmed_dump_files_or_patt: Union[str, List[str]],
output_file: str,
nprocs: int):
# Import class here so that load from pickle does not report errors
# noinspection PyUnresolvedReferences
from cr.pubmed.pubmed_dump import PubmedDumpIndex
PubmedDumpIndex.build_save_index(pubmed_dump_files_or_patt, output_file, nprocs)
return
# ======================================================================================================
# Main
# ======================================================================================================
# Invoke as: python -m pubmed_dump CMD ...
if __name__ == '__main__':
import argparse
from datetime import datetime
from .misc import print_cmd
_argparser = argparse.ArgumentParser(
description='PubMed Dump Parser.',
)
_subparsers = _argparser.add_subparsers(dest='subcmd',
title='Available commands',
)
# Make the sub-commands required
_subparsers.required = True
# ... extract [-n NBR_PROCS] DUMP_PATH_PATTERN PMIDS_FILE OUTPUT_DIR
_sub_cmd_parser = _subparsers.add_parser('extract', help="Extract articles for specific PMIDs.")
_sub_cmd_parser.add_argument('-n', '--nbr_procs', type=int, default=4,
help="Nbr of sub-processes.")
_sub_cmd_parser.add_argument('dump_path_pattern', type=str,
help="Pattern for path to PubMed Dump files")
_sub_cmd_parser.add_argument('pmids_file', type=str,
help="Path to file containing PMIDs")
_sub_cmd_parser.add_argument('output_dir', type=str,
help="Output dir")
# ... build_index [-n NBR_PROCS] DUMP_PATH_PATTERN PMIDS_FILE OUTPUT_DIR
_sub_cmd_parser = _subparsers.add_parser('build_index',
help="Build and save PubmedDumpIndex.",
description=("e.g.: " +
"python -m pubmed_dump build_index -n 10 " +
"'../../PubMed/Data/D20191215/*.xml.gz' " +
"../../PubMed/Data/D20191215/pubmed_dump_index.pkl"))
_sub_cmd_parser.add_argument('-n', '--nbr_procs', type=int, default=4,
help="Nbr of sub-processes.")
_sub_cmd_parser.add_argument('dump_path_pattern', type=str,
help="Pattern for path to PubMed Dump files")
_sub_cmd_parser.add_argument('output_file', type=str,
help="Path to where PubmedDumpIndex will be written as a Pickle file")
# ...
_args = _argparser.parse_args()
# .................................................................................................
start_time = datetime.now()
print()
print_cmd()
if _args.subcmd == 'extract':
extract_from_pubmed_dump_mp(_args.dump_path_pattern, _args.output_dir, _args.pmids_file, _args.nbr_procs)
elif _args.subcmd == 'build_index':
build_index(_args.dump_path_pattern, _args.output_file, _args.nbr_procs)
else:
raise NotImplementedError(f"Command not implemented: {_args.subcmd}")
# /
print('\nTotal Run time =', datetime.now() - start_time)
| 34.331539 | 120 | 0.58199 | 18,628 | 0.584005 | 1,457 | 0.045678 | 7,113 | 0.222999 | 0 | 0 | 8,974 | 0.281343 |
f05afdbd5aec954079117e24e6a1f75f80dba71c | 1,523 | py | Python | Consumer_test.py | image-store-org/image-store-py-web-api-consumer-test | 59d805e8a7b459a97ede7285f6e4a67e87cfba02 | [
"MIT"
] | null | null | null | Consumer_test.py | image-store-org/image-store-py-web-api-consumer-test | 59d805e8a7b459a97ede7285f6e4a67e87cfba02 | [
"MIT"
] | null | null | null | Consumer_test.py | image-store-org/image-store-py-web-api-consumer-test | 59d805e8a7b459a97ede7285f6e4a67e87cfba02 | [
"MIT"
] | null | null | null | import sys
sys.path.append('dependencies/image-store-py-web-api-consumer')
from Consumer import Consumer
class Consumer_test:
def __init__(self):
self.c = Consumer()
def get(self):
print('\x1b[6;30;42m' + 'GET' + '\x1b[0m')
print(self.c.get())
print('{}\n'.format(self.c.get().json()))
# get a data entry by id
def get_id(self, id):
print('\x1b[6;30;42m' + 'GET ID({})'.format(id) + '\x1b[0m')
print(self.c.get_id(id))
print('{}\n'.format(self.c.get_id(id).json()))
# get latest data entry
def get_latest(self):
print('\x1b[6;30;42m' + 'GET LATEST' + '\x1b[0m')
print(self.c.get_latest())
print('{}\n'.format(self.c.get_latest().json()))
# post a data entry, id incremented by internal mySQL counter
def post(self):
print('\x1b[6;30;42m' + 'POST' + '\x1b[0m')
print(self.c.post())
print('{}\n'.format(self.c.post().json()))
# TODO be able to edit payload with keywords e.g: title.TEST
# edit existing data entry by id
def put(self, id):
print('\x1b[6;30;42m' + 'PUT({})'.format(id) + '\x1b[0m')
print(self.c.put(id))
# delete data entry by id
def delete(self, id):
print('\x1b[6;30;42m' + 'DELETE({})'.format(id) + '\x1b[0m')
print(self.c.delete(id))
if __name__ == '__main__':
consumer = Consumer_test()
consumer.get()
consumer.get_id(1)
consumer.post()
consumer.get_latest()
consumer.put(10) | 30.46 | 69 | 0.570584 | 1,249 | 0.820092 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.331582 |
f05b7050495370891bf951394304c4e6b993404b | 864 | py | Python | Algorithms/MostCommonWord/mostCommonWord.py | riddhi-27/HacktoberFest2020-Contributions | 0a5c39169723b3ea3b6447d4005896900dd789bc | [
"MIT"
] | null | null | null | Algorithms/MostCommonWord/mostCommonWord.py | riddhi-27/HacktoberFest2020-Contributions | 0a5c39169723b3ea3b6447d4005896900dd789bc | [
"MIT"
] | null | null | null | Algorithms/MostCommonWord/mostCommonWord.py | riddhi-27/HacktoberFest2020-Contributions | 0a5c39169723b3ea3b6447d4005896900dd789bc | [
"MIT"
] | null | null | null | """Returns the word repeated most often in the given paragraph;
if more than one word ties, the latest to reach the maximum count is returned."""
import string
def mostCommonWord(paragraph: str) -> str:
# translate function maps every punctuation in given string to white space
words = paragraph.translate(str.maketrans(string.punctuation, ' '*len(string.punctuation)))
words = words.lower().split()
unique_words = {}
highest = 0
res = ''
for word in words:
if word not in unique_words:
unique_words[word] = 0
unique_words[word] += 1
if unique_words[word] >= highest:
highest = unique_words[word]
res = word
return res
print(mostCommonWord("HacktoberFest is live! Riddhi is participating in HACKtoBERfEST.Happy Coding.")) #Output: hacktoberfest | 34.56 | 125 | 0.66088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.373843 |
f05bdaed59cf5073cab62db01710a16ba5ff7771 | 7,597 | py | Python | app/views.py | PaulMurrayCbr/GameNight | 838c19dda765027abbe8e12e331268b01cb859c2 | [
"Unlicense"
] | null | null | null | app/views.py | PaulMurrayCbr/GameNight | 838c19dda765027abbe8e12e331268b01cb859c2 | [
"Unlicense"
] | null | null | null | app/views.py | PaulMurrayCbr/GameNight | 838c19dda765027abbe8e12e331268b01cb859c2 | [
"Unlicense"
] | null | null | null | from app import app, db
from flask import render_template, flash, redirect, get_flashed_messages
import forms
import models
import Character
from flask.globals import request
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
def menugear() :
return {
'pcs': models.Character.query.all()
}
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', menu=menugear())
@app.route('/whiteboard')
def whiteboard():
return render_template('whiteboard.html', menu=menugear())
@app.route('/pc/<name>/')
def character(name):
try:
pc = models.Character.query.filter_by(name=name).one()
updatepc_form=forms.PC(obj=pc)
newhp_form=forms.HP()
openhpbreakdown = False
states = get_flashed_messages(category_filter=['viewstate'])
if states:
for state in states:
if state['hpopen']:
openhpbreakdown = True
return render_template('pc.html',
updatepc_form=updatepc_form,
newhp_form = newhp_form,
pc=pc,
pcinfo=Character.buildInfo(pc),
menu=menugear(),
openhpbreakdown = openhpbreakdown)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/pc/<name>/update.do', methods=['POST'])
def do_updatepc(name):
try:
pc = models.Character.query.filter_by(name=name).one()
updatepc_form=forms.PC(obj=pc)
pc.abbrev = updatepc_form.abbrev.data
pc.name = updatepc_form.name.data
pc.pname = updatepc_form.pname.data
db.session.commit()
return redirect('/pc/%s' % pc.name)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/pc/<name>/addhptype.do', methods=['POST'])
def do_addhptypepc(name):
try:
pc = models.Character.query.filter_by(name=name).one()
newhp_form=forms.HP(obj=pc)
hp = models.Hp(
character_id = pc.id,
source = newhp_form.source.data,
max = newhp_form.max.data,
current = newhp_form.max.data,
ablative_only = newhp_form.ablative_only.data
)
db.session.add(hp)
db.session.commit()
flash({'hpopen':True}, 'viewstate')
return redirect('/pc/%s' % pc.name)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/pc/<name>/hp/<id>/set.do', methods=['GET', 'POST'])
def do_sethppc(name, id):
try:
pc = models.Character.query.filter_by(name=name).one()
hp = models.Hp.query.get(id)
if not hp:
flash(("HP %s not found" % id , 'danger'), 'msg')
elif hp.character_id != pc.id:
flash(("HP %s belongs to %s" % (id, hp.character.name) , 'danger'), 'msg')
else:
v = request.args.get('v', '')
if not v or v == '':
flash(("no new value specified" , 'warning'), 'msg')
else:
                try:
                    v = int(v)
                except ValueError:
                    flash(("'%s' does not appear to be a number" % v, 'warning'), 'msg')
                else:
                    # only update when the conversion succeeded; previously a bad
                    # value fell through and crashed on the %d format below
                    hp.current = v
                    db.session.commit()
                    flash(("Set current to %d" % v, 'success'), 'msg')
flash({'hpopen':True}, 'viewstate')
return redirect('/pc/%s' % pc.name)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/pc/<name>/hp/<id>/max.do', methods=['GET', 'POST'])
def do_maxhppc(name, id):
try:
pc = models.Character.query.filter_by(name=name).one()
hp = models.Hp.query.get(id)
if not hp:
flash(("HP %s not found" % id , 'danger'), 'msg')
elif hp.character_id != pc.id:
flash(("HP %s belongs to %s" % (id, hp.character.name) , 'danger'), 'msg')
else:
v = request.args.get('v', '')
if not v or v == '':
flash(("no new value specified" , 'warning'), 'msg')
else:
                try:
                    v = int(v)
                except ValueError:
                    flash(("'%s' does not appear to be a number" % v, 'warning'), 'msg')
                else:
                    hp.max = v
                    db.session.commit()
                    flash(("Set max to %d" % v, 'success'), 'msg')
flash({'hpopen':True}, 'viewstate')
return redirect('/pc/%s' % pc.name)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/pc/<name>/hp/<id>/add.do', methods=['GET', 'POST'])
def do_addhppc(name, id):
try:
pc = models.Character.query.filter_by(name=name).one()
hp = models.Hp.query.get(id)
if not hp:
flash(("HP %s not found" % id , 'danger'), 'msg')
elif hp.character_id != pc.id:
flash(("HP %s belongs to %s" % (id, hp.character.name) , 'danger'), 'msg')
else:
v = request.args.get('v', '')
if not v or v == '':
flash(("no new value specified" , 'warning'), 'msg')
else:
                try:
                    v = int(v)
                except ValueError:
                    flash(("'%s' does not appear to be a number" % v, 'warning'), 'msg')
                else:
                    hp.current += v
                    db.session.commit()
                    if v < 0:
                        flash(("Subtracted %d" % -v, 'success'), 'msg')
                    else:
                        flash(("Added %d" % v, 'success'), 'msg')
flash({'hpopen':True}, 'viewstate')
return redirect('/pc/%s' % pc.name)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/pc/<name>/hp/<id>/zap.do', methods=['GET', 'POST'])
def do_zaphppc(name, id):
try:
pc = models.Character.query.filter_by(name=name).one()
hp = models.Hp.query.get(id)
if not hp:
flash(("HP %s not found" % id , 'danger'), 'msg')
elif hp.character_id != pc.id:
flash(("HP %s belongs to %s" % (id, hp.character.name) , 'danger'), 'msg')
else:
db.session.delete(hp)
db.session.commit()
flash(("Deleted" , 'success'), 'msg')
flash({'hpopen':True}, 'viewstate')
return redirect('/pc/%s' % pc.name)
    except MultipleResultsFound:
        flash(('Found multiple characters named %s' % name, 'danger'), 'msg')
        pc = None
    except NoResultFound:
        flash(('PC %s not found' % name, 'warning'), 'msg')
        pc = None
return redirect('/')
@app.route('/admin/pc/')
def adminpc():
pcs = models.Character.query.all()
return render_template('/admin/pcs.html',
pcs=pcs,
newpc_form=forms.PC(),
menu=menugear())
@app.route('/admin/pc/newpc.do', methods=['POST'])
def do_newpc():
form = forms.PC(request.form)
pc = models.Character(name=form.name.data, pname=form.pname.data, abbrev=form.abbrev.data)
db.session.add(pc)
db.session.commit()
flash(("New PC", 'success'), 'msg')
return redirect('/admin/pc/')
@app.route('/admin/pc/<id>/delete.do', methods=['GET'])
def do_deletepc(id):
pc = models.Character.query.get(id)
if not pc:
flash(("PC %s not found" % id , 'danger'), 'msg')
else :
db.session.delete(pc)
db.session.commit()
flash(("PC '%s' deleted" % pc.name , 'success'), 'msg')
return redirect('/admin/pc/')
| 26.939716 | 94 | 0.612874 | 0 | 0 | 0 | 0 | 7,247 | 0.953929 | 0 | 0 | 1,909 | 0.251283 |
f05d0c3401f69142c582ade92cb02f323289bd68 | 183 | py | Python | dingomata/config/__init__.py | tigershadowclaw/discord-dingomata | 0b20d7b75a0af3387b19b17c336e5e14356d2f14 | [
"MIT"
] | null | null | null | dingomata/config/__init__.py | tigershadowclaw/discord-dingomata | 0b20d7b75a0af3387b19b17c336e5e14356d2f14 | [
"MIT"
] | null | null | null | dingomata/config/__init__.py | tigershadowclaw/discord-dingomata | 0b20d7b75a0af3387b19b17c336e5e14356d2f14 | [
"MIT"
] | null | null | null | from .bot import GuildConfig, ServiceConfig, get_logging_config, service_config
__all__ = [
"ServiceConfig",
"GuildConfig",
"get_logging_config",
"service_config",
]
| 20.333333 | 79 | 0.721311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.349727 |
f05da004efb57fa8123a5d8084bba03a6cd27ce9 | 623 | py | Python | create_tacacs.py | cromulon-actual/ise_automation | de3fbb762c3e1f4f41d81dda3bd2d33a11db1d58 | [
"MIT"
] | null | null | null | create_tacacs.py | cromulon-actual/ise_automation | de3fbb762c3e1f4f41d81dda3bd2d33a11db1d58 | [
"MIT"
] | null | null | null | create_tacacs.py | cromulon-actual/ise_automation | de3fbb762c3e1f4f41d81dda3bd2d33a11db1d58 | [
"MIT"
] | null | null | null | from ciscoisesdk import IdentityServicesEngineAPI
from ciscoisesdk.exceptions import ApiError
from dotenv import load_dotenv
import os
from pprint import pprint as ppr
load_dotenv()
admin = os.getenv("ISE_ADMIN")
pw = os.getenv("ISE_PW")
base_url = os.getenv("ISE_URL")
api = IdentityServicesEngineAPI(
username=admin, password=pw, base_url=base_url, version="3.0.0", verify=False)
print("=" * 50)
# Get Admin Users
search_result = api.admin_user.get_all()
ppr(search_result.response)
print("=" * 50)
# Get all TACACS profiles
search_result = api.tacacs_profile.get_all()
ppr(search_result.response)
print("=" * 50)
| 23.074074 | 82 | 0.764045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.133226 |
f05e57abf8788d483966f72cb158032481ce2596 | 4,121 | py | Python | 8_plot_data_perstation.py | sdat2/Yellowstone2 | 4008145b7372f5f5901de584656ceea44e73934a | [
"MIT"
] | null | null | null | 8_plot_data_perstation.py | sdat2/Yellowstone2 | 4008145b7372f5f5901de584656ceea44e73934a | [
"MIT"
] | null | null | null | 8_plot_data_perstation.py | sdat2/Yellowstone2 | 4008145b7372f5f5901de584656ceea44e73934a | [
"MIT"
] | null | null | null | # Program 8_plot_data_perstation.py written by Sanne Cottaar (sc845@cam.ac.uk)
file_name= '8_plot_data_perstation.py'
# Uses receiver functions computed to produce a nice graph for every directory in DATARF
import obspy
from obspy import read
from obspy.core import Stream
from obspy.core import trace
import matplotlib.pyplot as plt
import os.path
import time
import glob
import shutil
import numpy as np
from obspy import UTCDateTime
import receiver_function as rf
direc = 'DataRF'
flag = 'SV'
filt = 'jgf1'
stadirs = glob.glob(direc+'/*')
for stadir in stadirs:
print(stadir)
with open(stadir+'/selected_RFs_jgf1.dat','r') as f:
goodrfs= f.read().replace('\n', '')
# loop through events
stalist=glob.glob(stadir+'/*.PICKLE')
print(stalist)
c=0
# Loop through data
if(len(stalist)>0):
for i in range(len(stalist)): #range(cat.count()):
print(stalist[i])
seis=read(stalist[i],format='PICKLE')
distdg=seis[0].stats['dist']
if stalist[i] in goodrfs:
good=True
print('YAY',seis[0].stats['event'].magnitudes[0].mag)
else:
good=False
print('NO',seis[0].stats['event'].magnitudes[0].mag)
tshift=UTCDateTime(seis[0].stats['starttime'])-seis[0].stats['event'].origins[0].time
plt.subplot(1,3,1)
vertical = seis.select(channel='BHZ')[0]
vertical.filter('bandpass', freqmin=0.01,freqmax=.1, corners=2, zerophase=True)
            # Window around the P arrival: combine boolean masks with '&'
            # (the original 'and' between np.where() results was a bug)
            mask = (vertical.times() > seis[0].stats.traveltimes['P'] - 100) & \
                   (vertical.times() < seis[0].stats.traveltimes['P'] + 100)
            windowed = vertical.data[mask]
norm=np.max(np.abs(windowed))
if good:
plt.plot(vertical.times()-seis[0].stats.traveltimes['P'], vertical.data/norm+np.round(distdg),'k')
else:
plt.plot(vertical.times()-seis[0].stats.traveltimes['P'], vertical.data/norm+np.round(distdg),'r')
#plt.plot(seis[0].stats.traveltimes['P'],np.round(distdg),'.b')
#plt.plot(seis[0].stats.traveltimes['S'],np.round(distdg),'.g')
plt.xlim([-25,150])
plt.ylim([30,92])
plt.subplot(1,3,2)
radial = seis.select(channel='BHR')[0]
radial.filter('bandpass', freqmin=0.01,freqmax=.1, corners=2, zerophase=True)
            # Same windowing for the radial trace (the original indexed the
            # vertical trace here by mistake)
            mask = (radial.times() > seis[0].stats.traveltimes['P'] - 100) & \
                   (radial.times() < seis[0].stats.traveltimes['P'] + 100)
            windowed = radial.data[mask]
norm=np.max(np.abs(windowed))
if good:
plt.plot(radial.times()-seis[0].stats.traveltimes['P'], radial.data/norm+np.round(distdg),'k')
else:
plt.plot(radial.times()-seis[0].stats.traveltimes['P'], radial.data/norm+np.round(distdg),'r')
plt.xlim([-25,150])
plt.plot(seis[0].stats.traveltimes['P'],np.round(distdg),'.b')
plt.plot(seis[0].stats.traveltimes['S'],np.round(distdg),'.g')
plt.ylim([30,92])
plt.subplot(1,3,3)
RF=getattr(seis[0],filt)['iterativedeconvolution']
time=getattr(seis[0],filt)['time']
if good:
plt.plot(time, RF/np.max(np.abs(RF))+np.round(distdg),'k')
else:
plt.plot(time, RF/np.max(np.abs(RF))+np.round(distdg),'r')
plt.subplot(1,3,1)
plt.title('vertical')
plt.ylabel('distance')
plt.xlabel('time')
plt.subplot(1,3,2)
plt.title('radial')
plt.ylabel('distance')
plt.xlabel('time')
plt.subplot(1,3,3)
plt.title('receiver functions')
plt.ylabel('distance')
plt.xlabel('time')
#plt.xlim([-150,1000])
plt.show()
| 40.009709 | 162 | 0.542344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 744 | 0.180539 |
f061361346e5c53e6f9bfc725e3bc9a264fe2453 | 9,053 | py | Python | archon/__init__.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 1 | 2018-03-07T08:33:23.000Z | 2018-03-07T08:33:23.000Z | archon/__init__.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 2 | 2017-03-14T01:02:55.000Z | 2017-03-14T01:07:29.000Z | archon/__init__.py | HyechurnJang/archon | 2cda56436ed6dea65d38774f7c9ed6c3315dbc03 | [
"Apache-2.0"
] | 4 | 2017-02-03T04:53:07.000Z | 2020-04-20T07:52:47.000Z | # -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import re
import json
from pygics import Burst
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from archon.settings import SESSION_COOKIE_AGE
from archon.view import *
ARCHON_DEBUG = False
class ManagerAbstraction:
__MANAGER__ = None
@classmethod
def instance(cls, *argv, **kargs):
if cls.__MANAGER__ == None: cls.__MANAGER__ = cls(*argv, **kargs)
return cls.__MANAGER__
def getSummary(self, r, m, v):
return {
'name' : '?',
'icon' : 'Default.png',
'desc' : 'This is Unknown Manager',
'link' : '/dashboard',
'view' : DIV()
}
class ArchonReq:
def __init__(self, request, method, path, query, data):
self.Request = request
self.Method = method
self.Path = path
self.Query = query
self.Data = data
def __str__(self):
return '%s:%s\nQuery:%s\nData:%s' % (self.Method, self.Path, self.Query, self.Data)
class ArchonView:
class PageContent(TAG):
def __init__(self):
TAG.__init__(self, 'div', CLASS='pagecontent')
def __init__(self, app, lang):
self.Menu = DIV()
self.Page = ArchonView.PageContent()
self._app = app
self._lang = lang
def __call__(self, key):
glb_locale = archon_locales['GLOBAL']
if self._app in archon_locales:
app_locale = archon_locales[self._app]
if key in app_locale:
key_locale = app_locale[key]
for lang in self._lang:
if lang in key_locale: return key_locale[lang]
if key in glb_locale:
key_locale = glb_locale[key]
for lang in self._lang:
if lang in key_locale: return key_locale[lang]
return key
def __render__(self):
return {'menu' : self.Menu, 'page' : self.Page}
@classmethod
def __error__(cls, title, msg):
return {'menu' : DIV(), 'page' : ALERT(title, msg, CLASS='alert-danger')}
def pageview(manager_class, **async_path):
def wrapper(view):
@login_required
def decofunc(request):
request.session.set_expiry(SESSION_COOKIE_AGE)
method = request.method
            path = list(filter(None, request.path.split('/')))
            lang = list(filter(None, re.split(r';|,|q=0.\d', request.META['HTTP_ACCEPT_LANGUAGE'])))
app = view.__module__.split('.')[1]
v = ArchonView(app, lang)
try: m = manager_class.instance()
except Exception as e: return JsonResponse(ArchonView.__error__(v('manager allocation error'), str(e)))
try:
if method == 'GET':
query = dict(request.GET)
data = {}
elif method == 'POST':
query = dict(request.POST)
if not hasattr(request, '_body') and request._read_started: data = request.FILES
else: data = json.loads(request.body)
elif method == 'PUT':
query = dict(request.PUT)
if not hasattr(request, '_body') and request._read_started: data = request.FILES
else: data = json.loads(request.body)
elif method == 'DELETE':
query = {}
data = {}
else:
query = {}
data = {}
except Exception as e: return JsonResponse(ArchonView.__error__(v('request error'), str(e)))
r = ArchonReq(request, method, path, query, data)
async_path_names = async_path.keys()
for async_path_name in async_path_names:
if async_path_name in path:
try: return JsonResponse(async_path[async_path_name](r, m, v))
except Exception as e: return JsonResponse(ArchonView.__error__(v('application error'), str(e)))
try: view(r, m, v)
except Exception as e: return JsonResponse(ArchonView.__error__(v('application error'), str(e)))
return JsonResponse(v.__render__())
def decofunc_debug(request):
method = request.method
            path = list(filter(None, request.path.split('/')))
            lang = list(filter(None, re.split(r';|,|q=0.\d', request.META['HTTP_ACCEPT_LANGUAGE'])))
app = view.__module__.split('.')[1]
v = ArchonView(app, lang)
m = manager_class.instance()
if method == 'GET':
query = dict(request.GET)
data = {}
elif method == 'POST':
query = dict(request.POST)
if not hasattr(request, '_body') and request._read_started: data = request.FILES
else: data = json.loads(request.body)
elif method == 'PUT':
query = dict(request.PUT)
if not hasattr(request, '_body') and request._read_started: data = request.FILES
else: data = json.loads(request.body)
elif method == 'DELETE':
query = {}
data = {}
else:
query = {}
data = {}
r = ArchonReq(request, method, path, query, data)
async_path_names = async_path.keys()
for async_path_name in async_path_names:
if async_path_name in path:
return JsonResponse(async_path[async_path_name](r, m, v))
view(r, m, v)
return JsonResponse(v.__render__())
if ARCHON_DEBUG: return decofunc_debug
else: return decofunc
return wrapper
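# Hedged usage sketch (added; illustrative only — MyManager and the view body
# are made-up names): requests whose path contains "refresh" are answered by
# the async handler before the main view runs.
#
# class MyManager(ManagerAbstraction):
#     def getSummary(self, r, m, v): ...
#
# @pageview(MyManager, refresh=lambda r, m, v: v.__render__())
# def my_view(r, m, v):
#     v.Page = ArchonView.PageContent()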
def modelview(model):
admin.site.register(model, admin.ModelAdmin)
| 43.946602 | 117 | 0.435436 | 1,951 | 0.215509 | 0 | 0 | 2,441 | 0.269634 | 0 | 0 | 3,186 | 0.351928 |
f0618ae5c1b87db23e1c15aeed2890efe625454b | 283 | py | Python | src_Python/EtabsAPIaface0/a01comtypes/Excel03c.py | fjmucho/APIdeEtabsYPython | a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523 | [
"MIT"
] | null | null | null | src_Python/EtabsAPIaface0/a01comtypes/Excel03c.py | fjmucho/APIdeEtabsYPython | a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523 | [
"MIT"
] | null | null | null | src_Python/EtabsAPIaface0/a01comtypes/Excel03c.py | fjmucho/APIdeEtabsYPython | a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523 | [
"MIT"
] | null | null | null | import sys
import comtypes
from comtypes.client import CreateObject
try:
    # Connect to Excel over COM
    xl = CreateObject("Excel.Application")
except (OSError, comtypes.COMError):
    print("Excel is not installed.")
sys.exit(-1)
xl.Visible = True
print (xl) | 20.214286 | 49 | 0.720848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.29682 |
f0625e6b2d07feed6a373b43052746c1a7b2640c | 984 | py | Python | home/tests/add-remove sector.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | 2 | 2020-05-24T22:28:53.000Z | 2020-05-25T21:58:24.000Z | home/tests/add-remove sector.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | null | null | null | home/tests/add-remove sector.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | 1 | 2021-10-16T12:26:29.000Z | 2021-10-16T12:26:29.000Z | from selenium import webdriver
import time
chromedriver = "C:/Users/deniz/chromedriver/chromedriver"
driver = webdriver.Chrome(chromedriver)
driver.get('http://127.0.0.1:8000/')
dashboard = '//*[@id="accordionSidebar"]/li[1]/a'
sectors_1 = '//*[@id="sectors"]'
sectors_1_element = '//*[@id="sectors"]/option[4]'
add_sector = '//*[@id="select_filter_form"]/div[1]/input[1]'
remove_sector = '//*[@id="select_filter_form"]/div[1]/input[2]'
sectors_2 = '//*[@id="sectors2"]'
sectors_2_element = '//*[@id="sectors2"]/option[4]'
time.sleep(2)
driver.find_element_by_xpath(dashboard).click()
time.sleep(5)
driver.find_element_by_xpath(sectors_1).click()
time.sleep(2)
driver.find_element_by_xpath(sectors_1_element).click()
time.sleep(5)
driver.find_element_by_xpath(add_sector).click()
time.sleep(5)
driver.find_element_by_xpath(sectors_2).click()
time.sleep(2)
driver.find_element_by_xpath(sectors_2_element).click()
time.sleep(5)
driver.find_element_by_xpath(remove_sector).click()
| 29.818182 | 63 | 0.747967 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 299 | 0.303862 |
f065f569bc87da0b1005e3822cbd92500b510024 | 1,713 | py | Python | netensorflow/api_samples/ann_creation_and_usage.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | netensorflow/api_samples/ann_creation_and_usage.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | netensorflow/api_samples/ann_creation_and_usage.py | psigelo/NeTensorflow | ec8bc09cc98346484d1b682a3dfd25c68c4ded61 | [
"MIT"
] | null | null | null | import tensorflow as tf
from netensorflow.ann.ANN import ANN
from netensorflow.ann.macro_layer.MacroLayer import MacroLayer
from netensorflow.ann.macro_layer.layer_structure.InputLayerStructure import InputLayerStructure
from netensorflow.ann.macro_layer.layer_structure.LayerStructure import LayerStructure, LayerType
from netensorflow.ann.macro_layer.layer_structure.layers.FullConnected import FullConnected
from netensorflow.ann.macro_layer.layer_structure.layers.FullConnectedWithSoftmaxLayer import FullConnectedWithSoftmaxLayer
'''
ANN creation and simple usage; the goal of this code is simply to run the
simplest possible artificial neural network.
'''
def main():
# tensorflow
tf_sess = tf.Session()
# Layers:
input_dim = [None, 3]
hidden_layer = FullConnected(inputs_amount=20)
out_layer = FullConnectedWithSoftmaxLayer(inputs_amount=10)
# Layer Structures
input_layer_structure = InputLayerStructure(input_dim)
hidden_layer_structure = LayerStructure('Hidden', layer_type=LayerType.ONE_DIMENSION, layers=[hidden_layer])
    output_layer_structure = LayerStructure('Output', layer_type=LayerType.ONE_DIMENSION, layers=[out_layer])
# Macro Layer
macro_layers = MacroLayer(layers_structure=[input_layer_structure, hidden_layer_structure, output_layer_structure])
# ann
ann = ANN(macro_layers=macro_layers, tf_session=tf_sess, base_folder='./tensorboard_logs/')
ann.connect_and_initialize()
# Execute
for it in range(100):
import numpy as np
input_tensor_value = [np.random.uniform(0.0, 10.0, 3)]
print(ann.run(global_iteration=it, input_tensor_value=input_tensor_value))
if __name__ == '__main__':
main()
| 37.23913 | 123 | 0.782837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.13777 |
f0693b36a74b3acf0a861ff3c1c73f7355633501 | 3,449 | py | Python | auth-backend.py | alexanderbittner/spotify-tracks | 9095d0224f7e313d164a5da24add2b806afc1b31 | [
"MIT"
] | null | null | null | auth-backend.py | alexanderbittner/spotify-tracks | 9095d0224f7e313d164a5da24add2b806afc1b31 | [
"MIT"
] | null | null | null | auth-backend.py | alexanderbittner/spotify-tracks | 9095d0224f7e313d164a5da24add2b806afc1b31 | [
"MIT"
] | null | null | null | import json
from flask import Flask, request, redirect, g, render_template
import requests
from urllib.parse import quote
# Adapted from https://github.com/drshrey/spotify-flask-auth-example
# Authentication steps, parameters, and responses are defined at https://developer.spotify.com/web-api/authorization-guide/
app = Flask(__name__)
# Client & Token Files
CLIENT_ID_FILE = 'auth/client-id'
CLIENT_SECRET_FILE = 'auth/client-secret'
TOKEN_FILE = 'auth/token'
REFRESH_FILE = 'auth/refresh-token'
# Spotify URLS
SPOTIFY_AUTH_URL = "https://accounts.spotify.com/authorize"
SPOTIFY_TOKEN_URL = "https://accounts.spotify.com/api/token"
SPOTIFY_API_BASE_URL = "https://api.spotify.com"
API_VERSION = "v1"
SPOTIFY_API_URL = "{}/{}".format(SPOTIFY_API_BASE_URL, API_VERSION)
# Server-side Parameters
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 876
REDIRECT_URI = "{}:{}/callback/q".format(CLIENT_SIDE_URL, PORT)
SCOPE = "user-read-playback-state user-modify-playback-state"
STATE = ""
SHOW_DIALOG_bool = True
SHOW_DIALOG_str = str(SHOW_DIALOG_bool).lower()
# Client Keys
# .strip() guards against trailing newlines in the credential files
with open(CLIENT_ID_FILE, 'r') as f:
    CLIENT_ID = f.read().strip()
with open(CLIENT_SECRET_FILE, 'r') as f:
    CLIENT_SECRET = f.read().strip()
auth_query_parameters = {
"response_type": "code",
"redirect_uri": REDIRECT_URI,
"scope": SCOPE,
"client_id": CLIENT_ID
}
@app.route("/")
def index():
# Auth Step 1: Authorization
url_args = "&".join(["{}={}".format(key, quote(val)) for key, val in auth_query_parameters.items()])
auth_url = "{}/?{}".format(SPOTIFY_AUTH_URL, url_args)
return redirect(auth_url)
@app.route("/callback/q")
def callback():
# Auth Step 4: Requests refresh and access tokens
auth_token = request.args['code']
code_payload = {
"grant_type": "authorization_code",
"code": str(auth_token),
"redirect_uri": REDIRECT_URI,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload)
# Auth Step 5: Tokens are Returned to Application
response_data = json.loads(post_request.text)
access_token = response_data["access_token"]
refresh_token = response_data["refresh_token"]
#token_type = response_data["token_type"]
#expires_in = response_data["expires_in"]
# Auth Step 6: write token to file
with open(TOKEN_FILE, 'w') as file:
file.write(access_token)
with open(REFRESH_FILE, 'w') as file:
file.write(refresh_token)
display_arr = 'success!'
return render_template("index.html", sorted_array=display_arr)
@app.route("/refresh")
def refresh():
with open(REFRESH_FILE, 'r') as f:
refresh_token = f.read()
# Auth Step R: Requests refreshed access token
code_payload = {
"grant_type": "refresh_token",
"refresh_token": refresh_token,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
}
post_request = requests.post(SPOTIFY_TOKEN_URL, data=code_payload)
# Auth Step R1: Tokens are Returned to Application
response_data = json.loads(post_request.text)
access_token = response_data["access_token"]
# write token to file
with open(TOKEN_FILE, 'w') as file:
file.write(access_token)
display_arr = 'success!'
return render_template("index.html", sorted_array=display_arr)
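# Hedged sketch (added; not part of the original app): how the saved token can
# be used against the Web API. /me/player matches the scopes requested above.
def example_current_playback():
    with open(TOKEN_FILE) as f:
        token = f.read().strip()
    headers = {"Authorization": "Bearer " + token}
    return requests.get("{}/me/player".format(SPOTIFY_API_URL), headers=headers)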
if __name__ == "__main__":
app.run(debug=True, port=PORT) | 31.354545 | 123 | 0.701653 | 0 | 0 | 0 | 0 | 2,021 | 0.585967 | 0 | 0 | 1,268 | 0.367643 |
f069eb952ff8678a357b70b75757dce90f676973 | 3,342 | py | Python | Regression/utils_testing.py | saucec0de/sifu | 7924844e1737c7634016c677237bccd7e7651818 | [
"MIT"
] | 5 | 2021-03-26T08:19:43.000Z | 2021-12-18T18:04:04.000Z | Regression/utils_testing.py | saucec0de/sifu | 7924844e1737c7634016c677237bccd7e7651818 | [
"MIT"
] | null | null | null | Regression/utils_testing.py | saucec0de/sifu | 7924844e1737c7634016c677237bccd7e7651818 | [
"MIT"
] | null | null | null | import yaml
import os
### Sample Contents of config.yaml:
# 0002_info_leakage:
# category: Sifu C/C++
# points: 100
# description: Leave no trace
# vulnerability: CWE-14 * Information Leakage
# directory: Challenges/C_CPP/0002_info_leakage
# send_dir: true
# file: func_0009.c
# fname: func.c
# chal_id: c94062933919
# root: template
# root_file: chal_files.html
# run: ./run.py
# flag: f296-5420-65a9-7fc8
# type: c_makefile
# disable: false
# feedback: collect
# addHeader: |
# #define __OVERWRITE
# #include "utils.h"
# #include "deprecated.h"
# #include "redirect.h"
# #include "log.h"
localPath = os.path.join(os.path.dirname(__file__))
def FilesToJson(files, path=localPath):
"""
returns a {filename: contents} dict for
the given files on the given path
"""
contents = {}
# for multiple files, iterate over each
if type(files)==list:
for file in files:
with open(os.path.join(path, file)) as f:
contents[file]=f.read()
# for just one, do the deed
    elif isinstance(files, str):
with open(os.path.join(path, files)) as f:
contents[files]=f.read()
    # anything else is an unsupported type
    else:
        raise TypeError('[utils_testing] files must be a str or a list of str')
return contents
def fileContentsToStr(file):
with open(file, 'r') as f:
return f.read()
def makeIOforTest(path, inFileNames, outFileNames):
"""
Use to generate the test parametrization lists
----
Inputs: root path, expected input file names, expected output file names
Output: lists of one dict per param set (to be used with zip when parametrizing)
{
in_params:
[{inSet1_file1: inSet1_file1_contents, ..},
{inSet2_file2: inSet2__file2_contents}]
out_params:
[{outSet1_file1: outSet1_file1_contents, ..},
{outSet2_file2: outSet2__file2_contents}]
}
"""
test_in = []
test_out = []
for (dirpath, _, filenames) in os.walk(path):
if 'tc-' in dirpath:
files_in = {}
files_out = {}
for file in inFileNames:
files_in[file] = fileContentsToStr(os.path.join(dirpath,file))
for file in outFileNames:
files_out[file] = fileContentsToStr(os.path.join(dirpath,file))
test_in.append(files_in)
test_out.append(files_out)
return {'in_params': test_in,
'out_params': test_out}
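# Illustrative note (added): the walker above expects a layout like
#   IO/0002_info_leakage/tc-1/{database.json, func_0009.c, ai.json, log.txt}
#   IO/0002_info_leakage/tc-2/...
# Each 'tc-*' directory yields one dict in in_params and one in out_params,
# so tests can be parametrized with zip(io['in_params'], io['out_params']).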
if __name__=='__main__':
    # local 'testing'
    # NOTE: chalNameToChalID / getFilesForChalID are not defined in this module
    # (they live elsewhere in the project), so these smoke tests are commented
    # out to keep the script runnable on its own:
    # print("chalID for '0002_info_leakage' is:", chalNameToChalID('0002_info_leakage'))
    # print("files and filenames:\n", getFilesForChalID(chalNameToChalID('0002_info_leakage')))
    # print(FilesToJson(getFilesForChalID(chalNameToChalID('0002_info_leakage'))['fileNames'], path='../Challenges/C_CPP/0001_buffer_overflow'))
print("\n\n")
EgPathAsSeenByTests = '0002_info_leakage'
inFiles = ['database.json', 'func_0009.c']
outFiles = ['ai.json', 'log.txt']
outFiles_noLog = ['ai.json']
print(makeIOforTest('IO/0002_info_leakage', inFiles, outFiles))
| 28.810345 | 142 | 0.595751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,790 | 0.535607 |
f06a56919cbaa9b5814f0dd5b244fec4364f26b3 | 423 | py | Python | python/arachne/runtime/rpc/logger.py | fixstars/arachne | 03c00fc5105991d0d706b935d77e6f9255bae9e7 | [
"MIT"
] | 3 | 2022-03-29T03:02:20.000Z | 2022-03-29T03:48:38.000Z | python/arachne/runtime/rpc/logger.py | fixstars/arachne | 03c00fc5105991d0d706b935d77e6f9255bae9e7 | [
"MIT"
] | null | null | null | python/arachne/runtime/rpc/logger.py | fixstars/arachne | 03c00fc5105991d0d706b935d77e6f9255bae9e7 | [
"MIT"
] | 1 | 2022-03-29T05:44:12.000Z | 2022-03-29T05:44:12.000Z | import logging
class Logger(object):
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)s %(pathname)s:%(lineno)d] %(message)s")
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.INFO)
my_logger = logging.Logger("arachne.runtime.rpc")
my_logger.addHandler(stream_handler)
@staticmethod
def logger():
return Logger.my_logger
| 26.4375 | 88 | 0.72104 | 405 | 0.957447 | 0 | 0 | 63 | 0.148936 | 0 | 0 | 74 | 0.174941 |
f06ae02416b8f8f9bb909dbd1c4d484476e5b8f7 | 4,498 | py | Python | examples/pykey60/code-1.py | lesley-byte/pykey | ce21b5b6c0da938bf24891e5acb196d6779c433a | [
"MIT"
] | null | null | null | examples/pykey60/code-1.py | lesley-byte/pykey | ce21b5b6c0da938bf24891e5acb196d6779c433a | [
"MIT"
] | null | null | null | examples/pykey60/code-1.py | lesley-byte/pykey | ce21b5b6c0da938bf24891e5acb196d6779c433a | [
"MIT"
] | null | null | null | #pylint: disable = line-too-long
import os
import time
import board
import neopixel
import keypad
import usb_hid
import pwmio
import rainbowio
from adafruit_hid.keyboard import Keyboard
from pykey.keycode import KB_Keycode as KC
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
# Hardware definition: GPIO where RGB LED is connected.
pixel_pin = board.NEOPIXEL
num_pixels = 61
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1, auto_write=False)
cyclecount = 0
def rainbow_cycle(wait):
for i in range(num_pixels):
rc_index = (i * 256 // num_pixels) + wait
pixels[i] = rainbowio.colorwheel(rc_index & 255)
pixels.show()
buzzer = pwmio.PWMOut(board.SPEAKER, variable_frequency=True)
OFF = 0
ON = 2**15
# Hardware definition: Switch Matrix Setup.
keys = keypad.KeyMatrix(
row_pins=(board.ROW1, board.ROW2, board.ROW3, board.ROW4, board.ROW5),
column_pins=(board.COL1, board.COL2, board.COL3, board.COL4, board.COL5, board.COL6, board.COL7,
board.COL8, board.COL9, board.COL10, board.COL11, board.COL12, board.COL13, board.COL14),
columns_to_anodes=True,
)
# CONFIGURABLES ------------------------
MACRO_FOLDER = '/layers'
# CLASSES AND FUNCTIONS ----------------
class Layer:
""" Class representing a layer, for which we have a set
of macro sequences or keycodes"""
def __init__(self, layerdata):
self.name = layerdata['name']
self.macros = layerdata['macros']
# Neopixel update function
def update_pixels(color):
for i in range(num_pixels):
pixels[i] = color
pixels.show()
# INITIALIZATION -----------------------
# Load all the macro key setups from .py files in MACRO_FOLDER
layers = []
files = os.listdir(MACRO_FOLDER)
files.sort()
for filename in files:
print(filename)
if filename.endswith('.py'):
try:
module = __import__(MACRO_FOLDER + '/' + filename[:-3])
layers.append(Layer(module.layer))
except (SyntaxError, ImportError, AttributeError, KeyError, NameError,
IndexError, TypeError) as err:
print(err)
pass
if not layers:
print('NO MACRO FILES FOUND')
while True:
pass
layer_count = len(layers)
# print(layer_count)
def get_active_layer(layer_keys_pressed, layer_count):
tmp = 0
if len(layer_keys_pressed)>0:
for layer_id in layer_keys_pressed:
if layer_id > tmp: # use highest layer number
tmp = layer_id
if tmp >= layer_count:
tmp = layer_count-1
return tmp
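# Illustrative examples (added): the highest pressed layer wins and the result
# is clamped to the last defined layer, e.g. with layer_count=3:
#   get_active_layer([], 3)     -> 0   (base layer)
#   get_active_layer([1, 3], 3) -> 2   (3 clamped to layer_count - 1)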
# setup variables
keyboard = Keyboard(usb_hid.devices)
keyboard_layout = KeyboardLayoutUS(keyboard)
active_keys = []
not_sleeping = True
layer_index = 0
buzzer.duty_cycle = ON
buzzer.frequency = 440 #
time.sleep(0.05)
buzzer.frequency = 880 #
time.sleep(0.05)
buzzer.frequency = 440 #
time.sleep(0.05)
buzzer.duty_cycle = OFF
while not_sleeping:
key_event = keys.events.get()
if key_event:
key_number = key_event.key_number
cyclecount = cyclecount +1
rainbow_cycle(cyclecount)
# keep track of keys being pressed for layer determination
if key_event.pressed:
active_keys.append(key_number)
else:
active_keys.remove(key_number)
# reset the layers and identify which layer key is pressed.
layer_keys_pressed = []
for active_key in active_keys:
group = layers[0].macros[active_key][2]
for item in group:
if isinstance(item, int):
if (item >= KC.LAYER_0) and (item <= KC.LAYER_F) :
layer_keys_pressed.append(item - KC.LAYER_0)
layer_index = get_active_layer(layer_keys_pressed, layer_count)
# print(layer_index)
# print(layers[layer_index].macros[key_number][1])
group = layers[layer_index].macros[key_number][2]
color = layers[layer_index].macros[key_number][0]
if key_event.pressed:
update_pixels(color)
for item in group:
if isinstance(item, int):
keyboard.press(item)
else:
keyboard_layout.write(item)
else:
for item in group:
if isinstance(item, int):
if item >= 0:
keyboard.release(item)
#update_pixels(0x000000)
time.sleep(0.002)
| 28.289308 | 106 | 0.631392 | 229 | 0.050912 | 0 | 0 | 0 | 0 | 0 | 0 | 768 | 0.170743 |
f06b5ca0b13a5293cc2597359395e328535fbb92 | 433 | py | Python | tags.py | Manugs51/TFM_Metaforas | 3fb459cf80c71e6fbb1c2a58d20bc03a05a760bd | [
"MIT"
] | null | null | null | tags.py | Manugs51/TFM_Metaforas | 3fb459cf80c71e6fbb1c2a58d20bc03a05a760bd | [
"MIT"
] | null | null | null | tags.py | Manugs51/TFM_Metaforas | 3fb459cf80c71e6fbb1c2a58d20bc03a05a760bd | [
"MIT"
] | null | null | null |
UNIVERSAL_POS_TAGS = {
'VERB': 'verbo',
'NOUN': 'nombre',
'PRON': 'pronombre',
'ADJ' : 'adjetivo',
'ADV' : 'adverbio',
    'ADP' : 'adposición',
'CONJ': 'conjunción',
'DET' : 'determinante',
'NUM' : 'numeral',
'PRT' : 'partícula gramatical',
'X' : 'desconocido',
'.' : 'signo de puntuación',
}
BABEL = {
'v': 'verbo',
'n': 'nombre',
'a': 'adjetivo',
'r': 'adverbio',
} | 19.681818 | 35 | 0.484988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.592677 |
f06b803afcbf533b08fafb38257aef61240b8c46 | 942 | py | Python | pyramid_oereb/contrib/data_sources/standard/sources/availability.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 2 | 2018-01-23T13:16:12.000Z | 2018-01-26T06:27:29.000Z | pyramid_oereb/contrib/data_sources/standard/sources/availability.py | camptocamp/pyramid_oereb | 2d33aceb796f0afada6728820fa9d4691f7e273a | [
"BSD-2-Clause"
] | 298 | 2017-08-30T07:12:10.000Z | 2019-01-31T10:52:07.000Z | pyramid_oereb/contrib/data_sources/standard/sources/availability.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 4 | 2017-12-01T09:51:42.000Z | 2018-11-21T11:02:47.000Z |
from pyramid_oereb.core.sources import BaseDatabaseSource
from pyramid_oereb.core.sources.availability import AvailabilityBaseSource
class DatabaseSource(BaseDatabaseSource, AvailabilityBaseSource):
def read(self):
"""
The read method to access the standard database structure. It uses SQL-Alchemy for querying. It does
not accept any parameters nor it applies any filter on the database query. It simply loads all
content from the configured model.
"""
session = self._adapter_.get_session(self._key_)
try:
results = session.query(self._model_).all()
self.records = list()
for result in results:
self.records.append(self._record_class_(
result.municipality_fosnr,
result.theme_code,
result.available
))
finally:
session.close()
| 34.888889 | 108 | 0.632696 | 805 | 0.854565 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.286624 |
f06c0a436a6bd375e7b53e8c96203ec37cc92572 | 1,624 | py | Python | tests/fixtures/specification.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 3 | 2022-01-10T12:43:36.000Z | 2022-01-13T18:08:15.000Z | tests/fixtures/specification.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 6 | 2021-12-09T20:08:19.000Z | 2021-12-21T13:31:54.000Z | tests/fixtures/specification.py | FlyingBird95/openapi-builder | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 2 | 2021-12-17T17:26:06.000Z | 2021-12-17T17:39:00.000Z | from pytest_factoryboy import register
from tests.factories.specification import (
CallbackFactory,
ComponentsFactory,
ContactFactory,
DiscriminatorFactory,
EncodingFactory,
ExampleFactory,
ExternalDocumentationFactory,
HeaderFactory,
InfoFactory,
LicenseFactory,
LinkFactory,
MediaTypeFactory,
OAuthFlowFactory,
OAuthFlowsFactory,
OpenAPIFactory,
OperationFactory,
ParameterFactory,
PathItemFactory,
PathsFactory,
ReferenceFactory,
RequestBodyFactory,
ResponseFactory,
ResponsesFactory,
SchemaFactory,
SecurityRequirementFactory,
SecuritySchemeFactory,
ServerFactory,
ServerVariableFactory,
TagFactory,
)
register(OpenAPIFactory)
register(InfoFactory)
register(ContactFactory)
register(LicenseFactory)
register(ServerFactory)
register(ServerVariableFactory)
register(ComponentsFactory)
register(PathsFactory)
register(PathItemFactory)
register(OperationFactory)
register(ExternalDocumentationFactory)
register(ParameterFactory)
register(RequestBodyFactory)
register(MediaTypeFactory)
register(EncodingFactory)
register(ResponsesFactory)
register(ResponseFactory)
register(CallbackFactory)
register(ExampleFactory)
register(LinkFactory)
register(HeaderFactory)
register(TagFactory)
register(ReferenceFactory)
register(SchemaFactory)
register(SchemaFactory, "second_schema")
register(DiscriminatorFactory)
register(SecuritySchemeFactory)
register(OAuthFlowsFactory, "oauth_flows")
register(OAuthFlowFactory, "oauth_flow")
register(OAuthFlowFactory, "second_oauth_flow")
register(SecurityRequirementFactory)
| 24.606061 | 47 | 0.816502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.03633 |
f06e460c939f7ca739d389122382d4b13d1f8d29 | 3,856 | py | Python | app.py | samstruthers35/sqlalchemy-challenge | 0022e7459fc59a7bee85489f8d264a8aee9c01c8 | [
"ADSL"
] | null | null | null | app.py | samstruthers35/sqlalchemy-challenge | 0022e7459fc59a7bee85489f8d264a8aee9c01c8 | [
"ADSL"
] | null | null | null | app.py | samstruthers35/sqlalchemy-challenge | 0022e7459fc59a7bee85489f8d264a8aee9c01c8 | [
"ADSL"
] | null | null | null | import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify, render_template
engine = create_engine('sqlite:///hawaii.sqlite')
Base = automap_base()
Base.prepare(engine, reflect=True)
Station = Base.classes.station
Measurement = Base.classes.measurement
session = Session(engine)
app = Flask(__name__)
@app.route("/")
def index():
return(
"Welcome to the Climate App!<br />"
"Available Routes:<br />"
"/api/v1.0/precipitation<br />"
"/api/v1.0/stations<br />"
"/api/v1.0/tobs<br />"
"/api/v1.0/<start> ENTER START DATE AT END OF URL <br /> "
"/api/v1.0/<start><end> ENTER START DATE/END DATE AT END OF URL<br />"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
todays_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
twelve_months = dt.date(2017, 8, 23) - dt.timedelta(days=365)
precipitation = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date > twelve_months)
rain = []
for rains in precipitation:
row = {}
row["date"] = rains[0]
row["prcp"] = rains[1]
rain.append(row)
return jsonify(rain)
@app.route("/api/v1.0/stations")
def stations():
station_names = session.query(Station.station, Station.name).group_by(Station.station).all()
names_dict = []
for names in station_names:
row_names = {}
row_names["station name"] = names[0]
names_dict.append(row_names)
return jsonify(names_dict)
@app.route("/api/v1.0/tobs")
def tobs():
todays_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
twelve_months = dt.date(2017, 8, 23) - dt.timedelta(days=365)
tobs_session = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date > twelve_months)
temperature = []
for degrees in tobs_session:
row_tobs = {}
row_tobs["date"] = degrees[0]
row_tobs["tobs"] = degrees[1]
temperature.append(row_tobs)
return jsonify(temperature)
@app.route("/api/v1.0/<start>")
def starting(start):
start_date = dt.datetime.strptime(start, '%Y-%m-%d')
starting_date = session.query(func.max(Measurement.tobs),func.min(Measurement.tobs),func.avg(Measurement.tobs)).\
filter(Measurement.date >= start_date)
date_tobs = []
for tobs in starting_date:
tobs_json = {}
tobs_json["AVERAGE TEMPERATURE"] = tobs[2]
tobs_json["MAXIMUM TEMPERATURE"] = tobs[0]
tobs_json["MINIMUM TEMPERATURE"] = tobs[1]
date_tobs.append(tobs_json)
return jsonify(date_tobs)
@app.route("/api/v1.0/<start>/<end>")
def range(start,end):
start_range = dt.datetime.strptime(start, '%Y-%m-%d')
end_range = dt.datetime.strptime(end, '%Y-%m-%d')
date_range = session.query(func.max(Measurement.tobs),func.min(Measurement.tobs),func.avg(Measurement.tobs)).\
filter(Measurement.date >= start_range, Measurement.date <= end_range)
date_range_tobs = []
for tobs_range in date_range:
tobs_json_range = {}
tobs_json_range["AVERAGE TEMPERATURE"] = tobs_range[2]
tobs_json_range["MAXIMUM TEMPERATURE"] = tobs_range[0]
tobs_json_range["MINIMUM TEMPERATURE"] = tobs_range[1]
date_range_tobs.append(tobs_json_range)
return jsonify(date_range_tobs)
if __name__ == "__main__":
app.run(debug=True) | 33.824561 | 117 | 0.625778 | 0 | 0 | 0 | 0 | 3,296 | 0.854772 | 0 | 0 | 604 | 0.156639 |
f06e635c1ec15823a66500dd05606d30ee6110ce | 4,096 | py | Python | WebSocket_Chat_Room/chat_room_v001/handlers/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 3 | 2017-12-27T14:08:17.000Z | 2018-02-10T13:01:08.000Z | WebSocket_Chat_Room/chat_room_v001/handlers/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 4 | 2017-05-24T10:37:05.000Z | 2021-06-10T18:35:32.000Z | WebSocket_Chat_Room/chat_room_v001/handlers/login.py | MMingLeung/Python_Study | 4ff1d02d2b6dd54e96f7179fa000548936b691e7 | [
"MIT"
] | 1 | 2018-02-14T19:05:30.000Z | 2018-02-14T19:05:30.000Z | #!/usr/bin/env python
#! -*- coding: utf-8 -*-
'''
Handler for login
'''
import tornado.web
from lib.db_controller import DBController
from lib.CUSTOMIZED_SESSION.my_session import SessionFactory
class LoginHandler(tornado.web.RequestHandler):
def initialize(self):
        # Hook method: runs first, before the get/post handlers
class_ = SessionFactory.get_session()
        # Instantiate an object of the session class
self.session = class_(self)
def get(self, *args, **kwargs):
self.render('login.html', msg='')
def post(self, *args, **kwargs):
username = self.get_argument('username')
password = self.get_argument('password')
db_obj = DBController()
db_obj.search('SELECT * from userinfo WHERE username=(%s) AND password=(%s)', (username, password,))
data = db_obj.get_one()
# sq = db_obj.search('SELECT * from user2user, userinfo where user2user.user_2=userinfo.uid')
if data:
            # Friends of the logged-in user
db_obj.search(
'select user_2, username from user2user left join userinfo on userinfo.uid=user2user.user_2 where user_1=(%s)',
(data[0],))
friends = db_obj.get_all()
            # 1. Query friend_application rows whose user_recv equals the current user's id (my_nid)
            # 2. Look up the info of the users who sent those requests
            # 3. Push the results to the client for display
            # Pending friend requests
# SELECT * from friend_application left join userinfo on userinfo.uid = friend_application.user_apply where user_recv = my_nid
db_obj.db_dict_cur()
db_obj.search(
'SELECT * from friend_application '
'left join userinfo on userinfo.uid = friend_application.user_apply '
'WHERE user_recv=%s AND confirm!=1',
(data[0],))
apply_data = db_obj.get_all()
            # Groups
# SELECT * FROM user2group LEFT JOIN user_group ON group_id=user_group.gid WHERE user_id=%s
db_obj.search(
'SELECT * FROM user2group LEFT JOIN user_group ON group_id=user_group.gid WHERE user_id=%s',
(data[0],))
group_data = db_obj.get_all()
            # Group members
# SELECT * FROM user2group LEFT JOIN user_group ON group_id=user_group.gid WHERE user_id=%s',
db_obj.search(
'''
SELECT admin, block, group_id,user_id, userinfo.username FROM user2group
LEFT JOIN userinfo ON userinfo.uid = user2group.user_id
WHERE group_id in (
SELECT group_id FROM user2group
LEFT JOIN user_group ON group_id=user_group.gid
WHERE user_id=%s)
''',
(data[0],))
            # Reshape the group member rows into a dict keyed by group_id
group_member = db_obj.get_all()
group_fix_data = {}
for member in group_member:
temp_dict = {
'user_id': member['user_id'],
'username': member['username'],
'block': member['block'],
}
if member['group_id'] not in group_fix_data.keys():
group_fix_data[member['group_id']] = {}
group_fix_data[member['group_id']]['admin'] = member['admin']
group_fix_data[member['group_id']]['mems'] = []
group_fix_data[member['group_id']]['mems'].append(temp_dict)
else:
group_fix_data[member['group_id']]['mems'].append(temp_dict)
db_obj.db_close()
self.session['user_data'] = {'data': data, 'friends': friends, 'group': group_data, 'group_member': group_fix_data}
self.render('index.html',
friends=self.session['user_data']['friends'],
my_data=self.session['user_data']['data'],
add_error="",
apply_data=apply_data,
group_data=group_data,
group_member=group_fix_data,
)
else:
            self.render('login.html', msg='Incorrect username or password')
| 41.795918 | 138 | 0.541992 | 4,049 | 0.953154 | 0 | 0 | 0 | 0 | 0 | 0 | 1,863 | 0.438559 |
f06ebdf27eb473116d5a5a69d7c99a59502c6586 | 409 | py | Python | hackerrank/python/introduction/function.py | wingkwong/competitive-programming | e8bf7aa32e87b3a020b63acac20e740728764649 | [
"MIT"
] | 18 | 2020-08-27T05:27:50.000Z | 2022-03-08T02:56:48.000Z | hackerrank/python/introduction/function.py | wingkwong/competitive-programming | e8bf7aa32e87b3a020b63acac20e740728764649 | [
"MIT"
] | null | null | null | hackerrank/python/introduction/function.py | wingkwong/competitive-programming | e8bf7aa32e87b3a020b63acac20e740728764649 | [
"MIT"
] | 1 | 2020-10-13T05:23:58.000Z | 2020-10-13T05:23:58.000Z | def is_leap(year):
leap = False
# Write your logic here
# The year can be evenly divided by 4, is a leap year, unless:
# The year can be evenly divided by 100, it is NOT a leap year, unless:
# The year is also evenly divisible by 400. Then it is a leap year.
leap = (year % 4 == 0 and (year % 400 == 0 or year % 100 != 0))
return leap
year = int(input())
print(is_leap(year)) | 34.083333 | 75 | 0.628362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.545232 |
f06f16ee399ccb9faac16cda8b08d3cc4df552cb | 1,480 | py | Python | projectenv/main/forms.py | rzsaglam/project-env | f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c | [
"MIT"
] | null | null | null | projectenv/main/forms.py | rzsaglam/project-env | f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c | [
"MIT"
] | null | null | null | projectenv/main/forms.py | rzsaglam/project-env | f4c02b15cf924ba5d69d8a4a89efcc686b73aa9c | [
"MIT"
] | null | null | null | from django import forms
from .models import Paint
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
class StockForm(forms.ModelForm):
class Meta:
model = Paint
fields = "__all__"
class PaintForm(forms.ModelForm):
class Meta:
model = Paint
fields = "__all__"
def save(self, commit=True):
paint = super(PaintForm, self).save(commit=False)
if commit:
paint.save()
return paint
class NewUserForm(UserCreationForm):
username = forms.CharField(max_length=200, required=True, widget=forms.TextInput(
attrs={'class': 'input-group-text'}))
class Meta:
model = User
fields = ("username", "password1", "password2")
def save(self, commit=True):
user = super(NewUserForm, self).save(commit=False)
if commit:
user.save()
return user
class LoginForm(AuthenticationForm):
username = forms.CharField(max_length=200, required=True, widget=forms.TextInput(
attrs={'class': 'input-group-text'}))
password = forms.CharField(max_length=200, required=True, widget=forms.TextInput(
attrs={'class': 'input-group-text'}))
class Meta:
model = User
fields = ("username", "password")
| 27.924528 | 85 | 0.667568 | 1,151 | 0.777703 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.097973 |
f06f2cf97d8da48c7ae640dd4974c12d832537f5 | 3,398 | py | Python | njdate/hebdfind.py | schorrm/njdate | 5a31d944973904b75f1dbac811fc7393aaa4ed7c | [
"MIT"
] | 4 | 2019-07-16T19:58:42.000Z | 2021-11-17T14:50:17.000Z | njdate/hebdfind.py | schorrm/njdate | 5a31d944973904b75f1dbac811fc7393aaa4ed7c | [
"MIT"
] | null | null | null | njdate/hebdfind.py | schorrm/njdate | 5a31d944973904b75f1dbac811fc7393aaa4ed7c | [
"MIT"
] | null | null | null | # Takes two years, and runs an aggressive search for dates in between those two years (inclusive).
import njdate.gematria as gematria
import njdate.ej_generic as ej_generic
import string
specpunc = string.punctuation.replace('"','').replace("'","")
tr_table = str.maketrans("","",specpunc)
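# specpunc is every punctuation mark except ' and ", which Hebrew year
# strings keep as geresh/gershayim marks; tr_table strips the rest before
# tokenizing.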
def date_aggressor (search_text, begin_year, end_year):
tokens = search_text.translate(tr_table).split()
for search_year in range (begin_year, end_year+1):
if gematria.YearNoToGematria(search_year) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year))[:2])
return ej_generic.ExtractDate(query)
if gematria.YearNoToGematria(search_year, False) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year, False))[:2])
return ej_generic.ExtractDate(query)
if gematria.YearNoToGematria(search_year, prepend_heh=True) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year, False))[:2])
return ej_generic.ExtractDate(query)
return None
def date_aggressor_lamedify (search_text, begin_year, end_year):
tokens = search_text.translate(tr_table).split()
for search_year in range (begin_year, end_year+1):
if gematria.YearNoToGematria(search_year) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year))[:2])
return ej_generic.ExtractDate(query)
if gematria.YearNoToGematria(search_year, False) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year, False))[:2])
return ej_generic.ExtractDate(query)
if gematria.YearNoToGematria(search_year, False, False) + '"ל' in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year, False, False))[:2])
return ej_generic.ExtractDate(query)
return None
# Handles years written with the leading Taf (ת = 400) dropped: search the shifted-down range, then add "shift" (default 400) back to any year found.
def yshift_date_aggressor (search_text, begin_year, end_year, shift=400):
    # Shift the search window down by "shift" years first, so callers can pass the same begin/end years as the unshifted aggressors.
begin_year -= shift
end_year -= shift
tokens = search_text.translate(tr_table).split()
for search_year in range (begin_year, end_year+1):
if gematria.YearNoToGematria(search_year) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year))[:2])
return ej_generic.ForceYear(query, search_year+shift)
if gematria.YearNoToGematria(search_year, False) in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year, False))[:2])
return ej_generic.ForceYear(query, search_year+shift)
return None
def yshift_date_aggressor_lamedify (search_text, begin_year, end_year, shift=400):
tokens = search_text.translate(tr_table).split()
for search_year in range (begin_year, end_year+1):
if gematria.YearNoToGematria(search_year, False, False) + '"ל' in tokens:
query = ' '.join(search_text.partition(gematria.YearNoToGematria(search_year, False, False))[:2])
return ej_generic.ForceYear(query, search_year+shift)
return None
| 57.59322 | 110 | 0.700706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 327 | 0.096176 |
f0706f06dae68a2eb12befe8740b73ce25344c53 | 10,323 | py | Python | tests/test_cli.py | redglue/brickops | 77fbe0da295f69b2b8bfebd0ec2c8b3bfdb1046b | [
"BSD-3-Clause"
] | null | null | null | tests/test_cli.py | redglue/brickops | 77fbe0da295f69b2b8bfebd0ec2c8b3bfdb1046b | [
"BSD-3-Clause"
] | 3 | 2019-07-23T16:38:14.000Z | 2021-06-02T03:55:23.000Z | tests/test_cli.py | aquicore/apparate | bc0d9a5db2ffb863ddde4ff61ac2ac0dbc8f1bad | [
"BSD-3-Clause"
] | null | null | null | import logging
from os.path import expanduser, join
from unittest import mock
import pytest
from click.testing import CliRunner
from configparser import ConfigParser
from apparate.configure import configure
from apparate.cli_commands import upload, upload_and_update
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('apparate.cli_commands')
def test_configure_no_existing_config():
expected_stdout = (
'Databricks host (e.g. https://my-organization.cloud.databricks.com): '
'https://test_host\n'
'Databricks API token: \n'
'Repeat for confirmation: \n'
'Databricks folder for production libraries: test_folder\n'
)
filename = join(expanduser('~'), '.apparatecfg')
expected_call_list = [
mock.call(filename, encoding=None),
mock.call(filename, 'w+'),
mock.call().write('[DEFAULT]\n'),
mock.call().write('host = https://test_host\n'),
mock.call().write('token = test_token\n'),
mock.call().write('prod_folder = test_folder\n'),
mock.call().write('\n'),
]
with mock.patch('builtins.open', mock.mock_open(read_data='')) as m_open:
runner = CliRunner()
result = runner.invoke(
configure,
input=(
'https://test_host\n'
'test_token\n'
'test_token\n'
'test_folder\n'
),
)
m_open.assert_has_calls(expected_call_list, any_order=True)
assert not result.exception
assert result.output == expected_stdout
def test_configure_extra_slash_in_host():
expected_stdout = (
'Databricks host (e.g. https://my-organization.cloud.databricks.com): '
'https://test_host/\n'
'Databricks API token: \n'
'Repeat for confirmation: \n'
'Databricks folder for production libraries: test_folder\n'
)
filename = join(expanduser('~'), '.apparatecfg')
expected_call_list = [
mock.call(filename, encoding=None),
mock.call(filename, 'w+'),
mock.call().write('[DEFAULT]\n'),
mock.call().write('host = https://test_host\n'),
mock.call().write('token = test_token\n'),
mock.call().write('prod_folder = test_folder\n'),
mock.call().write('\n'),
]
with mock.patch('builtins.open', mock.mock_open(read_data='')) as m_open:
runner = CliRunner()
result = runner.invoke(
configure,
input=(
'https://test_host/\n'
'test_token\n'
'test_token\n'
'test_folder\n'
),
)
m_open.assert_has_calls(expected_call_list, any_order=True)
assert not result.exception
assert result.output == expected_stdout
def test_configure_extra_slash_in_folder():
expected_stdout = (
'Databricks host (e.g. https://my-organization.cloud.databricks.com): '
'https://test_host\n'
'Databricks API token: \n'
'Repeat for confirmation: \n'
'Databricks folder for production libraries: test_folder/\n'
)
filename = join(expanduser('~'), '.apparatecfg')
expected_call_list = [
mock.call(filename, encoding=None),
mock.call(filename, 'w+'),
mock.call().write('[DEFAULT]\n'),
mock.call().write('host = https://test_host\n'),
mock.call().write('token = test_token\n'),
mock.call().write('prod_folder = test_folder\n'),
mock.call().write('\n'),
]
with mock.patch('builtins.open', mock.mock_open(read_data='')) as m_open:
runner = CliRunner()
result = runner.invoke(
configure,
input=(
'https://test_host\n'
'test_token\n'
'test_token\n'
'test_folder/\n'
),
)
m_open.assert_has_calls(expected_call_list, any_order=True)
assert not result.exception
assert result.output == expected_stdout
def test_configure_no_http_in_host():
expected_stdout = (
'Databricks host (e.g. https://my-organization.cloud.databricks.com): '
'test_host\n'
"looks like there's an issue - make sure the host name starts "
'with http: https://test_host\n'
'Databricks API token: \n'
'Repeat for confirmation: \n'
'Databricks folder for production libraries: test_folder\n'
)
filename = join(expanduser('~'), '.apparatecfg')
expected_call_list = [
mock.call(filename, encoding=None),
mock.call(filename, 'w+'),
mock.call().write('[DEFAULT]\n'),
mock.call().write('host = https://test_host\n'),
mock.call().write('token = test_token\n'),
mock.call().write('prod_folder = test_folder\n'),
mock.call().write('\n'),
]
with mock.patch('builtins.open', mock.mock_open(read_data='')) as m_open:
runner = CliRunner()
result = runner.invoke(
configure,
input=(
'test_host\n'
'https://test_host\n'
'test_token\n'
'test_token\n'
'test_folder\n'
),
)
m_open.assert_has_calls(expected_call_list, any_order=True)
assert not result.exception
assert result.output == expected_stdout
@pytest.mark.fixture('existing_config')
@mock.patch('apparate.cli_commands._load_config')
@mock.patch('apparate.cli_commands.update_databricks')
def test_upload(update_databricks_mock, config_mock, existing_config):
config_mock.return_value = existing_config
runner = CliRunner()
result = runner.invoke(
upload,
['--path', '/path/to/egg']
)
config_mock.assert_called_once()
update_databricks_mock.assert_called_with(
logger,
'/path/to/egg',
'test_token',
'test_folder',
cleanup=False,
update_jobs=False,
)
assert not result.exception
@pytest.mark.fixture('existing_config')
@mock.patch('apparate.cli_commands._load_config')
@mock.patch('apparate.cli_commands.update_databricks')
def test_upload_all_options(
update_databricks_mock,
config_mock,
existing_config
):
config_mock.return_value = existing_config
runner = CliRunner()
result = runner.invoke(
upload,
[
'--path',
'/path/to/egg',
'--token',
'new_token',
'--folder',
'new_folder'
]
)
config_mock.assert_called_once()
update_databricks_mock.assert_called_with(
logger,
'/path/to/egg',
'new_token',
'new_folder',
cleanup=False,
update_jobs=False,
)
assert not result.exception
@pytest.mark.fixture('empty_config')
@mock.patch('apparate.cli_commands._load_config')
def test_upload_missing_token(config_mock, empty_config):
config_mock.return_value = empty_config
runner = CliRunner()
result = runner.invoke(
upload,
['--path', '/path/to/egg', '--folder', 'test_folder']
)
assert str(result.exception) == (
'no token found - either provide a command line argument or set up'
' a default by running `apparate configure`'
)
@pytest.mark.fixture('empty_config')
@mock.patch('apparate.cli_commands._load_config')
def test_upload_missing_folder(config_mock, empty_config):
config_mock.return_value = empty_config
runner = CliRunner()
result = runner.invoke(
upload,
['--path', '/path/to/egg', '--token', 'test_token']
)
assert str(result.exception) == (
'no folder found - either provide a command line argument or set up'
' a default by running `apparate configure`'
)
@pytest.mark.fixture('existing_config')
@mock.patch('apparate.cli_commands._load_config')
@mock.patch('apparate.cli_commands.update_databricks')
def test_upload_and_update_cleanup(
update_databricks_mock,
config_mock,
existing_config
):
config_mock.return_value = existing_config
runner = CliRunner()
result = runner.invoke(
upload_and_update,
['--path', '/path/to/egg']
)
config_mock.assert_called_once()
update_databricks_mock.assert_called_with(
logger,
'/path/to/egg',
'test_token',
'test_folder',
cleanup=True,
update_jobs=True,
)
assert not result.exception
@pytest.mark.fixture('existing_config')
@mock.patch('apparate.cli_commands._load_config')
@mock.patch('apparate.cli_commands.update_databricks')
def test_upload_and_update_no_cleanup(
update_databricks_mock,
config_mock,
existing_config
):
config_mock.return_value = existing_config
runner = CliRunner()
result = runner.invoke(
upload_and_update,
['--path', '/path/to/egg', '--no-cleanup']
)
config_mock.assert_called_once()
update_databricks_mock.assert_called_with(
logger,
'/path/to/egg',
'test_token',
'test_folder',
cleanup=False,
update_jobs=True,
)
assert not result.exception
@mock.patch('apparate.cli_commands._load_config')
def test_upload_and_update_missing_token(config_mock):
existing_config = ConfigParser()
existing_config['DEFAULT'] = {'prod_folder': 'test_folder'}
config_mock.return_value = existing_config
runner = CliRunner()
result = runner.invoke(
upload_and_update,
['--path', '/path/to/egg']
)
config_mock.assert_called_once()
assert str(result.exception) == (
'no token found - either provide a command line argument or set up'
' a default by running `apparate configure`'
)
@pytest.mark.fixture('empty_config')
@mock.patch('apparate.cli_commands._load_config')
def test_upload_and_update_missing_folder(config_mock, empty_config):
config_mock.return_value = empty_config
runner = CliRunner()
result = runner.invoke(
upload_and_update,
['-p', '/path/to/egg', '--token', 'test_token']
)
config_mock.assert_called_once()
assert str(result.exception) == (
'no folder found - either provide a command line argument or set up'
' a default by running `apparate configure`'
)
| 28.675 | 79 | 0.625303 | 0 | 0 | 0 | 0 | 4,932 | 0.477768 | 0 | 0 | 3,233 | 0.313184 |
f0726f920f21d92a489cfdea0b278639f7b0a413 | 4,632 | py | Python | pg_dicreate.py | zhuyeaini9/pytorch_test | 9654f7da144c71a65ec2665bc7128aaca5325302 | [
"Apache-2.0"
] | null | null | null | pg_dicreate.py | zhuyeaini9/pytorch_test | 9654f7da144c71a65ec2665bc7128aaca5325302 | [
"Apache-2.0"
] | null | null | null | pg_dicreate.py | zhuyeaini9/pytorch_test | 9654f7da144c71a65ec2665bc7128aaca5325302 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
import torch.nn as nn
import gym
from gym import spaces
import torch.optim as optim
from torch.distributions import Categorical
import random
import numpy as np
class Net(nn.Module):
def __init__(self, input_space, output_space):
super(Net, self).__init__()
self.fc1 = nn.Linear(input_space, 64)
self.fc2 = nn.Linear(64, 64)
self.fc3 = nn.Linear(64, output_space)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.softmax(self.fc3(x), dim=-1)
return x
class PGAgent(object):
def __init__(self, env_name,batch_size = 3):
self.m_batch_size = batch_size
self.m_env = gym.make(env_name)
self.m_env._max_episode_steps = 1000
self.m_action_type = self.get_action_type()
self.m_action_size = self.get_action_size()
self.m_net = Net(self.get_input_space(), self.get_output_space())
self.m_adam = optim.Adam(params=self.m_net.parameters(), lr=0.01)
self.m_batch_reward = []
self.m_batch_state = []
self.m_batch_action = []
def get_input_space(self):
return self.m_env.observation_space.shape[0]
def get_action_size(self):
if self.m_action_type == 1:
return self.m_env.action_space.n
def get_output_space(self):
if self.m_action_type == 1:
return self.m_env.action_space.n
def get_action_type(self):
if isinstance(self.m_env.action_space, spaces.Discrete):
action_type = 1
else:
action_type = 2
return action_type
def get_action(self, police_action, step_index, step_all):
if self.m_action_type == 1:
action_distribution = Categorical(police_action)
return action_distribution.sample().item()
def calculate_discounted_rewards_normal(self, reward_list):
re = self.calculate_discounted_rewards(reward_list)
return self.normalise_rewards(re)
def calculate_discounted_rewards(self, reward_list):
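        # Backward pass over one episode's rewards: G_t = r_t + 0.99 * G_{t+1}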
cur_re = 0
re_discounted_reward = []
for re in reversed(reward_list):
re = re + 0.99 * cur_re
cur_re = re
re_discounted_reward.append(re)
re_discounted_reward.reverse()
return re_discounted_reward
def normalise_rewards(self, rewards):
mean_reward = np.mean(rewards)
std_reward = np.std(rewards)
return (rewards - mean_reward) / (std_reward + 1e-8)
def reset(self):
self.m_batch_state = []
self.m_batch_action = []
self.m_batch_reward = []
def step(self, step_index, step_all):
self.reset()
state = self.m_env.reset()
batch = 0
reward_record = []
reward_list = []
action_list = []
state_list = []
while True:
            # detach() so that action selection during the rollout does not
            # track gradients; the policy-gradient loss graph is rebuilt
            # below from the stored states and actions
out_action = self.m_net(torch.tensor(state).float().unsqueeze(dim=0)).detach()
out_action = out_action.squeeze(0)
tar_action = self.get_action(out_action, step_index, step_all)
new_state, reward, done, _ = self.m_env.step(tar_action)
reward_list.append(reward)
action_list.append(tar_action)
state_list.append(state)
state = new_state
if done:
batch += 1
self.m_batch_reward.extend(self.calculate_discounted_rewards_normal(reward_list))
self.m_batch_action.extend(action_list)
self.m_batch_state.extend(state_list)
reward_record.append(sum(reward_list))
reward_list = []
action_list = []
state_list = []
state = self.m_env.reset()
if batch == self.m_batch_size:
break
print(step_index, reward_record)
state_tensor = torch.FloatTensor(self.m_batch_state)
reward_tensor = torch.FloatTensor(self.m_batch_reward)
action_tensor = torch.LongTensor(self.m_batch_action)
# Calculate loss
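        # REINFORCE: loss = -mean(G_t * log pi(a_t | s_t)) over the collected batch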
log_prob = torch.log(self.m_net(state_tensor))
selected_logprobs = reward_tensor * log_prob[np.arange(len(action_tensor)), action_tensor]
loss = -selected_logprobs.mean()
self.m_adam.zero_grad()
# Calculate gradients
loss.backward()
# Apply gradients
self.m_adam.step()
def run_n_step(self, n):
for i in range(n):
self.step(i, n)
agent = PGAgent('CartPole-v0')
agent.run_n_step(300)
| 31.087248 | 98 | 0.617012 | 4,365 | 0.942358 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.018998 |
f072cdc953dde5ba78b66b40195edc1332c89bcf | 346 | py | Python | functions/dissectData/lambda_handler.py | zinedine-zeitnot/anomaly-detection | 2287f6488d47884d97ff618c24c379d869eb51f5 | [
"MIT"
] | 3 | 2021-04-30T12:51:01.000Z | 2021-06-04T12:51:32.000Z | functions/dissectData/lambda_handler.py | zinedine-zeitnot/anomaly-detection | 2287f6488d47884d97ff618c24c379d869eb51f5 | [
"MIT"
] | null | null | null | functions/dissectData/lambda_handler.py | zinedine-zeitnot/anomaly-detection | 2287f6488d47884d97ff618c24c379d869eb51f5 | [
"MIT"
] | null | null | null | from data_dissector import DataDissector
def handler(event, _):
switchpoint_trio = DataDissector.dissect_data(data=event['data'])
return {
"switchpoint": switchpoint_trio.switchpoint,
"preSwitchAverage": switchpoint_trio.pre_switch_average,
"postSwitchAverage": switchpoint_trio.post_switch_average,
}
| 28.833333 | 69 | 0.731214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.16185 |
f0730fe6794bb60447a6e7f8e2d6de7cd6fc45d8 | 1,220 | py | Python | Chapter 3-Regression/2.py | FatiniNadhirah5/Datacamp-Machine-Learning-with-Apache-Spark-2019 | a0ef5f34c5a0aea222359a5085386f6a21611e7e | [
"FSFAP"
] | 8 | 2020-05-02T20:24:38.000Z | 2021-04-30T21:44:22.000Z | Chapter 3-Regression/2.py | FatiniNadhirah5/Machine-Learning-with-Apache-Spark | a0ef5f34c5a0aea222359a5085386f6a21611e7e | [
"FSFAP"
] | null | null | null | Chapter 3-Regression/2.py | FatiniNadhirah5/Machine-Learning-with-Apache-Spark | a0ef5f34c5a0aea222359a5085386f6a21611e7e | [
"FSFAP"
] | 9 | 2020-05-17T17:44:37.000Z | 2022-03-20T12:58:42.000Z | # Flight duration model: Just distance
# In this exercise you'll build a regression model to predict flight duration (the duration column).
# For the moment you'll keep the model simple, including only the distance of the flight (the km column) as a predictor.
# The data are in flights. The first few records are displayed in the terminal. These data have also been split into training and testing sets and are available as flights_train and flights_test.
# Instructions
# 100 XP
# Create a linear regression object. Specify the name of the label column. Fit it to the training data.
# Make predictions on the testing data.
# Create a regression evaluator object and use it to evaluate RMSE on the testing data.
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator
# Create a regression object and train on training data
regression = LinearRegression(labelCol='duration').fit(flights_train)
# Create predictions for the testing data and take a look at the predictions
predictions = regression.transform(flights_test)
predictions.select('duration', 'prediction').show(5, False)
# Calculate the RMSE (the default metricName for RegressionEvaluator is 'rmse')
RegressionEvaluator(labelCol='duration').evaluate(predictions) | 48.8 | 195 | 0.80082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 897 | 0.735246 |
f07320187da09dd13226ebf15b281c23c4b206d4 | 486 | py | Python | gipsy/admin.py | marwahaha/gipsy-1 | 5d31c37cff26b9b26cd6d24e1b6de13c81ebbe6e | [
"MIT"
] | 10 | 2015-02-11T02:11:33.000Z | 2018-03-22T13:08:33.000Z | gipsy/admin.py | marwahaha/gipsy-1 | 5d31c37cff26b9b26cd6d24e1b6de13c81ebbe6e | [
"MIT"
] | 9 | 2015-01-22T15:45:44.000Z | 2015-10-19T14:18:09.000Z | gipsy/admin.py | marwahaha/gipsy-1 | 5d31c37cff26b9b26cd6d24e1b6de13c81ebbe6e | [
"MIT"
] | 7 | 2015-04-28T15:20:57.000Z | 2019-07-16T03:45:12.000Z | from django.contrib import admin
class ChildrenInline(admin.TabularInline):
sortable_field_name = "order"
class GipsyMenu(admin.ModelAdmin):
inlines = [ChildrenInline]
exclude = ('parent',)
list_display = ['name', 'order']
ordering = ['order']
def get_queryset(self, request):
"""Overrides default queryset to only display parent items"""
query = super(GipsyMenu, self).get_queryset(request)
return query.filter(parent__isnull=True)
| 27 | 69 | 0.693416 | 447 | 0.919753 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.197531 |
f073a799e8b36554db301e779cfd3eed55011853 | 4,727 | py | Python | QDeblend/process/host_profiles.py | brandherd/QDeblend3D | 4e195ca027cf9fb65962ce66bf5d1f3e119b4f18 | [
"MIT"
] | null | null | null | QDeblend/process/host_profiles.py | brandherd/QDeblend3D | 4e195ca027cf9fb65962ce66bf5d1f3e119b4f18 | [
"MIT"
] | null | null | null | QDeblend/process/host_profiles.py | brandherd/QDeblend3D | 4e195ca027cf9fb65962ce66bf5d1f3e119b4f18 | [
"MIT"
] | null | null | null | import numpy, math
from scipy import special
"""
The Sersic Profile
Formulae for Sersic profile taken from Graham & Driver (2005)
bibcode: 2005PASA...22..118G
"""
class Sersic:
def __init__(self, size, x_c, y_c, mag, n, r_e, e=0., theta=0., osfactor=10,
osradius=2):
self.size = size
self.x_c = x_c
self.y_c = y_c
self.n = n
self.r_e = r_e
self.e = e
self.theta = theta
self.osf = osfactor
self.osr = osradius
flux = 10**(-0.4*mag)
self._get_kappa()
self.sigma_e = flux/(2*math.pi*(r_e**2)*math.exp(self.kappa)*n*
(self.kappa)**(-2.*n)*special.gamma(2.*n)*(1.-e))
self._make_array()
def _get_kappa(self):
init = 1.9992*self.n - 0.3271
self.kappa = self.__newton_it(init)
def __newton_it(self, x0, epsilon=1e-8):
for i in range(2000):
x0 -= self.__gammainc(x0)[0]/self.__gammainc(x0)[1]
if abs(self.__gammainc(x0)[0]) <= epsilon:
break
if i == 1999:
            print('Warning: Iteration failed!')
return x0
def __gammainc(self, x):
f = special.gammainc(2*self.n, x) - 0.5
df = (math.exp(-x) * x**(2.*self.n - 1.))/special.gamma(2.*self.n)
return (f, df)
def _make_array(self):
self.array = numpy.fromfunction(self._draw, self.size, dtype='float32')
if self.osf != 1:
csize = ((2*self.osr+1)*self.osf, (2*self.osr+1)*self.osf)
x_n = int(round(self.x_c))
y_n = int(round(self.y_c))
self.x_c += (self.osr - round(self.x_c))
self.y_c += (self.osr - round(self.y_c))
self.x_c *= self.osf
self.y_c *= self.osf
self.x_c += 0.5*(self.osf-1.)
self.y_c += 0.5*(self.osf-1.)
self.r_e *= self.osf
self.sigma_e /= (self.osf)**2
carray = numpy.fromfunction(self._draw, csize, dtype='float32')
s1_size = (2*self.osr+1, (2*self.osr+1)*self.osf, self.osf)
s2_size = (2*self.osr+1, 2*self.osr+1, self.osf)
step1 = numpy.sum(numpy.reshape(carray, s1_size, 'C'), axis=2)
step2 = numpy.sum(numpy.reshape(step1, s2_size, 'F'), axis=2)
self.array[y_n-self.osr:y_n+self.osr+1,
x_n-self.osr:x_n+self.osr+1] = step2
def _draw(self, y, x):
u = (x-self.x_c)*math.sin(self.theta)-(y-self.y_c)*math.cos(self.theta)
v = (y-self.y_c)*math.sin(self.theta)+(x-self.x_c)*math.cos(self.theta)
r = numpy.sqrt(u**2 + (v/(1. - self.e))**2)
return self.sigma_e*numpy.exp(-self.kappa*((r/self.r_e)**(1/self.n)-1))
def cut_area(in_array, center, radius, output=''):
x_i = round(center[0], 0)
y_i = round(center[1], 0)
shape = in_array.shape
out_array = numpy.zeros((2*radius+1, 2*radius+1), dtype='float32')
out_shape = out_array.shape
xmin = max(0, int(x_i - radius))
xmax = min(shape[1], int(x_i + radius + 1))
ymin = max(0, int(y_i - radius))
ymax = min(shape[0], int(y_i + radius + 1))
xlo = max(0, int(radius - x_i))
xhi = out_shape[1] - max(0, int(x_i + radius + 1 - shape[1]))
ylo = max(0, int(radius - y_i))
yhi = out_shape[0] - max(0, int(y_i + radius + 1 - shape[0]))
out_array[ylo:yhi,xlo:xhi] = in_array[ymin:ymax,xmin:xmax]
if output == 'full':
filled_pix = numpy.zeros(out_shape, dtype='int16')
filled_pix[ylo:yhi,xlo:xhi] += 1
return out_array, filled_pix
else:
return out_array
def paste_area(in_array, out_shape, refpix_in, refpix_out, out_array=None):
xpix_in = int(round(refpix_in[0], 0))
ypix_in = int(round(refpix_in[1], 0))
xpix_out = int(round(refpix_out[0], 0))
ypix_out = int(round(refpix_out[1], 0))
in_shape = in_array.shape
xmin = max(0, xpix_in - xpix_out)
xmax = in_shape[1] - max(0, (in_shape[1]-xpix_in) - (out_shape[1]-xpix_out))
ymin = max(0, ypix_in - ypix_out)
ymax = in_shape[0] - max(0, (in_shape[0]-ypix_in) - (out_shape[0]-ypix_out))
xlo = max(0, xpix_out - xpix_in)
xhi = out_shape[1] - max(0, (out_shape[1]-xpix_out) - (in_shape[1]-xpix_in))
ylo = max(0, ypix_out - ypix_in)
yhi = out_shape[0] - max(0, (out_shape[0]-ypix_out) - (in_shape[0]-ypix_in))
if out_array is None:
out_array = numpy.zeros(out_shape, dtype='float32')
if ylo < out_array.shape[0] and yhi > 0:
if xlo < out_array.shape[1] and xhi > 0:
out_array[ylo:yhi, xlo:xhi] = in_array[ymin:ymax, xmin:xmax]
return out_array | 34.757353 | 80 | 0.557436 | 2,646 | 0.559763 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.042733 |
f07411bf6835efa66845aedc9d0915e9f4597ba2 | 1,138 | py | Python | UnitTests/FullAtomModel/CoordsTransform/test_forward.py | johahi/TorchProteinLibrary | b1fc9faa9b51c4550e5f754d075766ba38e0f8a0 | [
"MIT"
] | null | null | null | UnitTests/FullAtomModel/CoordsTransform/test_forward.py | johahi/TorchProteinLibrary | b1fc9faa9b51c4550e5f754d075766ba38e0f8a0 | [
"MIT"
] | null | null | null | UnitTests/FullAtomModel/CoordsTransform/test_forward.py | johahi/TorchProteinLibrary | b1fc9faa9b51c4550e5f754d075766ba38e0f8a0 | [
"MIT"
] | null | null | null | import sys
import os
import torch
import numpy as np
from TorchProteinLibrary.FullAtomModel.CoordsTransform import CoordsTranslate, getRandomTranslation, getBBox, CoordsRotate, getRandomRotation
from TorchProteinLibrary.FullAtomModel import Angles2Coords, Coords2TypedCoords
def test_translation(coords, num_atoms):
translate = CoordsTranslate()
a,b = getBBox(coords, num_atoms)
center = (a+b)*0.5
print (center)
centered_coords = translate(coords, -center, num_atoms)
a,b = getBBox(centered_coords, num_atoms)
center = (a+b)*0.5
print(center)
def test_rotation(coords, num_atoms):
batch_size = num_atoms.size(0)
R = getRandomRotation(batch_size)
rotate = CoordsRotate()
rotated = rotate(coords, R, num_atoms)
print(rotated)
if __name__=='__main__':
sequences = ['GGGGGG', 'GGAARRRRRRRRR']
angles = torch.zeros(2, 7,len(sequences[1]), dtype=torch.double)
angles[:,0,:] = -1.047
angles[:,1,:] = -0.698
angles[:,2:,:] = 110.4*np.pi/180.0
a2c = Angles2Coords()
protein, res_names, atom_names, num_atoms = a2c(angles, sequences)
test_translation(protein, num_atoms)
test_rotation(protein, num_atoms)
| 25.863636 | 141 | 0.748682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.028998 |
f076212c69c217204a0f335bc5923354550eed68 | 671 | py | Python | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | eduardojdiniz/CompNeuro | 20269e66540dc4e802273735c97323020ee37406 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 2,294 | 2020-05-11T12:05:35.000Z | 2022-03-28T21:23:34.000Z | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 629 | 2020-05-11T15:42:26.000Z | 2022-03-29T12:23:35.000Z | tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | pellet/course-content | bb383857992469e0e7a9c36639ac0d05e842d9bd | [
"CC-BY-4.0",
"BSD-3-Clause"
] | 917 | 2020-05-11T12:47:53.000Z | 2022-03-31T12:14:41.000Z |
# Set random number generator
np.random.seed(2020)
# Initialize step_end, n, t_range, v and i
step_end = int(t_max / dt)
n = 50
t_range = np.linspace(0, t_max, num=step_end)
v_n = el * np.ones([n, step_end])
i = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random([n, step_end]) - 1))
# Loop for step_end - 1 steps
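# (each iteration below is one forward-Euler step of the LIF equation
#  tau * dV/dt = E_L - V + R * I)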
for step in range(1, step_end):
# Compute v_n
v_n[:, step] = v_n[:, step - 1] + (dt / tau) * (el - v_n[:, step - 1] + r * i[:, step])
# Plot figure
with plt.xkcd():
plt.figure()
plt.title('Multiple realizations of $V_m$')
plt.xlabel('time (s)')
plt.ylabel('$V_m$ (V)')
plt.plot(t_range, v_n.T, 'k', alpha=0.3)
plt.show() | 25.807692 | 90 | 0.600596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.271237 |
f076aaf49a3d8fba6fb5ba17c6020bb113d2de01 | 5,417 | py | Python | src/jsonengine/main.py | youhengzhou/json-crud-engine | 8ee614af6dddbe1236a78a7debf71048f476a3ff | [
"MIT"
] | 2 | 2021-07-02T04:33:36.000Z | 2022-01-09T23:40:30.000Z | src/jsonengine/main.py | youhengzhou/json-crud-engine | 8ee614af6dddbe1236a78a7debf71048f476a3ff | [
"MIT"
] | null | null | null | src/jsonengine/main.py | youhengzhou/json-crud-engine | 8ee614af6dddbe1236a78a7debf71048f476a3ff | [
"MIT"
] | null | null | null | # JSON engine 21 9 16
# database file: eng.json
# engine module: eng.py
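# Minimal usage sketch (assuming the module is imported as `eng`; the keys
# and values are made up for illustration):
#     eng.create({'a': 1})          # writes <cwd>\json_engine_database\eng.json
#     eng.create({'b': 2}, 'sub')   # writes ...\json_engine_database\sub\eng.json
#     eng.patch_kv('c', 3)          # merges key 'c' into the base eng.json
#     eng.retrieve_k('a')           # -> 1, or False if the key is missing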
import os
import json
path = os.getcwd() + '\\json_engine_database\\'
path_string = ''
def set_path(string):
global path
path = os.getcwd() + string
def dictionary_kv(dictionary, key, value):
dictionary[key] = value
return dictionary
def set_path_string(args,create_flag):
global path_string
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string)==False:
if create_flag == True:
os.makedirs(path + path_string)
else:
return False
return path_string
def create(dictionary, *args):
path_string = set_path_string(args,True)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(dictionary, outfile, indent=4)
def retrieve(*args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
return(json.load(f))
def retrieve_k(key, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
    with open(path + path_string + 'eng.json', 'r') as f:
        data = json.load(f)
    if key in data:
        return(data[key])
    else:
        return False
def update(dictionary, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(dictionary, outfile, indent=4)
return True
def update_kv(key, value, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump({key: value}, outfile, indent=4)
return True
def patch(dictionary, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data=(json.load(f))
data.update(dictionary)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
def patch_kv(key, value, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data=(json.load(f))
data.update({key: value})
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
def delete(*args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
os.remove(path + path_string + 'eng.json')
os.rmdir(path + path_string)
return True
else:
return False
def delete_k(key, *args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
    if os.path.exists(path + path_string + 'eng.json'):
        with open(path + path_string + 'eng.json', 'r') as f:
            data = json.load(f)
        if key in data:
            data.pop(key)
            with open(path + path_string + 'eng.json', 'w') as outfile:
                json.dump(data, outfile, indent=4)
            return True
        else:
            return False
else:
return False
def display(*args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
print(json.load(f))
return True
else:
print('The selected file does not exist')
return False
def display_key(key, *args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
    if os.path.exists(path + path_string + 'eng.json'):
        with open(path + path_string + 'eng.json', 'r') as f:
            data = json.load(f)
        if key in data:
            print(key + ' ' + str(data[key]))
            return True
else:
print('The selected file does not exist')
return False
def display_nkv(key, *args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
    if os.path.exists(path + path_string + 'eng.json'):
        with open(path + path_string + 'eng.json', 'r') as f:
            data = json.load(f)
        if key in data:
            data.pop(key, 'key not found')
            print(data)
            return True
else:
print('The selected file does not exist')
return False
def display_ind(*args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
print(json.dumps(json.load(f), indent=4))
else:
print('The selected file does not exist')
def display_ind_nkv(key, *args):
    path_string = ''
    if (args):
        path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
data = json.load(f)
data.pop(key,'key not found')
print(json.dumps(data, indent=4))
else:
print('The selected file does not exist')
| 31.132184 | 75 | 0.568027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 637 | 0.117593 |
f076d7596035ea99f16b2ee688410ac4e2c0be9a | 2,292 | py | Python | tests/coro.py | dshean/sliderule-python | 3cf9a6c65987705354cb536d71f85a32fbb24d15 | [
"BSD-3-Clause"
] | 1 | 2021-04-09T22:01:33.000Z | 2021-04-09T22:01:33.000Z | tests/coro.py | dshean/sliderule-python | 3cf9a6c65987705354cb536d71f85a32fbb24d15 | [
"BSD-3-Clause"
] | null | null | null | tests/coro.py | dshean/sliderule-python | 3cf9a6c65987705354cb536d71f85a32fbb24d15 | [
"BSD-3-Clause"
] | null | null | null | # python
import sys
import h5coro
###############################################################################
# DATA
###############################################################################
# set resource
resource = "file:///data/ATLAS/ATL06_20200714160647_02950802_003_01.h5"
# expected single read
h_li_exp_1 = [3432.17578125, 3438.776611328125, 3451.01123046875, 3462.688232421875, 3473.559326171875]
# expected parallel read
h_li_exp_2 = { '/gt1l/land_ice_segments/h_li': [3432.17578125, 3438.776611328125, 3451.01123046875, 3462.688232421875, 3473.559326171875],
'/gt2l/land_ice_segments/h_li': [3263.659912109375, 3258.362548828125, 3.4028234663852886e+38, 3233.031494140625, 3235.200927734375],
'/gt3l/land_ice_segments/h_li': [3043.489013671875, 3187.576171875, 3.4028234663852886e+38, 4205.04248046875, 2924.724365234375]}
###############################################################################
# UTILITY FUNCTIONS
###############################################################################
def check_results(act, exp):
if type(exp) == dict:
for dataset in exp:
for i in range(len(exp[dataset])):
if exp[dataset][i] != act[dataset][i]:
print("Failed parallel read test")
return False
print("Passed parallel read test")
return True
else:
for i in range(len(exp)):
if exp[i] != act[i]:
print("Failed single read test")
return False
print("Passed single read test")
return True
###############################################################################
# MAIN
###############################################################################
if __name__ == '__main__':
# Open H5Coro File #
h5file = h5coro.file(resource)
# Perform Single Read #
h_li_1 = h5file.read("/gt1l/land_ice_segments/h_li", 0, 19, 5)
check_results(h_li_1, h_li_exp_1)
# Perform Parallel Read #
datasets = [["/gt1l/land_ice_segments/h_li", 0, 19, 5],
["/gt2l/land_ice_segments/h_li", 0, 19, 5],
["/gt3l/land_ice_segments/h_li", 0, 19, 5]]
h_li_2 = h5file.readp(datasets)
check_results(h_li_2, h_li_exp_2)
| 38.2 | 149 | 0.505236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,025 | 0.447208 |
f077b57af7bb1555b754ae7c06fad787a7e42f43 | 30,165 | py | Python | lib/interpreter.py | xraypy/_xraylarch_attic | a78a2d257bccb081ad15c43c831dee51d0b4845a | [
"BSD-3-Clause"
] | 1 | 2019-04-23T02:30:00.000Z | 2019-04-23T02:30:00.000Z | lib/interpreter.py | xraypy/_xraylarch_attic | a78a2d257bccb081ad15c43c831dee51d0b4845a | [
"BSD-3-Clause"
] | null | null | null | lib/interpreter.py | xraypy/_xraylarch_attic | a78a2d257bccb081ad15c43c831dee51d0b4845a | [
"BSD-3-Clause"
] | null | null | null | """
Main Larch interpreter
Safe(ish) evaluator of python expressions, using ast module.
The emphasis here is on mathematical expressions, and so
numpy functions are imported if available and used.
"""
from __future__ import division, print_function
import os
import sys
import ast
import math
import numpy
from . import builtins
from . import site_config
from .symboltable import SymbolTable, Group, isgroup
from .larchlib import LarchExceptionHolder, Procedure, DefinedVariable
from .utils import Closure
OPERATORS = {ast.Is: lambda a, b: a is b,
ast.IsNot: lambda a, b: a is not b,
ast.In: lambda a, b: a in b,
ast.NotIn: lambda a, b: a not in b,
ast.Add: lambda a, b: a + b,
ast.BitAnd: lambda a, b: a & b,
ast.BitOr: lambda a, b: a | b,
ast.BitXor: lambda a, b: a ^ b,
ast.Div: lambda a, b: a / b,
ast.FloorDiv: lambda a, b: a // b,
ast.LShift: lambda a, b: a << b,
ast.RShift: lambda a, b: a >> b,
ast.Mult: lambda a, b: a * b,
ast.Pow: lambda a, b: a ** b,
ast.Sub: lambda a, b: a - b,
ast.Mod: lambda a, b: a % b,
ast.And: lambda a, b: a and b,
ast.Or: lambda a, b: a or b,
ast.Eq: lambda a, b: a == b,
ast.Gt: lambda a, b: a > b,
ast.GtE: lambda a, b: a >= b,
ast.Lt: lambda a, b: a < b,
ast.LtE: lambda a, b: a <= b,
ast.NotEq: lambda a, b: a != b,
ast.Invert: lambda a: ~a,
ast.Not: lambda a: not a,
ast.UAdd: lambda a: +a,
ast.USub: lambda a: -a}
class Interpreter:
"""larch program compiler and interpreter.
This module compiles expressions and statements to AST representation,
using python's ast module, and then executes the AST representation
  using a custom SymbolTable for named objects (variables, functions).
This then gives a restricted version of Python, with slightly modified
namespace rules. The program syntax here is expected to be valid Python,
but that may have been translated as with the inputText module.
The following Python syntax is not supported:
Exec, Lambda, Class, Global, Generators, Yield, Decorators
In addition, Function is greatly altered so as to allow a Larch procedure.
"""
supported_nodes = ('arg', 'assert', 'assign', 'attribute', 'augassign',
'binop', 'boolop', 'break', 'call', 'compare',
'continue', 'delete', 'dict', 'ellipsis',
'excepthandler', 'expr', 'expression', 'extslice',
'for', 'functiondef', 'if', 'ifexp', 'import',
'importfrom', 'index', 'interrupt', 'list',
'listcomp', 'module', 'name', 'num', 'pass',
'print', 'raise', 'repr', 'return', 'slice', 'str',
'subscript', 'tryexcept', 'tuple', 'unaryop',
'while')
def __init__(self, symtable=None, writer=None):
self.writer = writer or sys.stdout
if symtable is None:
symtable = SymbolTable(larch=self)
self.symtable = symtable
self._interrupt = None
self.error = []
self.expr = None
self.retval = None
self.func = None
self.fname = '<stdin>'
self.lineno = 0
builtingroup = getattr(symtable,'_builtin')
mathgroup = getattr(symtable,'_math')
setattr(mathgroup, 'j', 1j)
for sym in builtins.from_math:
setattr(mathgroup, sym, getattr(math, sym))
for sym in builtins.from_builtin:
setattr(builtingroup, sym, __builtins__[sym])
for sym in builtins.from_numpy:
try:
setattr(mathgroup, sym, getattr(numpy, sym))
except AttributeError:
pass
for fname, sym in list(builtins.numpy_renames.items()):
setattr(mathgroup, fname, getattr(numpy, sym))
for fname, fcn in list(builtins.local_funcs.items()):
setattr(builtingroup, fname,
Closure(func=fcn, _larch=self, _name=fname))
setattr(builtingroup, 'definevar',
Closure(func=self.set_definedvariable))
# add all plugins in standard plugins folder
plugins_dir = os.path.join(site_config.sys_larchdir, 'plugins')
for pname in os.listdir(plugins_dir):
pdir = os.path.join(plugins_dir, pname)
if os.path.isdir(pdir):
self.add_plugin(pdir)
self.node_handlers = dict(((node, getattr(self, "on_%s" % node))
for node in self.supported_nodes))
def add_plugin(self, mod, **kws):
"""add plugin components from plugin directory"""
builtins._addplugin(mod, _larch=self, **kws)
def set_definedvariable(self, name, expr):
"""define a defined variable (re-evaluate on access)"""
self.symtable.set_symbol(name,
DefinedVariable(expr=expr, _larch=self))
def unimplemented(self, node):
"unimplemented nodes"
self.raise_exception(node, exc=NotImplementedError,
msg="'%s' not supported" % (node.__class__.__name__))
def raise_exception(self, node, exc=None, msg='', expr=None,
fname=None, lineno=None, func=None):
"add an exception"
if self.error is None:
self.error = []
if expr is None:
expr = self.expr
if fname is None:
fname = self.fname
if lineno is None:
lineno = self.lineno
if func is None:
func = self.func
if len(self.error) > 0 and not isinstance(node, ast.Module):
msg = '%s' % msg
err = LarchExceptionHolder(node, exc=exc, msg=msg, expr=expr,
fname=fname, lineno=lineno, func=func)
self._interrupt = ast.Break()
self.error.append(err)
self.symtable._sys.last_error = err
#raise RuntimeError
# main entry point for Ast node evaluation
# parse: text of statements -> ast
# run: ast -> result
# eval: string statement -> result = run(parse(statement))
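    #  e.g. eval('1 + 2') is equivalent to run(parse('1 + 2')), with any
    #  failure recorded in self.error instead of propagating to the caller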
def parse(self, text, fname=None, lineno=-1):
"""parse statement/expression to Ast representation """
self.expr = text
try:
return ast.parse(text)
except:
self.raise_exception(None, exc=SyntaxError, msg='Syntax Error',
expr=text, fname=fname, lineno=lineno)
def run(self, node, expr=None, func=None,
fname=None, lineno=None, with_raise=False):
"""executes parsed Ast representation for an expression"""
# Note: keep the 'node is None' test: internal code here may run
# run(None) and expect a None in return.
# print(" Run", node, expr)
if node is None:
return None
if isinstance(node, str):
node = self.parse(node)
if lineno is not None:
self.lineno = lineno
if fname is not None:
self.fname = fname
if expr is not None:
self.expr = expr
if func is not None:
self.func = func
# get handler for this node:
# on_xxx with handle nodes of type 'xxx', etc
if node.__class__.__name__.lower() not in self.node_handlers:
return self.unimplemented(node)
handler = self.node_handlers[node.__class__.__name__.lower()]
# run the handler: this will likely generate
# recursive calls into this run method.
try:
ret = handler(node)
if isinstance(ret, enumerate):
ret = list(ret)
return ret
except:
self.raise_exception(node, expr=self.expr,
fname=self.fname, lineno=self.lineno)
def __call__(self, expr, **kw):
return self.eval(expr, **kw)
def eval(self, expr, fname=None, lineno=0):
"""evaluates a single statement"""
self.fname = fname
self.lineno = lineno
self.error = []
try:
node = self.parse(expr, fname=fname, lineno=lineno)
except RuntimeError:
errmsg = sys.exc_info()[1]
if len(self.error) > 0:
errtype, errmsg = self.error[0].get_error()
return
out = None
try:
return self.run(node, expr=expr, fname=fname, lineno=lineno)
except RuntimeError:
return
def run_init_scripts(self):
for fname in site_config.init_files:
if os.path.exists(fname):
try:
builtins._run(filename=fname, _larch=self,
printall = True)
except:
self.raise_exception(None, exc=RuntimeError,
msg='Initialization Error')
def dump(self, node, **kw):
"simple ast dumper"
return ast.dump(node, **kw)
# handlers for ast components
def on_expr(self, node):
"expression"
return self.run(node.value) # ('value',)
def on_index(self, node):
"index"
return self.run(node.value) # ('value',)
def on_return(self, node): # ('value',)
"return statement"
self.retval = self.run(node.value)
return
def on_repr(self, node):
"repr "
return repr(self.run(node.value)) # ('value',)
def on_module(self, node): # ():('body',)
"module def"
out = None
for tnode in node.body:
out = self.run(tnode)
return out
def on_expression(self, node):
"basic expression"
return self.on_module(node) # ():('body',)
def on_pass(self, node):
"pass statement"
return None # ()
def on_ellipsis(self, node):
"ellipses"
return Ellipsis
# for break and continue: set the instance variable _interrupt
def on_interrupt(self, node): # ()
"interrupt handler"
self._interrupt = node
return node
def on_break(self, node):
"break"
return self.on_interrupt(node)
def on_continue(self, node):
"continue"
return self.on_interrupt(node)
def on_arg(self, node):
"arg for function definitions"
return node.arg
def on_assert(self, node): # ('test', 'msg')
"assert statement"
testval = self.run(node.test)
if not testval:
self.raise_exception(node, exc=AssertionError, msg=node.msg)
return True
def on_list(self, node): # ('elt', 'ctx')
"list"
return [self.run(e) for e in node.elts]
def on_tuple(self, node): # ('elts', 'ctx')
"tuple"
return tuple(self.on_list(node))
def on_dict(self, node): # ('keys', 'values')
"dictionary"
nodevals = list(zip(node.keys, node.values))
run = self.run
return dict([(run(k), run(v)) for k, v in nodevals])
def on_num(self, node):
'return number'
return node.n # ('n',)
def on_str(self, node):
'return string'
return node.s # ('s',)
def on_name(self, node): # ('id', 'ctx')
""" Name node """
ctx = node.ctx.__class__
if ctx == ast.Del:
val = self.symtable.del_symbol(node.id)
elif ctx == ast.Param: # for Function Def
val = str(node.id)
else:
# val = self.symtable.get_symbol(node.id)
try:
val = self.symtable.get_symbol(node.id)
except (NameError, LookupError):
msg = "name '%s' is not defined" % node.id
self.raise_exception(node, msg=msg)
if isinstance(val, DefinedVariable):
val = val.evaluate()
return val
def node_assign(self, node, val):
"""here we assign a value (not the node.value object) to a node
this is used by on_assign, but also by for, list comprehension, etc.
"""
if len(self.error) > 0:
return
if node.__class__ == ast.Name:
sym = self.symtable.set_symbol(node.id, value=val)
elif node.__class__ == ast.Attribute:
if node.ctx.__class__ == ast.Load:
errmsg = "cannot assign to attribute %s" % node.attr
self.raise_exception(node, exc=AttributeError, msg=errmsg)
setattr(self.run(node.value), node.attr, val)
elif node.__class__ == ast.Subscript:
sym = self.run(node.value)
xslice = self.run(node.slice)
if isinstance(node.slice, ast.Index):
sym[xslice] = val
elif isinstance(node.slice, ast.Slice):
                sym[slice(xslice.start, xslice.stop, xslice.step)] = val
elif isinstance(node.slice, ast.ExtSlice):
sym[(xslice)] = val
elif node.__class__ in (ast.Tuple, ast.List):
if len(val) == len(node.elts):
for telem, tval in zip(node.elts, val):
self.node_assign(telem, tval)
else:
raise ValueError('too many values to unpack')
def on_attribute(self, node): # ('value', 'attr', 'ctx')
"extract attribute"
ctx = node.ctx.__class__
# print("on_attribute",node.value,node.attr,ctx)
if ctx == ast.Load:
sym = self.run(node.value)
if hasattr(sym, node.attr):
val = getattr(sym, node.attr)
if isinstance(val, DefinedVariable):
val = val.evaluate()
return val
else:
obj = self.run(node.value)
fmt = "%s does not have member '%s'"
if not isgroup(obj):
obj = obj.__class__
fmt = "%s does not have attribute '%s'"
msg = fmt % (obj, node.attr)
self.raise_exception(node, exc=AttributeError, msg=msg)
elif ctx == ast.Del:
            return delattr(self.run(node.value), node.attr)
elif ctx == ast.Store:
msg = "attribute for storage: shouldn't be here!"
self.raise_exception(node, exc=RuntimeError, msg=msg)
def on_assign(self, node): # ('targets', 'value')
"simple assignment"
val = self.run(node.value)
if len(self.error) > 0:
return
for tnode in node.targets:
self.node_assign(tnode, val)
return # return val
def on_augassign(self, node): # ('target', 'op', 'value')
"augmented assign"
# print( "AugASSIGN ", node.target, node.value)
return self.on_assign(ast.Assign(targets=[node.target],
value=ast.BinOp(left = node.target,
op = node.op,
right= node.value)))
def on_slice(self, node): # ():('lower', 'upper', 'step')
"simple slice"
return slice(self.run(node.lower), self.run(node.upper),
self.run(node.step))
def on_extslice(self, node): # ():('dims',)
"extended slice"
return tuple([self.run(tnode) for tnode in node.dims])
def on_subscript(self, node): # ('value', 'slice', 'ctx')
"subscript handling -- one of the tricky parts"
# print("on_subscript: ", ast.dump(node))
val = self.run(node.value)
nslice = self.run(node.slice)
ctx = node.ctx.__class__
if ctx in ( ast.Load, ast.Store):
if isinstance(node.slice, (ast.Index, ast.Slice, ast.Ellipsis)):
return val.__getitem__(nslice)
elif isinstance(node.slice, ast.ExtSlice):
return val[(nslice)]
else:
msg = "subscript with unknown context"
self.raise_exception(node, msg=msg)
def on_delete(self, node): # ('targets',)
"delete statement"
for tnode in node.targets:
if tnode.ctx.__class__ != ast.Del:
break
children = []
while tnode.__class__ == ast.Attribute:
children.append(tnode.attr)
tnode = tnode.value
if tnode.__class__ == ast.Name:
children.append(tnode.id)
children.reverse()
self.symtable.del_symbol('.'.join(children))
else:
msg = "could not delete symbol"
self.raise_exception(node, msg=msg)
def on_unaryop(self, node): # ('op', 'operand')
"unary operator"
return OPERATORS[node.op.__class__](self.run(node.operand))
def on_binop(self, node): # ('left', 'op', 'right')
"binary operator"
# print( 'BINARY OP! ', node.left, node.right, node.op)
return OPERATORS[node.op.__class__](self.run(node.left),
self.run(node.right))
def on_boolop(self, node): # ('op', 'values')
"boolean operator"
val = self.run(node.values[0])
is_and = ast.And == node.op.__class__
if (is_and and val) or (not is_and and not val):
for n in node.values[1:]:
val = OPERATORS[node.op.__class__](val, self.run(n))
if (is_and and not val) or (not is_and and val):
break
return val
def on_compare(self, node): # ('left', 'ops', 'comparators')
"comparison operators"
lval = self.run(node.left)
out = True
for oper, rnode in zip(node.ops, node.comparators):
comp = OPERATORS[oper.__class__]
rval = self.run(rnode)
out = comp(lval, rval)
lval = rval
if isinstance(out, numpy.ndarray) and out.any():
break
elif not out:
break
return out
def on_print(self, node): # ('dest', 'values', 'nl')
""" note: implements Python2 style print statement, not
print() function. Probably, the 'larch2py' translation
should look for and translate print -> print_() to become
a customized function call.
"""
dest = self.run(node.dest) or self.writer
end = ''
if node.nl:
end = '\n'
out = [self.run(tnode) for tnode in node.values]
if out and len(self.error)==0:
print(*out, file=dest, end=end)
def on_if(self, node): # ('test', 'body', 'orelse')
"regular if-then-else statement"
block = node.body
if not self.run(node.test):
block = node.orelse
for tnode in block:
self.run(tnode)
def on_ifexp(self, node): # ('test', 'body', 'orelse')
"if expressions"
expr = node.orelse
if self.run(node.test):
expr = node.body
return self.run(expr)
def on_while(self, node): # ('test', 'body', 'orelse')
"while blocks"
while self.run(node.test):
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None
def on_for(self, node): # ('target', 'iter', 'body', 'orelse')
"for blocks"
for val in self.run(node.iter):
self.node_assign(node.target, val)
if len(self.error) > 0:
return
self._interrupt = None
for tnode in node.body:
self.run(tnode)
if len(self.error) > 0:
return
if self._interrupt is not None:
break
if isinstance(self._interrupt, ast.Break):
break
else:
for tnode in node.orelse:
self.run(tnode)
self._interrupt = None
def on_listcomp(self, node): # ('elt', 'generators')
"list comprehension"
out = []
for tnode in node.generators:
if tnode.__class__ == ast.comprehension:
for val in self.run(tnode.iter):
self.node_assign(tnode.target, val)
if len(self.error) > 0:
return
add = True
for cond in tnode.ifs:
add = add and self.run(cond)
if add:
out.append(self.run(node.elt))
return out
#
def on_excepthandler(self, node): # ('type', 'name', 'body')
"exception handler..."
# print("except handler %s / %s " % (node.type, ast.dump(node.name)))
return (self.run(node.type), node.name, node.body)
def on_tryexcept(self, node): # ('body', 'handlers', 'orelse')
"try/except blocks"
no_errors = True
for tnode in node.body:
# print(" Try Node: " , self.dump(tnode))
self.run(tnode)
# print(" Error len: " , len(self.error))
no_errors = no_errors and len(self.error) == 0
if self.error:
e_type, e_value, e_tb = self.error[-1].exc_info
#print(" ERROR: ", e_type, e_value, e_tb)
#print(" ... ", self.error)
this_exc = e_type()
for hnd in node.handlers:
htype = None
if hnd.type is not None:
htype = __builtins__.get(hnd.type.id, None)
# print(" ERR HANDLER ", htype)
if htype is None or isinstance(this_exc, htype):
self.error = []
if hnd.name is not None:
self.node_assign(hnd.name, e_value)
for tline in hnd.body:
self.run(tline)
break
if no_errors:
for tnode in node.orelse:
self.run(tnode)
def on_raise(self, node): # ('type', 'inst', 'tback')
"raise statement"
# print(" ON RAISE ", node.type, node.inst, node.tback)
if sys.version_info[0] == 3:
excnode = node.exc
msgnode = node.cause
else:
excnode = node.type
msgnode = node.inst
out = self.run(excnode)
msg = ' '.join(out.args)
msg2 = self.run(msgnode)
if msg2 not in (None, 'None'):
msg = "%s: %s" % (msg, msg2)
self.raise_exception(None, exc=out.__class__, msg=msg, expr='')
def on_call(self, node):
"function/procedure execution"
# ('func', 'args', 'keywords', 'starargs', 'kwargs')
func = self.run(node.func)
if not hasattr(func, '__call__') and not isinstance(func, type):
msg = "'%s' is not callable!!" % (func)
self.raise_exception(node, exc=TypeError, msg=msg)
args = [self.run(targ) for targ in node.args]
if node.starargs is not None:
args = args + self.run(node.starargs)
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
msg = "keyword error in function call '%s'" % (func)
self.raise_exception(node, exc=TypeError, msg=msg)
keywords[key.arg] = self.run(key.value)
if node.kwargs is not None:
keywords.update(self.run(node.kwargs))
self.func = func
out = func(*args, **keywords)
self.func = None
return out
# try:
# except:
# self.raise_exception(node, exc=RuntimeError, func=func,
# msg = "Error running %s" % (func))
def on_functiondef(self, node):
"define procedures"
# ('name', 'args', 'body', 'decorator_list')
if node.decorator_list != []:
raise Warning("decorated procedures not supported!")
kwargs = []
offset = len(node.args.args) - len(node.args.defaults)
for idef, defnode in enumerate(node.args.defaults):
defval = self.run(defnode)
keyval = self.run(node.args.args[idef+offset])
kwargs.append((keyval, defval))
# kwargs.reverse()
args = [tnode.id for tnode in node.args.args[:offset]]
doc = None
if (isinstance(node.body[0], ast.Expr) and
isinstance(node.body[0].value, ast.Str)):
docnode = node.body[0]
doc = docnode.value.s
proc = Procedure(node.name, _larch=self, doc= doc,
body = node.body,
fname = self.fname,
lineno = self.lineno,
args = args,
kwargs = kwargs,
vararg = node.args.vararg,
varkws = node.args.kwarg)
self.symtable.set_symbol(node.name, value=proc)
# imports
def on_import(self, node): # ('names',)
"simple import"
for tnode in node.names:
self.import_module(tnode.name, asname=tnode.asname)
def on_importfrom(self, node): # ('module', 'names', 'level')
"import/from"
fromlist, asname = [], []
for tnode in node.names:
fromlist.append(tnode.name)
asname.append(tnode.asname)
self.import_module(node.module,
asname=asname, fromlist=fromlist)
def import_module(self, name, asname=None,
fromlist=None, do_reload=False):
"""
import a module (larch or python), installing it into the symbol table.
required arg:
name name of module to import
'foo' in 'import foo'
options:
fromlist list of symbols to import with 'from-import'
['x','y'] in 'from foo import x, y'
asname alias for imported name(s)
'bar' in 'import foo as bar'
or
['s','t'] in 'from foo import x as s, y as t'
this method covers a lot of cases (larch or python, import
or from-import, use of asname) and so is fairly long.
"""
st_sys = self.symtable._sys
for idir in st_sys.path:
if idir not in sys.path and os.path.exists(idir):
sys.path.append(idir)
# step 1 import the module to a global location
# either sys.modules for python modules
# or st_sys.modules for larch modules
# reload takes effect here in the normal python way:
if (do_reload or
((name not in st_sys.modules) and (name not in sys.modules))):
# first look for "name.lar"
# print('import_mod A ', name)
islarch = False
larchname = "%s.lar" % name
for dirname in st_sys.path:
if not os.path.exists(dirname):
continue
if larchname in os.listdir(dirname):
islarch = True
modname = os.path.abspath(os.path.join(dirname, larchname))
try:
thismod = builtins._run(filename=modname, _larch=self,
new_module=name)
except:
self.raise_exception(None, exc=ImportError, msg='Import Error')
# save current module group
# create new group, set as moduleGroup and localGroup
if len(self.error) > 0:
st_sys.modules.pop(name)
# thismod = None
return
# or, if not a larch module, load as a regular python module
if not islarch and name not in sys.modules:
try:
# print('import_mod: py import! ', name)
__import__(name)
thismod = sys.modules[name]
except:
self.raise_exception(None, exc=ImportError, msg='Import Error')
return
else: # previously loaded module, just do lookup
# print("prev loaded?")
if name in st_sys.modules:
thismod = st_sys.modules[name]
elif name in sys.modules:
thismod = sys.modules[name]
# now we install thismodule into the current moduleGroup
# import full module
if fromlist is None:
if asname is None:
asname = name
parts = asname.split('.')
asname = parts.pop()
targetgroup = st_sys.moduleGroup
while len(parts) > 0:
subname = parts.pop(0)
subgrp = Group()
setattr(targetgroup, subname, subgrp)
targetgroup = subgrp
setattr(targetgroup, asname, thismod)
# import-from construct
else:
if asname is None:
asname = [None]*len(fromlist)
targetgroup = st_sys.moduleGroup
for sym, alias in zip(fromlist, asname):
if alias is None:
alias = sym
setattr(targetgroup, alias, getattr(thismod, sym))
# end of import_module
| 37.286774 | 87 | 0.521498 | 28,384 | 0.940958 | 0 | 0 | 0 | 0 | 0 | 0 | 6,652 | 0.22052 |
f077d6be5215d3bb3ca0fa34f9524a7653266e11 | 4,422 | py | Python | deepinsight/util/tetrode.py | ealmenzar/DeepInsight | 99aebb90b8183aa9f028c35e0381e73f8cd840f3 | ["MIT"] | null | null | null | deepinsight/util/tetrode.py | ealmenzar/DeepInsight | 99aebb90b8183aa9f028c35e0381e73f8cd840f3 | ["MIT"] | null | null | null | deepinsight/util/tetrode.py | ealmenzar/DeepInsight | 99aebb90b8183aa9f028c35e0381e73f8cd840f3 | ["MIT"] | null | null | null | """
DeepInsight Toolbox
© Markus Frey
https://github.com/CYHSM/DeepInsight
Licensed under MIT License
"""
import numpy as np
import pandas as pd
import h5py
from . import hdf5
from . import stats
def read_open_ephys(fp_raw_file):
"""
Reads ST open ephys files
Parameters
----------
fp_raw_file : str
File path to open ephys file
Returns
-------
continouos : (N,M) array_like
Continous ephys with N timepoints and M channels
timestamps : (N,1) array_like
Timestamps for each sample in continous
positions : (N,5) array_like
Position of animal with two LEDs and timestamps
info : object
Additional information about experiments
"""
fid_ephys = h5py.File(fp_raw_file, mode='r')
# Load timestamps and continuous data, python 3 keys() returns view
recording_key = list(fid_ephys['acquisition']['timeseries'].keys())[0]
processor_key = list(fid_ephys['acquisition']['timeseries'][recording_key]['continuous'].keys())[0]
# Load raw ephys and timestamps
# not converted to microvolts, need to multiply by 0.195. We don't multiply here as we cant load full array into memory
continuous = fid_ephys['acquisition']['timeseries'][recording_key]['continuous'][processor_key]['data']
timestamps = fid_ephys['acquisition']['timeseries'][recording_key]['continuous'][processor_key]['timestamps']
# We can also read position directly from the raw file
positions = fid_ephys['acquisition']['timeseries'][recording_key]['tracking']['ProcessedPos']
# Read general settings
info = fid_ephys['general']['data_collection']['Settings']
return (continuous, timestamps, positions, info)
def read_tetrode_data(fp_raw_file):
"""
Read ST data from openEphys recording system
Parameters
----------
fp_raw_file : str
File path to open ephys file
Returns
-------
raw_data : (N,M) array_like
Continous ephys with N timepoints and M channels
raw_timestamps : (N,1) array_like
Timestamps for each sample in continous
output : (N,4) array_like
Position of animal with two LEDs
output_timestamps : (N,1) array_like
Timestamps for positions
info : object
Additional information about experiments
"""
(raw_data, raw_timestamps, positions, info) = read_open_ephys(fp_raw_file)
output_timestamps = positions[:, 0]
output = positions[:, 1:5]
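    # badChan is stored as a comma-separated byte string (e.g. b'3,17'), so
    # decode and split it before building the list of good channel indices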
bad_channels = info['General']['badChan']
bad_channels = [int(n) for n in bad_channels[()].decode('UTF-8').split(',')]
good_channels = np.delete(np.arange(0, 128), bad_channels)
info = {'channels': good_channels, 'bad_channels': bad_channels, 'sampling_rate': 30000}
return (raw_data, raw_timestamps, output, output_timestamps, info)
def preprocess_output(fp_hdf_out, raw_timestamps, output, output_timestamps, average_window=1000, sampling_rate=512):
"""
Write behaviours to decode into HDF5 file
Parameters
----------
fp_hdf_out : str
File path to HDF5 file
raw_timestamps : (N,1) array_like
Timestamps for each sample in continous
output : (N,1) array_like
Orientation
output_timestamps : (N,1) array_like
Timestamps for positions
average_window : int, optional
Downsampling factor for raw data and orientation, by default 1000
sampling_rate : int, optional
        Sampling rate of raw ephys, by default 512
"""
hdf5_file = h5py.File(fp_hdf_out, mode='a')
# Get size of wavelets
input_length = hdf5_file['inputs/wavelets'].shape[0]
# Get orientation and calculates alignment to the raw_data with downsampling factor average_window
raw_timestamps = raw_timestamps[()] # Slightly faster than np.array
    output_orientation = np.interp(
        raw_timestamps[np.arange(0, raw_timestamps.shape[0], average_window)],
        output_timestamps, output)  # np.arange with step=average_window downsamples the data
raw_orientation = np.array([output_orientation]).transpose()
# Create and save datasets in HDF5 File
hdf5.create_or_update(hdf5_file, dataset_name="outputs/raw_orientation",
dataset_shape=[input_length, 1], dataset_type=np.float16, dataset_value=raw_orientation[0: input_length, :])
hdf5_file.flush()
hdf5_file.close()
| 35.95122 | 161 | 0.685889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,597 | 0.587158 |
f0791e37af8f0e6bb45c78c7fc37667ac15c9e8a | 627 | py | Python | test/scripts/functions.py | JetBrains-Research/jpt-nb-corpus | d93ac84ff885b30ef736cd82f5ce8b09c28ef3d1 | ["MIT"] | 3 | 2022-03-25T10:17:22.000Z | 2022-03-27T14:13:03.000Z | test/scripts/functions.py | JetBrains-Research/Matroskin | 053ed3d7e9dffb0aee4012bc49a194e0c60217c7 | ["MIT"] | null | null | null | test/scripts/functions.py | JetBrains-Research/Matroskin | 053ed3d7e9dffb0aee4012bc49a194e0c60217c7 | ["MIT"] | 1 | 2021-07-06T16:22:11.000Z | 2021-07-06T16:22:11.000Z | # Explicit API functions
from api_functions import api_function1, api_function2
from package3 import api_function3
# API Packages
import package1, package2
import package3
from package4 import api_class1
# Defined functions
def defined_function_1(d_f_arg1, d_f_arg2):
a = api_function1(d_f_arg1)
b = (api_function2(d_f_arg2, d_f_arg1), api_function3())
def defined_function_2(d_f_arg1, d_f_arg2, d_f_arg3):
api_function1()
package1.p1_function1(d_f_arg1, d_f_arg2, d_f_arg3)
a, b = api_class1.cl1_function1(1, 2, '3')
def defined_function_3():
package1.p1_function1()
    package3.p3_function1()
 | 24.115385 | 60 | 0.77193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.095694 |
f07a7df9283116337443c3a5f4f80b400ad900a1 | 4,848 | py | Python | tests/data/test_make_dataset.py | dnsosa/drug-lit-contradictory-claims | c03faa7269050344b631b12302214a3175384e98 | ["MIT"] | null | null | null | tests/data/test_make_dataset.py | dnsosa/drug-lit-contradictory-claims | c03faa7269050344b631b12302214a3175384e98 | ["MIT"] | null | null | null | tests/data/test_make_dataset.py | dnsosa/drug-lit-contradictory-claims | c03faa7269050344b631b12302214a3175384e98 | ["MIT"] | null | null | null | """Tests for making datasets for contradictory-claims."""
# -*- coding: utf-8 -*-
import os
import unittest
from contradictory_claims.data.make_dataset import load_drug_virus_lexicons, load_mancon_corpus_from_sent_pairs, \
load_med_nli, load_multi_nli
from .constants import drug_lex_path, mancon_sent_pairs, mednli_dev_path, mednli_test_path, mednli_train_path, \
multinli_test_path, multinli_train_path, sample_drug_lex_path, sample_mancon_sent_pairs, \
sample_multinli_test_path, sample_multinli_train_path, sample_virus_lex_path, virus_lex_path
class TestMakeDataset(unittest.TestCase):
"""Tests for making datasets for contradictory-claims."""
@unittest.skip("This test can be used to check that datasets are found at the correct locations locally")
def test_find_files(self):
"""Test that input files are found properly."""
self.assertTrue(os.path.isfile(multinli_train_path),
"MultiNLI training data not found at {}".format(multinli_train_path))
self.assertTrue(os.path.isfile(multinli_test_path),
"MultiNLI test data not found at {}".format(multinli_test_path))
self.assertTrue(os.path.isfile(mednli_train_path),
"MedNLI training data not found at {}".format(mednli_train_path))
self.assertTrue(os.path.isfile(mednli_dev_path),
"MedNLI dev set data not found at {}".format(mednli_dev_path))
self.assertTrue(os.path.isfile(mednli_test_path),
"MedNLI test data not found at {}".format(mednli_test_path))
self.assertTrue(os.path.isfile(mancon_sent_pairs),
"ManConCorpus sentence pairs training data not found at {}".format(mancon_sent_pairs))
self.assertTrue(os.path.isfile(drug_lex_path),
"Drug lexicon not found at {}".format(drug_lex_path))
self.assertTrue(os.path.isfile(virus_lex_path),
"Virus lexicon not found at {}".format(virus_lex_path))
@unittest.skip("This test can be used locally to check that MultiNLI loads properly")
def test_load_multi_nli(self):
"""Test that MultiNLI is loaded as expected."""
x_train, y_train, x_test, y_test = load_multi_nli(multinli_train_path, multinli_test_path)
self.assertEqual(len(x_train), 391165)
self.assertEqual(y_train.shape, (391165, 3))
self.assertEqual(len(x_test), 9897)
self.assertEqual(y_test.shape, (9897, 3))
def test_load_multi_nli_sample(self):
"""Test that MultiNLI SAMPLE DATA are loaded as expected."""
x_train, y_train, x_test, y_test = load_multi_nli(sample_multinli_train_path, sample_multinli_test_path)
self.assertEqual(len(x_train), 49)
self.assertEqual(y_train.shape, (49, 3))
self.assertEqual(len(x_test), 49)
self.assertEqual(y_test.shape, (49, 3))
@unittest.skip("This test can be used locally to check that MedNLI loads properly")
def test_load_med_nli(self):
"""Test that MedNLI is loaded as expected."""
x_train, y_train, x_test, y_test = load_med_nli(mednli_train_path, mednli_dev_path, mednli_test_path)
self.assertEqual(len(x_train), 12627)
self.assertEqual(y_train.shape, (12627, 3))
self.assertEqual(len(x_test), 1422)
self.assertEqual(y_test.shape, (1422, 3))
@unittest.skip("This test can be used locally to check that ManConCorpus loads properly")
def test_load_mancon_corpus_from_sent_pairs(self):
"""Test that ManConCorpus is loaded as expected."""
x_train, y_train, x_test, y_test = load_mancon_corpus_from_sent_pairs(mancon_sent_pairs)
self.assertEqual(len(x_train), 14328)
self.assertEqual(y_train.shape, (14328, 3))
self.assertEqual(len(x_test), 3583)
self.assertEqual(y_test.shape, (3583, 3))
def test_load_mancon_corpus_from_sent_pairs_sample(self):
"""Test that ManConCorpus is loaded as expected."""
x_train, y_train, x_test, y_test = load_mancon_corpus_from_sent_pairs(sample_mancon_sent_pairs)
self.assertEqual(len(x_train), 39)
self.assertEqual(y_train.shape, (39, 3))
self.assertEqual(len(x_test), 10)
self.assertEqual(y_test.shape, (10, 3))
def test_load_drug_virus_lexicons(self):
"""Test that the virus and drug lexicons are loaded properly."""
drug_names, virus_names = load_drug_virus_lexicons(sample_drug_lex_path, sample_virus_lex_path)
drugs = ["hydroxychloroquine", "remdesivir", "ritonavir", "chloroquine", "lopinavir"]
virus_syns = ["COVID-19", "SARS-CoV-2", "Coronavirus Disease 2019"]
self.assertTrue(set(drugs).issubset(set(drug_names)))
self.assertTrue(set(virus_syns).issubset(set(virus_names)))
| 49.469388 | 114 | 0.697401 | 4,280 | 0.882838 | 0 | 0 | 2,811 | 0.579827 | 0 | 0 | 1,220 | 0.25165 |
f07b043e271471be2b35cb22503d63d12af2440e | 3,111 | py | Python | detection.py | aar0npham/FuryColorDetection | 5a8ae6a5f9dc8ccf42c78d4f29038e4e6889a858 | ["Apache-2.0"] | null | null | null | detection.py | aar0npham/FuryColorDetection | 5a8ae6a5f9dc8ccf42c78d4f29038e4e6889a858 | ["Apache-2.0"] | null | null | null | detection.py | aar0npham/FuryColorDetection | 5a8ae6a5f9dc8ccf42c78d4f29038e4e6889a858 | ["Apache-2.0"] | null | null | null | # import modules
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the colors in the HSV color space
lower = {'red': (166, 84, 141), 'green': (66, 122, 129), 'blue': (97, 100, 117), 'yellow': (23, 59, 119), 'orange': (0, 50, 80)} # assign new item lower['blue'] = (93, 10, 0)
upper = {'red': (186, 255, 255), 'green': (86, 255, 255), 'blue': (117, 255, 255), 'yellow': (54, 255, 255), 'orange': (20, 255, 255)}
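# note: OpenCV stores 8-bit hue in the range 0-179, so an upper hue bound above
# 179 (e.g. red's 186) simply saturates at the top of the range in cv2.inRange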
# define standard colors for circle around the object
colors = {'red': (0, 0, 255), 'green': (0, 255, 0), 'blue': (255, 0, 0), 'yellow': (0, 255, 217), 'orange': (0, 140, 255)}
camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
# keep looping
while True:
# grab the current frame
(grabbed, frame) = camera.read()
# resize the frame, blur it, and convert it to the HSV
# color space
frame = cv2.resize(frame, (640, 480))
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
# for each color in dictionary check object in frame
for key, value in upper.items():
        # construct a mask for the color from the dictionary, then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
kernel = np.ones((9, 9), np.uint8)
mask = cv2.inRange(hsv, lower[key], upper[key])
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            # only proceed if the radius meets a minimum size. Correct this value for your object's size
if radius > 0.5:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius), colors[key], 2)
cv2.putText(frame, key, (int(x - radius), int(y - radius)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[key], 2)
# show the frame to our screen
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
| 40.402597 | 175 | 0.603664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,237 | 0.397621 |
f07bcc1be66ad63b427b651f681533f05db82f52 | 430 | py | Python | topics/migrations/0003_topic_word.py | acdh-oeaw/mmp | 7ef8f33eafd3a7985328d374130f1cbe31f77df0 | ["MIT"] | 2 | 2021-06-02T11:27:54.000Z | 2021-08-25T10:29:04.000Z | topics/migrations/0003_topic_word.py | acdh-oeaw/mmp | 7ef8f33eafd3a7985328d374130f1cbe31f77df0 | ["MIT"] | 86 | 2021-01-29T12:31:34.000Z | 2022-03-28T11:41:04.000Z | topics/migrations/0003_topic_word.py | acdh-oeaw/mmp | 7ef8f33eafd3a7985328d374130f1cbe31f77df0 | ["MIT"] | null | null | null | # Generated by Django 3.2 on 2021-10-21 19:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topics', '0002_alter_modelingprocess_modeling_type'),
]
operations = [
migrations.AddField(
model_name='topic',
name='word',
field=models.JSONField(default='{}'),
preserve_default=False,
),
]
| 21.5 | 63 | 0.597674 | 339 | 0.788372 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.260465 |
f07d5c996cff48d3e4ce4edaac97743f3de1a7ce | 171 | py | Python | src/ihtt/__init__.py | dekoza/i-hate-time-tracking | adb6018b56c836317535f2e2346dfb8d9cce3aac | ["Apache-2.0"] | null | null | null | src/ihtt/__init__.py | dekoza/i-hate-time-tracking | adb6018b56c836317535f2e2346dfb8d9cce3aac | ["Apache-2.0"] | null | null | null | src/ihtt/__init__.py | dekoza/i-hate-time-tracking | adb6018b56c836317535f2e2346dfb8d9cce3aac | ["Apache-2.0"] | null | null | null | """
I Hate Time Tracking package.
Get time tracking out of your way.
"""
from typing import List
__all__: List[str] = [] # noqa: WPS410 (the only __variable__ we use)
| 17.1 | 70 | 0.695906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.690058 |
f07e0ced31d9f3b5a75c59dd3ef793ba14212ab0 | 2,831 | py | Python | tests/base.py | octue/octue-sdk-python | 31c6e9358d3401ca708f5b3da702bfe3be3e52ce | ["MIT"] | 5 | 2020-10-01T12:43:10.000Z | 2022-03-14T17:26:25.000Z | tests/base.py | octue/octue-sdk-python | 31c6e9358d3401ca708f5b3da702bfe3be3e52ce | ["MIT"] | 322 | 2020-06-24T15:55:22.000Z | 2022-03-30T11:49:28.000Z | tests/base.py | octue/octue-sdk-python | 31c6e9358d3401ca708f5b3da702bfe3be3e52ce | ["MIT"] | null | null | null | import os
import subprocess
import unittest
import uuid
import warnings
from tempfile import TemporaryDirectory, gettempdir
from octue.cloud.emulators import GoogleCloudStorageEmulatorTestResultModifier
from octue.mixins import MixinBase, Pathable
from octue.resources import Datafile, Dataset, Manifest
from tests import TEST_BUCKET_NAME
class MyPathable(Pathable, MixinBase):
pass
class BaseTestCase(unittest.TestCase):
"""Base test case for twined:
- sets a path to the test data directory
"""
test_result_modifier = GoogleCloudStorageEmulatorTestResultModifier(default_bucket_name=TEST_BUCKET_NAME)
setattr(unittest.TestResult, "startTestRun", test_result_modifier.startTestRun)
setattr(unittest.TestResult, "stopTestRun", test_result_modifier.stopTestRun)
def setUp(self):
# Set up paths to the test data directory and to the app templates directory
root_dir = os.path.dirname(os.path.abspath(__file__))
self.data_path = os.path.join(root_dir, "data")
self.templates_path = os.path.join(os.path.dirname(root_dir), "octue", "templates")
# Make unittest ignore excess ResourceWarnings so tests' console outputs are clearer. This has to be done even
# if these warnings are ignored elsewhere as unittest forces warnings to be displayed by default.
warnings.simplefilter("ignore", category=ResourceWarning)
super().setUp()
def callCli(self, args):
"""Utility to call the octue CLI (eg for a templated example) in a separate subprocess
Enables testing that multiple processes aren't using the same memory space, or for running multiple apps in
parallel to ensure they don't conflict
"""
call_id = str(uuid.uuid4())
tmp_dir_name = os.path.join(gettempdir(), "octue-sdk-python", f"test-{call_id}")
with TemporaryDirectory(dir=tmp_dir_name):
subprocess.call(args, cwd=tmp_dir_name)
def create_valid_dataset(self):
""" Create a valid dataset with two valid datafiles (they're the same file in this case). """
path_from = MyPathable(path=os.path.join(self.data_path, "basic_files", "configuration", "test-dataset"))
path = os.path.join("path-within-dataset", "a_test_file.csv")
files = [
Datafile(path_from=path_from, path=path, skip_checks=False),
Datafile(path_from=path_from, path=path, skip_checks=False),
]
return Dataset(files=files)
def create_valid_manifest(self):
""" Create a valid manifest with two valid datasets (they're the same dataset in this case). """
datasets = [self.create_valid_dataset(), self.create_valid_dataset()]
manifest = Manifest(datasets=datasets, keys={"my_dataset": 0, "another_dataset": 1})
return manifest
| 42.253731 | 118 | 0.716001 | 2,485 | 0.877782 | 0 | 0 | 0 | 0 | 0 | 0 | 1,018 | 0.35959 |
f07eec804d533d3b03eb1442655922fd39f8fdb2 | 6,457 | py | Python | sstcam_sandbox/d190717_alpha/plot_wobble_animation_goldfish.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | ["BSD-3-Clause"] | null | null | null | sstcam_sandbox/d190717_alpha/plot_wobble_animation_goldfish.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | ["BSD-3-Clause"] | null | null | null | sstcam_sandbox/d190717_alpha/plot_wobble_animation_goldfish.py | watsonjj/CHECLabPySB | 91330d3a6f510a392f635bd7f4abd2f77871322c | ["BSD-3-Clause"] | 1 | 2021-03-30T09:46:56.000Z | 2021-03-30T09:46:56.000Z | from CHECLabPy.plotting.setup import Plotter
from CHECLabPy.plotting.camera import CameraImage
from CHECLabPy.utils.files import create_directory
from CHECLabPy.utils.mapping import get_ctapipe_camera_geometry
from sstcam_sandbox import get_plot, get_data
from os.path import join
from matplotlib import pyplot as plt
from tqdm import tqdm
import numpy as np
import pandas as pd
import warnings
from CHECOnsky.calib import obtain_cleaning_mask
from CHECLabPy.calib import TimeCalibrator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from IPython import embed
def colorbar(mappable, label):
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
_ = divider.append_axes("right", size="10%", pad=0.15)
cax = divider.append_axes("right", size="10%", pad=0.15)
return fig.colorbar(mappable, label=label, cax=cax, aspect=20)
class CameraMovie(Plotter):
def __init__(self, mapping, output_path):
super().__init__()
self.fig = plt.figure(figsize=(8, 3))
self.ax_goldfish = self.fig.add_axes([0, 0, 0.4, 1])
self.ax_image = self.fig.add_axes([0.4, 0, 0.4, 1])
self.ax_cb = self.fig.add_axes([0.68, 0, 0.15, 1])
self.ax_image.patch.set_alpha(0)
self.ax_cb.patch.set_alpha(0)
self.ax_cb.axis('off')
self.ci_image = CameraImage.from_mapping(mapping, ax=self.ax_image)
self.ci_image.add_colorbar(
"Pixel Amplitude (p.e.)", ax=self.ax_cb, pad=-0.5
)
self.ci_goldfish = CameraImage.from_mapping(mapping, ax=self.ax_goldfish)
self.output_path = output_path
self.source_point_image = None
self.source_point_goldfish = None
self.source_label_image = None
self.source_label_goldfish = None
self.alpha_line = None
self.timestamp = None
self.iframe = 0
def set_source_position(self, x_src, y_src):
offset = 0.004
if self.source_point_image is None:
self.source_point_image, = self.ax_image.plot(
x_src, y_src, 'x', c='red'
)
self.source_label_image = self.ax_image.text(
x_src+offset, y_src+offset, "Mrk421", color='red', size=10
)
else:
self.source_point_image.set_xdata(x_src)
self.source_point_image.set_ydata(y_src)
self.source_label_image.set_position((x_src+offset, y_src+offset))
if self.source_point_goldfish is None:
self.source_point_goldfish, = self.ax_goldfish.plot(
x_src, y_src, 'x', c='red'
)
self.source_label_goldfish = self.ax_goldfish.text(
x_src+offset, y_src+offset, "Mrk421", color='red', size=10
)
else:
self.source_point_goldfish.set_xdata(x_src)
self.source_point_goldfish.set_ydata(y_src)
self.source_label_goldfish.set_position((x_src+offset, y_src+offset))
def set_timestamp(self, timestamp):
timestamp_str = str(timestamp)
timestamp_len = len(timestamp_str)
missing = 29 - timestamp_len
timestamp_str += "0" * missing
if self.timestamp is None:
self.timestamp = self.fig.text(
0.4, -0.1, timestamp_str, horizontalalignment='center', size=12
)
else:
self.timestamp.set_text(timestamp_str)
def set_image(self, image, min_=None, max_=None):
self.ci_image.image = image
self.ci_image.set_limits_minmax(min_, max_)
def set_goldfish(self, slice, min_=None, max_=None):
self.ci_goldfish.image = slice
self.ci_goldfish.set_limits_minmax(min_, max_)
def set_alpha_line(self, cog_x, cog_y, psi):
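        # Draw the line through the image centroid (cog_x, cog_y) at angle psi,
        # extended to the top and bottom edges of the camera-image axes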
y_min, y_max = self.ax_image.get_ylim()
x_min = cog_x - (cog_y - y_min) / np.tan(psi)
x_max = cog_x - (cog_y - y_max) / np.tan(psi)
if self.alpha_line is None:
self.alpha_line, = self.ax_image.plot(
[x_min, x_max], [y_min, y_max], ls="--", c='red'
)
else:
self.alpha_line.set_xdata([x_min, x_max])
self.alpha_line.set_ydata([y_min, y_max])
def save_frame(self):
path = self.output_path.format(self.iframe)
self.fig.savefig(path, bbox_inches='tight')
self.iframe += 1
def main():
path = get_data("d190717_alpha/wobble.h5")
with pd.HDFStore(path, mode='r') as store:
df = store['data'].loc[::4]
mapping = store['mapping']
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
mapping.metadata = store.get_storer('mapping').attrs.metadata
tc = TimeCalibrator()
geom = get_ctapipe_camera_geometry(mapping)
n_row = df.index.size
p_camera = CameraMovie(mapping, get_plot(
"d190717_alpha/wobble_animation_goldfish/frames/{:04d}.png"
))
for _, row in tqdm(df.iterrows(), total=n_row):
timestamp = row['timestamp']
iobs = row['iobs']
iev = row['iev']
x_src = row['x_src']
y_src = row['y_src']
dl1 = row['dl1'].values
time = row['dl1_pulse_time'].values
r1 = row['r1']
x_cog = row['x_cog']
y_cog = row['y_cog']
psi = row['psi']
p_camera.set_source_position(x_src, y_src)
n_pixels, n_samples = r1.shape
shifted = tc(r1)
mask = obtain_cleaning_mask(geom, dl1, time)
if not mask.any():
msg = f"No pixels survived cleaning for: RUN {iobs} IEV {iev}"
print(msg)
continue
# raise ValueError(msg)
dl1_ma = np.ma.masked_array(dl1, mask=~mask)
min_pixel = dl1_ma.argmin()
max_pixel = dl1_ma.argmax()
min_image = -4
max_image = 0.7 * dl1.max()
min_gf = shifted[max_pixel, :20].min()
max_gf = shifted[max_pixel].max() * 0.8
st = int(np.min(time[mask]) - 3)
et = int(np.max(time[mask]) + 6)
st = st if st > 0 else 0
et = et if et < n_samples else n_samples
# embed()
p_camera.set_image(dl1, min_image, max_image)
for t in range(st, et, 3):
slice_ = shifted[:, t]
p_camera.set_timestamp(timestamp + pd.Timedelta(f"{t}ns"))
p_camera.set_goldfish(slice_, min_gf, max_gf)
p_camera.save_frame()
if __name__ == '__main__':
main()
| 34.715054 | 81 | 0.61654 | 3,482 | 0.53926 | 0 | 0 | 0 | 0 | 0 | 0 | 427 | 0.06613 |
f07f197c27b7ad864308c5332ee3a30042155d95 | 15,797 | py | Python | tempest/cmd/run.py | Juniper/tempest | f8316c9c28e029063c036e1cf83947af068e7703 | ["Apache-2.0"] | null | null | null | tempest/cmd/run.py | Juniper/tempest | f8316c9c28e029063c036e1cf83947af068e7703 | ["Apache-2.0"] | null | null | null | tempest/cmd/run.py | Juniper/tempest | f8316c9c28e029063c036e1cf83947af068e7703 | ["Apache-2.0"] | 5 | 2016-06-24T20:03:52.000Z | 2020-02-05T10:14:54.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Runs tempest tests
This command is used for running the tempest tests
Test Selection
==============
Tempest run has several options:
* **--regex/-r**: This is a selection regex like what testr uses. It will run
any tests that match on re.match() with the regex
* **--smoke/-s**: Run all the tests tagged as smoke
There are also the **--blacklist-file** and **--whitelist-file** options that
let you pass a filepath to tempest run with the file format being a line
separated regex, with '#' used to signify the start of a comment on a line.
For example::
# Regex file
^regex1 # Match these tests
.*regex2 # Match those tests
The blacklist file will be used to construct a negative lookahead regex and
the whitelist file will simply OR all the regexes in the file. The whitelist
and blacklist file options are mutually exclusive so you can't use them
together. However, you can combine either with a normal regex or the *--smoke*
flag. When used with a blacklist file the generated regex will be combined to
something like::
^((?!black_regex1|black_regex2).)*$cli_regex1
When combined with a whitelist file all the regexes from the file and the CLI
regexes will be ORed.
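For example, a whitelist file containing white_regex1 and white_regex2 combined
with cli_regex1 would produce a selection regex roughly like::
    white_regex1|white_regex2|cli_regex1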
You can also use the **--list-tests** option in conjunction with selection
arguments to list which tests will be run.
You can also use the **--load-list** option that lets you pass a filepath to
tempest run with the file format being in a non-regex format, similar to the
tests generated by the **--list-tests** option. You can specify target tests
by removing unnecessary tests from a list file which is generated from
**--list-tests** option.
Test Execution
==============
There are several options to control how the tests are executed. By default
tempest will run in parallel with a worker for each CPU present on the machine.
If you want to adjust the number of workers use the **--concurrency** option
and if you want to run tests serially use **--serial/-t**
Running with Workspaces
-----------------------
Tempest run enables you to run your tempest tests from any setup tempest
workspace. It relies on you having set up a tempest workspace with either the
``tempest init`` or ``tempest workspace`` commands. Then using the
``--workspace`` CLI option you can specify which one of your workspaces you
want to run tempest from. Using this option you don't have to run Tempest
directly with your current working directory being the workspace; Tempest will
take care of managing everything to be executed from there.
Running from Anywhere
---------------------
Tempest run provides you with an option to execute tempest from anywhere on
your system. You are required to provide a config file in this case with the
``--config-file`` option. When run, tempest will create a .testrepository
directory and a .testr.conf file in your current working directory. This way
you can use testr commands directly to inspect the state of the previous run.
Test Output
===========
By default tempest run's output to STDOUT will be generated using the
subunit-trace output filter. But, if you would prefer a subunit v2 stream be
output to STDOUT use the **--subunit** flag
Combining Runs
==============
There are certain situations in which you want to split a single run of tempest
across 2 executions of tempest run (for example, to run part of the tests
serially and others in parallel). To accomplish this but still treat the results
as a single run, you can leverage the **--combine** option, which will append
the current run's results to the previous run's.
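For example, something like the following would record two invocations, the
second one run serially, as a single combined run (the regexes here are just
placeholders)::
    tempest run --regex some_parallel_regex
    tempest run --serial --regex some_serial_regex --combine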
"""
import io
import os
import sys
import tempfile
import threading
from cliff import command
from os_testr import regex_builder
from os_testr import subunit_trace
from oslo_serialization import jsonutils as json
import six
from testrepository.commands import run_argv
from tempest import clients
from tempest.cmd import cleanup_service
from tempest.cmd import init
from tempest.cmd import workspace
from tempest.common import credentials_factory as credentials
from tempest import config
CONF = config.CONF
SAVED_STATE_JSON = "saved_state.json"
class TempestRun(command.Command):
def _set_env(self, config_file=None):
if config_file:
CONF.set_config_path(os.path.abspath(config_file))
# NOTE(mtreinish): This is needed so that testr doesn't gobble up any
# stacktraces on failure.
if 'TESTR_PDB' in os.environ:
return
else:
os.environ["TESTR_PDB"] = ""
# NOTE(dims): most of our .testr.conf try to test for PYTHON
# environment variable and fall back to "python", under python3
# if it does not exist. we should set it to the python3 executable
# to deal with this situation better for now.
if six.PY3 and 'PYTHON' not in os.environ:
os.environ['PYTHON'] = sys.executable
def _create_testrepository(self):
if not os.path.isdir('.testrepository'):
returncode = run_argv(['testr', 'init'], sys.stdin, sys.stdout,
sys.stderr)
if returncode:
sys.exit(returncode)
def _create_testr_conf(self):
top_level_path = os.path.dirname(os.path.dirname(__file__))
discover_path = os.path.join(top_level_path, 'test_discover')
file_contents = init.TESTR_CONF % (top_level_path, discover_path)
with open('.testr.conf', 'w+') as testr_conf_file:
testr_conf_file.write(file_contents)
def take_action(self, parsed_args):
returncode = 0
if parsed_args.config_file:
self._set_env(parsed_args.config_file)
else:
self._set_env()
# Workspace execution mode
if parsed_args.workspace:
workspace_mgr = workspace.WorkspaceManager(
parsed_args.workspace_path)
path = workspace_mgr.get_workspace(parsed_args.workspace)
if not path:
sys.exit(
"The %r workspace isn't registered in "
"%r. Use 'tempest init' to "
"register the workspace." %
(parsed_args.workspace, workspace_mgr.path))
os.chdir(path)
# NOTE(mtreinish): tempest init should create a .testrepository dir
# but since workspaces can be imported let's sanity check and
# ensure that one is created
self._create_testrepository()
# Local execution mode
elif os.path.isfile('.testr.conf'):
# If you're running in local execution mode and there is not a
# testrepository dir create one
self._create_testrepository()
# local execution with config file mode
elif parsed_args.config_file:
self._create_testr_conf()
self._create_testrepository()
else:
print("No .testr.conf file was found for local execution")
sys.exit(2)
if parsed_args.state:
self._init_state()
else:
pass
if parsed_args.combine:
temp_stream = tempfile.NamedTemporaryFile()
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
temp_stream, sys.stderr)
if return_code > 0:
sys.exit(return_code)
regex = self._build_regex(parsed_args)
if parsed_args.list_tests:
argv = ['tempest', 'list-tests', regex]
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
options = self._build_options(parsed_args)
returncode = self._run(regex, options)
if returncode > 0:
sys.exit(returncode)
if parsed_args.combine:
return_code = run_argv(['tempest', 'last', '--subunit'], sys.stdin,
temp_stream, sys.stderr)
if return_code > 0:
sys.exit(return_code)
returncode = run_argv(['tempest', 'load', temp_stream.name],
sys.stdin, sys.stdout, sys.stderr)
sys.exit(returncode)
def get_description(self):
return 'Run tempest'
def _init_state(self):
print("Initializing saved state.")
data = {}
self.global_services = cleanup_service.get_global_cleanup_services()
self.admin_mgr = clients.Manager(
credentials.get_configured_admin_credentials())
admin_mgr = self.admin_mgr
kwargs = {'data': data,
'is_dry_run': False,
'saved_state_json': data,
'is_preserve': False,
'is_save_state': True}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
with open(SAVED_STATE_JSON, 'w+') as f:
f.write(json.dumps(data,
sort_keys=True, indent=2, separators=(',', ': ')))
def get_parser(self, prog_name):
parser = super(TempestRun, self).get_parser(prog_name)
parser = self._add_args(parser)
return parser
def _add_args(self, parser):
# workspace args
parser.add_argument('--workspace', default=None,
help='Name of tempest workspace to use for running'
' tests. You can see a list of workspaces '
'with tempest workspace list')
parser.add_argument('--workspace-path', default=None,
dest='workspace_path',
help="The path to the workspace file, the default "
"is ~/.tempest/workspace.yaml")
# Configuration flags
parser.add_argument('--config-file', default=None, dest='config_file',
help='Configuration file to run tempest with')
# test selection args
regex = parser.add_mutually_exclusive_group()
regex.add_argument('--smoke', '-s', action='store_true',
help="Run the smoke tests only")
regex.add_argument('--regex', '-r', default='',
help='A normal testr selection regex used to '
'specify a subset of tests to run')
list_selector = parser.add_mutually_exclusive_group()
list_selector.add_argument('--whitelist-file', '--whitelist_file',
help="Path to a whitelist file, this file "
"contains a separate regex on each "
"newline.")
list_selector.add_argument('--blacklist-file', '--blacklist_file',
help='Path to a blacklist file, this file '
'contains a separate regex exclude on '
'each newline')
list_selector.add_argument('--load-list', '--load_list',
help='Path to a non-regex whitelist file, '
                                   'this file contains a separate test '
                                   'on each newline. This command '
                                   'supports files created by the tempest '
                                   'run ``--list-tests`` command')
# list only args
parser.add_argument('--list-tests', '-l', action='store_true',
help='List tests',
default=False)
# execution args
parser.add_argument('--concurrency', '-w',
help="The number of workers to use, defaults to "
"the number of cpus")
parallel = parser.add_mutually_exclusive_group()
parallel.add_argument('--parallel', dest='parallel',
action='store_true',
help='Run tests in parallel (this is the'
' default)')
parallel.add_argument('--serial', '-t', dest='parallel',
action='store_false',
help='Run tests serially')
parser.add_argument('--save-state', dest='state',
action='store_true',
help="To save the state of the cloud before "
"running tempest.")
# output args
parser.add_argument("--subunit", action='store_true',
help='Enable subunit v2 output')
parser.add_argument("--combine", action='store_true',
help='Combine the output of this run with the '
"previous run's as a combined stream in the "
"testr repository after it finish")
parser.set_defaults(parallel=True)
return parser
def _build_regex(self, parsed_args):
regex = ''
if parsed_args.smoke:
regex = 'smoke'
elif parsed_args.regex:
regex = parsed_args.regex
if parsed_args.whitelist_file or parsed_args.blacklist_file:
regex = regex_builder.construct_regex(parsed_args.blacklist_file,
parsed_args.whitelist_file,
regex, False)
return regex
def _build_options(self, parsed_args):
options = []
if parsed_args.subunit:
options.append("--subunit")
if parsed_args.parallel:
options.append("--parallel")
if parsed_args.concurrency:
options.append("--concurrency=%s" % parsed_args.concurrency)
if parsed_args.load_list:
options.append("--load-list=%s" % parsed_args.load_list)
return options
def _run(self, regex, options):
returncode = 0
argv = ['tempest', 'run', regex] + options
if '--subunit' in options:
returncode = run_argv(argv, sys.stdin, sys.stdout, sys.stderr)
else:
argv.append('--subunit')
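            # Emulate "testr ... --subunit | subunit-trace" in-process: testr
            # writes the subunit stream into a pipe from a worker thread while
            # subunit-trace consumes and pretty-prints it here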
stdin = io.StringIO()
stdout_r, stdout_w = os.pipe()
subunit_w = os.fdopen(stdout_w, 'wt')
subunit_r = os.fdopen(stdout_r)
returncodes = {}
def run_argv_thread():
returncodes['testr'] = run_argv(argv, stdin, subunit_w,
sys.stderr)
subunit_w.close()
run_thread = threading.Thread(target=run_argv_thread)
run_thread.start()
returncodes['subunit-trace'] = subunit_trace.trace(
subunit_r, sys.stdout, post_fails=True, print_failures=True)
run_thread.join()
subunit_r.close()
# python version of pipefail
if returncodes['testr']:
returncode = returncodes['testr']
elif returncodes['subunit-trace']:
returncode = returncodes['subunit-trace']
return returncode
| 43.043597 | 79 | 0.599734 | 11,102 | 0.702792 | 0 | 0 | 0 | 0 | 0 | 0 | 7,023 | 0.444578 |
f07f1c21b8f06d89cde1866e0e0a9e2404549ae4 | 10,586 | py | Python | src/python/vrprim/photosphere/conv.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | ["MIT"] | 1 | 2017-01-29T21:15:23.000Z | 2017-01-29T21:15:23.000Z | src/python/vrprim/photosphere/conv.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | ["MIT"] | 2 | 2017-01-29T20:34:39.000Z | 2017-01-29T23:26:05.000Z | src/python/vrprim/photosphere/conv.py | cmbruns/vr_samples | 8dee056766bccca1a602c6dd58fd0a641c5033a5 | ["MIT"] | null | null | null | """
Convert spherical panorama in equirectangular format into cubemap format
"""
from math import pi, log2
import numpy
from libtiff import TIFF
import png
import glfw
from OpenGL import GL
from OpenGL.GL import shaders
from OpenGL.GL.EXT.texture_filter_anisotropic import GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, GL_TEXTURE_MAX_ANISOTROPY_EXT
from PIL import Image
class Converter(object):
def render_scene(self):
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glUseProgram(self.shader)
equirect_loc = GL.glGetUniformLocation(self.shader, "equirect")
GL.glUniform1i(equirect_loc, 0)
GL.glDrawArrays(GL.GL_TRIANGLE_STRIP, 0, 4)
def cube_from_equirect(self, arr):
"""
Use OpenGL to efficiently warp an equirectangular image into
a single cubemap image
"""
# Set up glfw
eh = arr.shape[0]
ew = arr.shape[1]
print(ew, eh)
# Cubemap has same width, and height * 1.5, right? todo:
scale = 4.0 / pi # tan(a)/a [a == 45 degrees] # so cube face center resolution matches equirectangular equator resolution
# scale = 1.0
# scale *= 1.0 / 4.0 # optional: smaller for faster testing
tile_size = int(scale * ew / 4.0)
# optional: clip to nearest power of two subtile size
tile_size = int(pow(2.0, int(log2(tile_size))))
print("tile size = ", tile_size, " pixels")
cw = 4 * tile_size
ch = 3 * tile_size
print(cw, ch)
glfw.init()
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 5)
# glfw.window_hint(glfw.VISIBLE, False)
w = glfw.create_window(cw, ch, "Cubemap", None, None)
# Create a framebuffer and render cube_color_texture
glfw.make_context_current(w)
vao = GL.glGenVertexArrays(1)
GL.glBindVertexArray(vao)
fb = GL.glGenFramebuffers(1)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, fb)
cube_color_tex = GL.glGenTextures(1)
if arr.dtype == numpy.uint16:
gl_type = GL.GL_UNSIGNED_SHORT
cube_internal_format = GL.GL_RGBA16
input_internal_format = GL.GL_RGB16
elif arr.dtype == numpy.uint8:
gl_type = GL.GL_UNSIGNED_BYTE
cube_internal_format = GL.GL_RGBA8
input_internal_format = GL.GL_RGB8
else:
            raise ValueError('unsupported image dtype: %s' % arr.dtype)
GL.glBindTexture(GL.GL_TEXTURE_2D, cube_color_tex)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, cube_internal_format, cw, ch, 0, GL.GL_RGBA, gl_type, None)
GL.glFramebufferTexture(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, cube_color_tex, 0)
GL.glDrawBuffers([GL.GL_COLOR_ATTACHMENT0,])
if GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) != GL.GL_FRAMEBUFFER_COMPLETE:
raise "Incomplete framebuffer"
else:
print("Framebuffer OK")
# Create shader program
vtx = shaders.compileShader("""#version 450
#line 62
out vec2 tex_coord;
const vec4 SCREEN_QUAD[4] = vec4[4](
vec4(-1, -1, 0.5, 1),
vec4( 1, -1, 0.5, 1),
vec4(-1, 1, 0.5, 1),
vec4( 1, 1, 0.5, 1));
void main() {
vec4 c = SCREEN_QUAD[gl_VertexID]; // corner location
gl_Position = c;
tex_coord = 0.5 * (c.xy + vec2(1));
}
""", GL.GL_VERTEX_SHADER)
frg = shaders.compileShader("""#version 450
#line 79
layout(binding=0) uniform sampler2D equirect;
in vec2 tex_coord;
out vec4 frag_color;
const float PI = 3.14159265359;
vec3 xyz_from_equirect(in vec2 eq) {
vec2 c = 2*eq - vec2(1); // centered
float lon = PI * c.x;
float lat = -0.5 * PI * c.y;
float s = cos(lat);
return vec3(s*sin(lon), sin(lat), -s*cos(lon));
}
vec2 equirect_from_xyz(in vec3 xyz) {
float r = length(xyz.xz);
float lat = atan(xyz.y, r);
float lon = atan(xyz.x, -xyz.z);
return 0.5 * (vec2(lon / PI, -2.0 * lat / PI) + vec2(1));
}
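        // The output packs the six cube faces into a 4x3 cross layout:
        //   [ .    top    .     .    ]
        //   [ left front  right back ]
        //   [ .    bottom .     .    ]
        // 'cube' is the normalized (u, v) position over that whole 4x3 canvas.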
vec3 xyz_from_cube(in vec2 cube) {
if (cube.y > 2.0/3.0) { // lower strip
if (cube.x < 1.0/4.0) {
discard;
}
else if (cube.x > 2.0/4.0) {
discard;
}
else {
vec2 xy = (cube - vec2(3.0/8.0, 5.0/6.0)) * vec2(8, -6);
return normalize(vec3(xy.x, -1, -xy.y)); // bottom
}
}
else if (cube.y < 1.0/3.0) { // upper strip
if (cube.x < 1.0/4.0) {
discard;
}
else if (cube.x > 2.0/4.0) {
discard;
}
else { // top
vec2 xy = (cube - vec2(3.0/8.0, 1.0/6.0)) * vec2(8, -6);
return normalize(vec3(xy.x, 1, xy.y));
}
}
else { // central strip
if (cube.x < 0.25) {
vec2 xy = (cube - vec2(1.0/8.0, 0.5)) * vec2(8, -6);
return normalize(vec3(-1, xy.y, -xy.x)); // left
}
else if (cube.x < 0.50) { // front
vec2 xy = (cube - vec2(3.0/8.0, 0.5)) * vec2(8, -6);
return normalize(vec3(xy.x, xy.y, -1));
}
else if (cube.x < 0.75) { // right
vec2 xy = (cube - vec2(5.0/8.0, 0.5)) * vec2(8, -6);
return normalize(vec3(1, xy.y, xy.x));
}
else { // back
vec2 xy = (cube - vec2(7.0/8.0, 0.5)) * vec2(8, -6);
return normalize(vec3(-xy.x, xy.y, 1));
}
}
}
void main() {
vec3 xyz = xyz_from_cube(tex_coord);
vec2 eq = equirect_from_xyz(xyz);
// Use explicit level of detail to avoid seam at z==1, lon==PI
// Use explicit gradients, to preserve anisotropic filtering during mipmap lookup
vec2 dpdx = dFdx(eq);
if (dpdx.x > 0.5) dpdx.x -= 1; // use "repeat" wrapping on gradient
if (dpdx.x < -0.5) dpdx.x += 1;
vec2 dpdy = dFdy(eq);
frag_color = textureGrad(equirect, eq, dpdx, dpdy);
// frag_color = vec4(eq, 0.5, 1);
// frag_color = vec4(xyz, 1);
// frag_color = vec4(tex_coord, 1, 1);
// frag_color = vec4(xyz_from_equirect(tex_coord), 1);
}
""", GL.GL_FRAGMENT_SHADER)
self.shader = shaders.compileProgram(vtx, frg)
# Bind the input equirectangular image
equi_tex = GL.glGenTextures(1)
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, equi_tex)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, input_internal_format, ew, eh, 0, GL.GL_RGB, gl_type, arr)
aniso = GL.glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, aniso)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT);
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_MIRRORED_REPEAT);
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR);
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR);
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
# init
GL.glDisable(GL.GL_BLEND)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glViewport(0, 0, cw, ch)
GL.glClearColor(0.5, 0.5, 0.5, 0.0)
# Render the image
bToScreen = False
if bToScreen:
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
# while not glfw.window_should_close(w):
for _ in range(100):
self.render_scene()
glfw.swap_buffers(w)
else:
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, fb)
self.render_scene()
GL.glFinish()
# fetch the rendered image
result = numpy.zeros(shape=(ch, cw, 4), dtype=arr.dtype)
GL.glReadPixels(0, 0, cw, ch, GL.GL_RGBA, gl_type, result)
print(cw, ch)
print(result.shape)
# print(result.shape)
# clean up
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
GL.glDeleteTextures([cube_color_tex,])
GL.glDeleteFramebuffers([fb,])
glfw.destroy_window(w)
glfw.terminate()
# raise NotImplementedError()
return result
def to_cube(arr):
w = arr.shape[0]
h = arr.shape[1]
aspect = w / h
if aspect == 2:
return Converter().cube_from_equirect(arr)
raise NotImplementedError()
def main(arr):
if (arr.dtype == numpy.float32):
# Clip data to percentile range with dynamic range below 65535
pct_low = 0
pct_high = 100
val_low, val_high = numpy.percentile(arr[numpy.nonzero(arr)], [pct_low, pct_high])
dynamic_range = val_high / val_low
eps = 0.07
while dynamic_range > 65535:
pct_low = eps
pct_high = 100.0 - eps
val_low, val_high = numpy.percentile(arr[numpy.nonzero(arr)], [pct_low, pct_high])
dynamic_range = val_high / val_low
print(pct_low, pct_high, val_low, val_high, dynamic_range)
eps *= 1.2
arr *= 65535.0 / val_high
arr[arr>65535] = 65535
arr[arr<0] = 0
# print(numpy.histogram(arr))
arr = arr.astype('uint16')
cube = Converter().cube_from_equirect(arr)
return cube
if __name__ == "__main__":
if True:
tif = TIFF.open('1w180.9.tiff', 'r')
arr = tif.read_image()
tif.close()
else:
jpeg = Image.open('_0010782_stitch2.jpg')
arr = numpy.array(jpeg)
cube = main(arr)
if cube.dtype == numpy.uint16:
img = png.from_array(cube, 'RGBA')
img.save('cube.png')
else:
Image.fromarray(cube).save('cube.jpg', quality=95)
| 38.919118 | 129 | 0.53344 | 8,733 | 0.824957 | 0 | 0 | 0 | 0 | 0 | 0 | 5,080 | 0.479879 |
f08133a0ab8681553c9936415f848d5882f36db1 | 1,150 | py | Python | src/controllers/storage.py | koddas/python-oop-consistency-lab | 8ee3124aa230359d296fdfbe0c23773602769c8c | ["MIT"] | null | null | null | src/controllers/storage.py | koddas/python-oop-consistency-lab | 8ee3124aa230359d296fdfbe0c23773602769c8c | ["MIT"] | null | null | null | src/controllers/storage.py | koddas/python-oop-consistency-lab | 8ee3124aa230359d296fdfbe0c23773602769c8c | ["MIT"] | null | null | null | from entities.serializable import Serializable
class Storage:
'''
Storage represents a file storage that stores and retrieves objects
'''
def __init__(self):
pass
def save(self, filename: str, data: Serializable) -> bool:
'''
Stores a serializable object. If the object isn't explicitly marked as
being serializable, this method will fail.
'''
if not issubclass(data.__class__, Serializable):
return False
f = open(filename, "w")
f.write(data.serialize())
f.close()
return True
def read(self, filename: str, class_name: type) -> Serializable:
'''
        Retrieves a serialized object. You specify the type of the object to
deserialize by passing the class (as a type, not a string) as the
second parameter.
'''
if not issubclass(class_name, Serializable):
return None
f = open(filename, "r")
data = f.read()
f.close()
deserialized = class_name.deserialize(data)
return deserialized | 28.75 | 78 | 0.578261 | 1,102 | 0.958261 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.369565 |
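# Usage sketch (editor's addition, not in the original file; assumption: a
# Serializable subclass provides a serialize() instance method and a
# deserialize() classmethod, which is the contract Storage relies on above):
if __name__ == "__main__":
    class Note(Serializable):
        def __init__(self, text: str):
            self.text = text
        def serialize(self) -> str:
            return self.text
        @classmethod
        def deserialize(cls, data: str) -> "Note":
            return cls(data)
    storage = Storage()
    storage.save("note.txt", Note("hello"))
    print(storage.read("note.txt", Note).text)  # -> hello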
f081d74683e4da50d27ee2a254cfa3157f59305b | 924 | py | Python | tests/functional/modules/test_zos_tso_command.py | IBM/zos-core-collection-ftp | 017d2e031d64984571bd9bb330f49adaced387a6 | [
"Apache-2.0"
] | 4 | 2021-03-17T02:24:02.000Z | 2022-01-28T22:08:17.000Z | tests/functional/modules/test_zos_tso_command.py | IBM/zos-core-collection-ftp | 017d2e031d64984571bd9bb330f49adaced387a6 | [
"Apache-2.0"
] | null | null | null | tests/functional/modules/test_zos_tso_command.py | IBM/zos-core-collection-ftp | 017d2e031d64984571bd9bb330f49adaced387a6 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import warnings
import ansible.constants
import ansible.errors
import ansible.utils
import pytest
from pprint import pprint
# The positive path test
def test_zos_tso_command_listuser(ansible_adhoc):
hosts = ansible_adhoc(inventory='localhost', connection='local')
print('--- hosts.all ---')
pprint(hosts.all)
pprint(hosts.all.options)
pprint(vars(hosts.all.options['inventory_manager']))
pprint(hosts.all.options['inventory_manager']._inventory.hosts)
hosts.all.options['inventory_manager']._inventory.hosts
results = hosts.localhost.zos_tso_command(commands=["LU"])
print('--- results.contacted ---')
pprint(results.contacted)
for result in results.contacted.values():
assert result.get("output")[0].get("rc") == 0
assert result.get("changed") is True
| 30.8 | 68 | 0.737013 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.183983 |
f0827a9bc1fab116569d8485fa3cf7975cc20e07 | 1,859 | py | Python | Medium/78.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 6 | 2017-09-25T18:05:50.000Z | 2019-03-27T00:23:15.000Z | Medium/78.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 1 | 2017-10-29T12:04:41.000Z | 2018-08-16T18:00:37.000Z | Medium/78.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | null | null | null | # ------------------------------
# 78. Subsets
#
# Description:
# Given a set of distinct integers, nums, return all possible subsets (the power set).
# Note: The solution set must not contain duplicate subsets.
#
# For example,
# If nums = [1,2,3], a solution is:
# [
# [3],
# [1],
# [2],
# [1,2,3],
# [1,3],
# [2,3],
# [1,2],
# []
# ]
#
# Version: 1.0
# 01/20/18 by Jianfa
# ------------------------------
class Solution(object):
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums:
return [[]]
result = []
for i in range(len(nums) + 1):
result += self.combine(nums, i)
return result
def combine(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[List[int]]
"""
res = []
currlist = []
self.backtrack(nums, k, currlist, 0, res)
return res
def backtrack(self, nums, k, currlist, start, res):
if len(currlist) == k:
temp = [x for x in currlist]
res.append(temp)
elif len(nums) - start + len(currlist) < k:
return
else:
for i in range(start, len(nums)):
currlist.append(nums[i])
self.backtrack(nums, k, currlist, i+1, res)
currlist.pop()
# Used for testing
if __name__ == "__main__":
test = Solution()
nums = [1,3,5]
test.subsets(nums)
# ------------------------------
# Summary:
# Borrow the combine idea from 77.py. The major difference is here a number list is provided.
# The number list may include discontinuous integers. So the parameter "start" here means index
# rather than number itself. | 24.142857 | 96 | 0.483593 | 1,046 | 0.562668 | 0 | 0 | 0 | 0 | 0 | 0 | 866 | 0.465842 |
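# Illustration (editor's addition, not part of the original solution): the same
# "combine sizes 0..n" idea expressed with the standard library, with output
# equivalent to Solution.subsets above.
from itertools import combinations
def subsets_short(nums):
    return [list(c) for k in range(len(nums) + 1) for c in combinations(nums, k)]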
f0827ff350329e8456da34903e3aafb85e4c8ff7 | 10,707 | py | Python | blueprints/finance/views.py | shuxiang/MT-WMS | 38ef18baed6d9eddb88d43da2eeed55988410daf | [
"Apache-2.0"
] | 1 | 2022-03-11T05:42:25.000Z | 2022-03-11T05:42:25.000Z | blueprints/finance/views.py | shuxiang/MT-WMS | 38ef18baed6d9eddb88d43da2eeed55988410daf | [
"Apache-2.0"
] | null | null | null | blueprints/finance/views.py | shuxiang/MT-WMS | 38ef18baed6d9eddb88d43da2eeed55988410daf | [
"Apache-2.0"
] | null | null | null | #coding=utf8
import json
from sqlalchemy import func, or_
from pprint import pprint
from datetime import datetime, timedelta
from random import randint
from flask import Blueprint, g, request, jsonify
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
from extensions.database import db
from extensions.permissions import admin_perm, manager_perm, normal_perm
from models.inv import Inv, Good, Category, InvRfid
from models.inv import InvMove, InvAdjust, InvCount
from models.stockin import Stockin
from models.stockout import Stockout
from models.finance import Money, MoneySummary, MoneyAccount
from models.auth import Partner
from utils.flask_tools import json_response, gen_csv
from utils.functions import gen_query, clear_empty, json2mdict, json2mdict_pop
from utils.functions import update_model_with_fields, m2dict, copy_and_update_model, common_poplist
from utils.functions import gen_query
from utils.base import Dict, DictNone
from blueprints.finance.action import FinanceAction
import settings
bp_finance = Blueprint("finance", __name__)
# fetch order statistics - counts by order status
@bp_finance.route('/money', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@bp_finance.route('/money/<int:money_id>', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@normal_perm.require()
def money_api(money_id=None):
if request.method == 'GET':
query = Money.query.t_query
res = gen_query(request.args.get('q', None), query, Money, db=db, per_page=settings.PER_PAGE)
subq = Money.query.with_entities(func.sum(Money.amount).label('amount'), func.sum(Money.real).label('real'), func.sum(Money.bad).label('bad')).t_query
subq = gen_query(request.args.get('q', None), subq, Money, db=db, export=True)
income = subq.filter_by(come='income').order_by(None).group_by(Money.come).first()
outcome = subq.filter_by(come='outcome').order_by(None).group_by(Money.come).first()
res['income_amount'] = income.amount if income else 0
res['income_real'] = income.real if income else 0
res['income_bad'] = income.bad if income else 0
res['outcome_amount'] = outcome.amount if outcome else 0
res['outcome_real'] = outcome.real if outcome else 0
res['outcome_bad'] = outcome.bad if outcome else 0
return json_response(res)
if request.method == 'POST':
is_clear = request.json.pop('is_clear', False)
date_forcount = request.json.pop('date_forcount', '')
remark = request.json.pop('remark', '')
m = Money(user_code=g.user.code, company_code=g.company_code, owner_code=g.owner_code, warehouse_code=g.warehouse_code,
**json2mdict_pop(Money, clear_empty(request.json)))
m.code = m.code.strip()
db.session.add(m)
m.is_clear = is_clear
m.date_forcount = date_forcount[:10] if date_forcount else datetime.now().date()
m.remark = remark
if m.code:
m.subcode = 'extra'
if m.partner_code:
_partner = Partner.query.c_query.filter_by(code=m.partner_code).first()
if _partner:
m.partner_str = '%s %s' % (_partner.name, _partner.tel or _partner.code)
m.partner_name = _partner.name
m.partner_id = _partner.id
else:
m.partner_str = m.partner_code
db.session.commit()
return json_response({'status': 'success', 'msg': 'ok', 'data': m.as_dict})
if request.method == 'PUT':
is_clear = request.json.pop('is_clear', False)
date_forcount = request.json.pop('date_forcount', '')
remark = request.json.pop('remark', '')
request.json.pop('real', None)
m = Money.query.t_query.filter_by(id=money_id).first()
m.update(json2mdict_pop(Money, clear_empty(request.json, False)))
m.code = m.code.strip()
m.is_clear = is_clear
m.date_forcount = date_forcount[:10] or datetime.now().date()
m.remark = remark or m.remark
if m.code:
m.subcode = 'extra'
if m.partner_code:
_partner = Partner.query.c_query.filter_by(code=m.partner_code).first()
if _partner:
m.partner_str = '%s %s' % (_partner.name, _partner.tel or _partner.code)
m.partner_name = _partner.name
m.partner_id = _partner.id
else:
m.partner_str = m.partner_code
db.session.commit()
return json_response({'status': 'success', 'msg': 'ok',})
if request.method == 'DELETE':
m = Money.query.t_query.filter_by(id=money_id).with_for_update().first()
if m.state == 'create':
m.state = 'cancel'
db.session.commit()
return json_response({'status': 'success', 'msg': 'ok',})
return ''
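# Usage sketch (editor's addition; hypothetical host and field values, with the
# keys mirroring what the POST handler above reads from request.json):
# import requests
# requests.post("http://localhost:5000/money", json={
#     "come": "income", "amount": 100.0, "partner_code": "P001",
#     "is_clear": False, "date_forcount": "2022-03-11", "remark": "demo"})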
# record receipt/payment transactions
@bp_finance.route('/money/<action>/<int:money_id>', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@normal_perm.require()
def money_action_api(action, money_id=None):
"""
post req:
{real, money_id, ref_user_code, remark}
"""
if request.method == 'POST':
        fin_action = FinanceAction()  # renamed to avoid shadowing the <action> path parameter
        money = Money.query.t_query.filter_by(id=money_id).first()
        # (real, money, ref_user_code=None, remark='')
        ok = fin_action.do_money_trans(money=money, **clear_empty(request.json))
db.session.commit()
if ok:
return json_response({'status': 'success', 'msg': 'ok'})
else:
return json_response({'status': 'fail', 'msg': u'请输入正确的数字'})
return ''
# query recorded receipt/payment transactions
@bp_finance.route('/money/trans/<int:money_id>', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@normal_perm.require()
def money_trans_api(money_id=None):
m = Money.query.t_query.filter_by(id=money_id).first()
data = [r.as_dict for r in m.lines]
return json_response({'status': 'success', 'msg': u'ok', 'data': data})
# fetch order statistics - counts by order status
@bp_finance.route('/money/summary', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@normal_perm.require()
def money_summary_api():
if request.method == 'POST':
month = request.json.get('month', None)
xtype = request.json.get('xtype', None)
come = request.json.get('come', None)
action = FinanceAction()
table, month = action.summary(month=month, xtype=xtype, come=come)
return json_response({'status': 'success', 'data': table, 'month': month, 'msg': 'ok'}, indent=4)
return ''
# statistics by month - data for the last 12 months
@bp_finance.route('/money/month/summary', methods=('GET', 'POST'))
@normal_perm.require()
def money_month_summary_api():
action = FinanceAction()
now = datetime.now()
year_ago = now.date() - timedelta(days=365)
mon = func.date_format(Money.date_forcount, '%Y-%m').label('mon')
current_mon = now.strftime('%Y-%m')
real = func.sum(Money.real).label('real')
for m in Money.query.with_entities(Money.xtype, Money.come, mon).t_query.filter(Money.date_forcount > year_ago).group_by(Money.xtype, Money.come, mon).all():
# print m.xtype, m.come, m.mon
table1 = []
table2 = []
if MoneySummary.query.t_query.filter_by(xtype=m.xtype, come=m.come, month=m.mon).count() == 0:
table1, month1 = action.summary(m.mon, m.xtype, m.come)
if MoneySummary.query.t_query.filter_by(come=m.come, month=m.mon).count() == 0:
table2, month2 = action.summary(m.mon, come=m.come)
for t in table1+table2:
if MoneySummary.query.t_query.filter_by(xtype=t.xtype, come=t.come, month=m.mon).count() == 0:
ms = MoneySummary(company_code=g.company_code, owner_code=g.owner_code, warehouse_code=g.warehouse_code)
# ms.real = t.real
ms.real = t.month_real
ms.amount = t.amount
ms.xtype = t.xtype
ms.come = t.come
ms.month = m.mon
db.session.add(ms)
            # the current month gets refreshed
else:
if m.mon == current_mon:
ms = MoneySummary.query.t_query.filter_by(xtype=t.xtype, come=t.come, month=m.mon).first()
ms.real = t.month_real
ms.amount = t.amount
db.session.commit()
data = DictNone({'in':[], 'out':[], 'in_real':[], 'out_real':[]})
res = MoneySummary.query.t_query.filter(MoneySummary.month > year_ago.strftime('%Y-%m'), MoneySummary.xtype=='').order_by(MoneySummary.month.asc()).all()
data.category = list(set([r.month for r in res]))
data.category.sort()
ind = {r.month:r for r in res if r.come == 'income'}
outd = {r.month:r for r in res if r.come == 'outcome'}
for c in data.category:
d = ind.get(c, None)
data['in'].append(d.amount if d else 0)
data['in_real'].append(d.real if d else 0)
d = outd.get(c, None)
data['out'].append(d.amount if d else 0)
data['out_real'].append(d.real if d else 0)
return json_response(data, indent=4)
# fetch order statistics - counts by order status
@bp_finance.route('/money/account', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@bp_finance.route('/money/account/<int:account_id>', methods=('GET', 'POST', 'PUT', 'PATCH', 'DELETE'))
@normal_perm.require()
def money_account_api(account_id=None):
if request.method == 'GET':
query = MoneyAccount.query.t_query
res = gen_query(request.args.get('q', None), query, MoneyAccount, db=db, per_page=settings.PER_PAGE)
return json_response(res)
if request.method == 'POST':
remark = request.json.pop('remark', '')
m = MoneyAccount(company_code=g.company_code, owner_code=g.owner_code, warehouse_code=g.warehouse_code,
**json2mdict_pop(MoneyAccount, clear_empty(request.json)))
db.session.add(m)
m.remark = remark
m.set_longname()
db.session.commit()
return json_response({'status': 'success', 'msg': 'ok', 'data': m.as_dict})
if request.method == 'PUT':
remark = request.json.pop('remark', '')
m = MoneyAccount.query.t_query.filter_by(id=account_id).first()
m.update(json2mdict_pop(MoneyAccount, clear_empty(request.json, False)))
m.remark = remark or m.remark
m.set_longname()
db.session.commit()
return json_response({'status': 'success', 'msg': 'ok',})
if request.method == 'DELETE':
        m = MoneyAccount.query.t_query.filter_by(id=account_id).with_for_update().first()
if m.state == 'on':
m.state = 'off'
db.session.commit()
return json_response({'status': 'success', 'msg': 'ok',})
return '' | 39.21978 | 161 | 0.631082 | 0 | 0 | 0 | 0 | 9,513 | 0.873153 | 0 | 0 | 1,664 | 0.152731 |
f082e2a2d09bf1830b2b1fcb472bd7a239f75622 | 716 | py | Python | layers/modules/fast_mask_iou.py | sebastian-ruiz/yolact | 0fdce34ddd38d8895445444f04c5a9d4e0384a3e | [
"MIT"
] | 1 | 2021-11-08T14:06:49.000Z | 2021-11-08T14:06:49.000Z | layers/modules/fast_mask_iou.py | sebastian-ruiz/yolact | 0fdce34ddd38d8895445444f04c5a9d4e0384a3e | [
"MIT"
] | null | null | null | layers/modules/fast_mask_iou.py | sebastian-ruiz/yolact | 0fdce34ddd38d8895445444f04c5a9d4e0384a3e | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
#locals
from data.config import Config
from utils.functions import make_net
from utils.script_module_wrapper import ScriptModuleWrapper, script_method_wrapper
class FastMaskIoUNet(ScriptModuleWrapper):
def __init__(self, config:Config):
super().__init__()
cfg = config
input_channels = 1
last_layer = [(cfg.num_classes-1, 1, {})]
self.maskiou_net, _ = make_net(input_channels, cfg.maskiou_net + last_layer, include_last_relu=True)
def forward(self, x):
x = self.maskiou_net(x)
maskiou_p = F.max_pool2d(x, kernel_size=x.size()[2:]).squeeze(-1).squeeze(-1)
return maskiou_p
| 28.64 | 108 | 0.706704 | 487 | 0.680168 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.009777 |
b2b1db3b982c41901d0ae5c563cb502c2d0bce3e | 3,366 | py | Python | audio_pouring/utils/network.py | lianghongzhuo/MultimodalPouring | 6495c7de9afad396f39bd7ac25e1a150e74479d2 | [
"MIT"
] | 5 | 2020-03-12T16:36:32.000Z | 2021-01-28T18:23:19.000Z | audio_pouring/utils/network.py | lianghongzhuo/MultimodalPouring | 6495c7de9afad396f39bd7ac25e1a150e74479d2 | [
"MIT"
] | null | null | null | audio_pouring/utils/network.py | lianghongzhuo/MultimodalPouring | 6495c7de9afad396f39bd7ac25e1a150e74479d2 | [
"MIT"
] | 1 | 2020-03-11T17:09:28.000Z | 2020-03-11T17:09:28.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 15/10/2019: 22:13
# File Name : network
import argparse
import numpy as np
import torch
def worker_init_fn(pid):
np.random.seed(torch.initial_seed() % (2 ** 31 - 1))
def my_collate(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
def parse():
parser = argparse.ArgumentParser(description="audio2height")
parser.add_argument("--tag", type=str, default="")
parser.add_argument("--epoch", type=int, default=500)
parser.add_argument("--mode", choices=["train", "test"], default="train")
parser.add_argument("--bs", type=int, default=10)
parser.add_argument("--hidden-dim", type=int, default=256)
parser.add_argument("--layer-num", type=int, default=1)
parser.add_argument("--lstm", action="store_true")
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--bottle-train", type=str, default="0")
parser.add_argument("--bottle-test", type=str, default="")
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--snr_db", type=float, required=True)
parser.add_argument("--mono-coe", type=float, default=0.001)
parser.add_argument("--load-model", type=str, default="")
parser.add_argument("--load-epoch", type=int, default=-1)
parser.add_argument("--model-path", type=str, default="./assets/learned_models", help="pre-trained model path")
parser.add_argument("--data-path", type=str, default="h5py_dataset", help="data path")
parser.add_argument("--log-interval", type=int, default=10)
parser.add_argument("--save-interval", type=int, default=10)
parser.add_argument("--robot", action="store_true")
parser.add_argument("--multi", action="store_true")
parser.add_argument("--minus_wrench_first", action="store_true")
parser.add_argument("--stft_force", action="store_true")
parser.add_argument("--bidirectional", action="store_true")
parser.add_argument("--draw_acc_fig", action="store_true")
parser.add_argument("--acc_fig_name", type=str, default="")
parser.add_argument("--multi-detail", choices=["2loss2rnn", "2loss1rnn", "1loss1rnn", "audio_only", "a_guide_f",
"a_f_early_fusion", "force_only", "1loss2rnn"], default="audio_only")
args = parser.parse_args()
if args.bottle_test == "":
args.bottle_test = args.bottle_train
if args.tag != "":
args.tag += "_"
base = args.tag + "{}_{}{}_h{}_bs{}_bottle{}to{}_mono_coe{}_snr{}_{}_{}_{}_{}"
tag = base.format("multi" if args.multi else "audio", "lstm" if args.lstm else "gru", args.layer_num,
args.hidden_dim, args.bs, args.bottle_train, args.bottle_test, args.mono_coe, args.snr_db,
args.multi_detail, "minus_wrench_first" if args.minus_wrench_first else "raw",
"stft_force" if args.stft_force else "raw_force",
"bidirectional" if args.bidirectional else "unidirectional")
args.tag = tag
args.acc_fig_name = "snr{}_{}".format(args.snr_db, "lstm" if args.lstm else "gru")
return args
| 49.5 | 120 | 0.659834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,051 | 0.31224 |
b2b1ff5ef4ba336018262956f57a372c5c93879b | 4,312 | py | Python | FederatedSDNSecurity/main.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | 1 | 2021-09-25T04:17:55.000Z | 2021-09-25T04:17:55.000Z | FederatedSDNSecurity/main.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | null | null | null | FederatedSDNSecurity/main.py | Beaconproj/CrossCloudVNFSimulation | 97023e05b57e54503259ae866608de6189b8c9a9 | [
"MIT"
] | null | null | null |
'''
Created on 12 janv. 2016
@author: phm
'''
import FederatedSDN
import FederatedSDNSecurity
import FederatedSecurityAgent
import VNFManager
import CloudManager
import ssl, socket
inputMessage="Press enter to continue"
federationCloudManagers=[]
cloudFederationMembers=[
["cloud_man_1","vnf_manager_1", "network_segment_1", [["" ]],[""]],
["cloud_man_2","vnf_manager_2", "network_segment_2", [[""]],[""]],
["cloud_man_3","vnf_manager_3", "network_segment_3", [[""]],[""]]
]
print "-----------Initial setup of Cloud_1, cloud_2 and cloud_3 -----------"
cloudMember=""
for cloudMember in cloudFederationMembers:
# create a cloud manager
cloud_manager=CloudManager.CloudManager(cloudMember[0])
print "Cloud manager", cloud_manager.getName(), "in federation"
federationCloudManagers.append(cloud_manager)
# set the network segments
cloud_manager.setNetworkSegments(cloudMember[2])
# create a VNF manager
vnfManager=VNFManager.VNFManager(cloudMember[1])
cloud_manager.setVNFManager(vnfManager)
print "------------ start Federated SDN ---------------"
# create a federated SDN
fedSDN=FederatedSDN.FederatedSDN("fedSDN_1")
print "FederatedSDN", fedSDN.getIdentifier(), "created"
# create a Federated SDN security
fedSDNSecurity=FederatedSDNSecurity.FederatedSDNSecurity("fedSDNSec_1")
print "FederatedSDNSecurity", fedSDNSecurity.getName(), "created"
print "------------- Create a Federated Cloud Network --------------------"
# get the network segments to be federated
network_segments=["network_segment_1","network_segment_2"]
#cloud_member=""
#for cloud_member in cloudFederationMembers:
# network_segments.append(cloud_member[2])
#print "network segments:", network_segments
fedSDN.createNetworkFederation("FedCloudNetwork_1", network_segments)
print "Federated network", fedSDN.getNetworkFederationSegments("FedCloudNetwork_1"), "created"
# Associate a FederatedSecurityAgent with each network segment"
cloudManager=""
for cloudManager in federationCloudManagers:
network_segment=cloudManager.getNetworkSegments()
fedSecAg=FederatedSecurityAgent.FederatedSecurityAgent("fedSecAg_"+network_segment[0])
#print "SecAgent", fedSecAg.getName(), "created"
fedSecAg.setVNFManager(cloudManager.getVNFManager())
fedSecAg.setNetworkSegment(cloudManager.getNetworkSegments())
fedSDNSecurity.addSecurityAgent(fedSecAg)
#print "----------- Analyse existing security VNF of federation network segments ----------"
#print "------------- Adapt VNF to respect global security policy: start new VNF and re-configure existing VNF --------"
print "------------- Deploy, configure and start VNF to respect global security policy --------"
wait = raw_input(inputMessage)
fedSDNSecurity.readYAMLfile("YAML1.txt")
#fedSDNSecurity.readYAMLfileV2("Cloud1-2-Heat.yaml")
print "-------- Verify that global security policy is correctly implemented in each federation cloud network ----------"
wait = raw_input(inputMessage)
fedSDNSecurity.verifySecurityPolicy(fedSDN)
print "------------- Run the network federation --------------"
wait = raw_input(inputMessage)
print "VM_1: send packet to VM_2 with protocol HTTP"
print "VM_2: received packet from VM_1"
print " "
print "VM_1: send packet to VM_2 with protocol SKYPE"
print "DPI_1: unauthorized protocol detected: SKYPE"
print "FW_1: reconfiguring firewall on network network_segment_1 to block SKYPE protocol"
print "------------- now add a new network_segment_3 to the federation and extend the security policy--------------"
wait = raw_input(inputMessage)
# add network segment to federation
fedSDN.addNetworkSegment("network_segment_3")
print "Federated network", fedSDN.getNetworkFederationSegments("FedCloudNetwork_1"), "extended"
fedSDNSecurity.readYAMLfile("YAML2.txt")
print "-------- Verify that global security policy is implemented VNF per network Segment ----------"
wait = raw_input(inputMessage)
fedSDNSecurity.verifySecurityPolicy(fedSDN)
print "------------- Run the network federation --------------"
wait = raw_input(inputMessage)
print "VM_1: send packet to VM_3 with protocol X "
print "ENCRYPT_1: VM_3 is in untrusted cloud: encrypt packet"
print "DECRYPT_3: packet for VM_3 from VM_1 is encrypted: decrypt packet using key XXX "
| 30.8 | 120 | 0.73539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,421 | 0.561456 |
b2b2407d4c36f7d2b4d5556ee9ab15297445f03f | 5,121 | py | Python | WCET_stats.py | FTOD/ZExp | f7e2e1ab3ce1964022cb1c5d8c9d0b1ce1ee7b56 | [
"MIT"
] | null | null | null | WCET_stats.py | FTOD/ZExp | f7e2e1ab3ce1964022cb1c5d8c9d0b1ce1ee7b56 | [
"MIT"
] | null | null | null | WCET_stats.py | FTOD/ZExp | f7e2e1ab3ce1964022cb1c5d8c9d0b1ce1ee7b56 | [
"MIT"
] | null | null | null | import parsetools
from benchDesc import benchsDesc
import matplotlib.pyplot as plt
import matplotlib
import getopt, sys
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["arch="])
except getopt.GetoptError as err:
print(err)
sys.exit(2)
file_postfix = ""
for o,a in opts:
if o == "--arch":
if a == "simple":
file_postfix = file_postfix + "_simple"
elif a == "complex":
file_postfix = file_postfix + "_complex"
else:
print ("ERROR, the architecture must be either simple or complex")
p = parsetools.BoundedEventsCountParser()
res = p.parse_all_files("../log_2020_09/log")
res = benchsDesc.regrouping_parallel_res(res)
bounded_count = res
print("BOUNDED=", bounded_count)
p = parsetools.UnboundedEventsCountParser()
res = p.parse_all_files("../log_2020_09/log")
res = benchsDesc.regrouping_parallel_res(res)
unbounded_count = res
print("UNBOUNDED=", unbounded_count)
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_xddilp_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_xdd = res
#add a single result
print(res)
print(len(res))
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_hlts_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_hlts = res
print(res)
print(len(res))
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_WCETmax_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_max = res
print(res)
print(len(res))
p = parsetools.WcetResParser()
res = p.parse_all_files("../log_2020_09/log_exhaustive_15"+file_postfix)
res = benchsDesc.regrouping_parallel_res(res)
wcet_exhau = res
print(res)
print(len(res))
x = list(range(1,len(res)+1))
print(x)
print("=======================================================")
BIGGER_SIZE = 11
BIGGER_BIGGER_SIZE=15
matplotlib.rc('font', size=BIGGER_SIZE) # controls default text sizes
matplotlib.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
matplotlib.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
matplotlib.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
matplotlib.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
matplotlib.rc('legend', fontsize=BIGGER_BIGGER_SIZE) # legend fontsize
matplotlib.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
fig = plt.figure()
#unbound_ratio = [ float(x[1]) / float(x[1]+y[1]) for x,y in zip(unbounded_count,bounded_count)]
unbound_ratio = [( x[0], float(x[1]) / float(x[1]+y[1]) ) for x,y in zip(unbounded_count,bounded_count)]
unbound_ratio.sort(key = lambda i:i[1])
print("***************************")
print(unbound_ratio)
print("***************************")
label_order = [x[0] for x in unbound_ratio]
print(label_order)
unbound_ratio = [x[1] for x in unbound_ratio]
wcet_xdd.sort(key = lambda i: label_order.index(i[0]))
wcet_hlts.sort(key = lambda i: label_order.index(i[0]))
wcet_max.sort(key = lambda i: label_order.index(i[0]))
wcet_exhau.sort(key = lambda i: label_order.index(i[0]))
wcet_xdd = [x[1] for x in wcet_xdd]
wcet_hlts = [x[1] for x in wcet_hlts]
wcet_max = [x[1] for x in wcet_max]
wcet_exhau = [x[1] for x in wcet_exhau]
wcet_xdd = [(y-x)/y for x,y in zip(wcet_xdd,wcet_max)]
wcet_hlts = [(y-x)/y for x,y in zip(wcet_hlts,wcet_max)]
## Rounding, due to imprecision of Etime
wcet_hlts = [ 0.0 if x < 0.0 else x for x in wcet_hlts ]
wcet_exhau = [(y-x)/y for x,y in zip(wcet_exhau,wcet_max)]
print("=======================================================")
print(wcet_xdd)
print(len(res))
print("=======================================================")
print(wcet_exhau)
print(len(res))
print("=======================================================")
print(wcet_hlts)
print(len(res))
ax = fig.add_subplot(111)
width = 0.2
ax.bar([y-width for y in x],wcet_xdd,label='xdd',width=width, color ="1.0" , edgecolor='black')
ax.bar([y for y in x],wcet_exhau,label='exhaustive',width=width, color = "0.7", edgecolor='black')
ax.bar([y+width for y in x],wcet_hlts,label='Etime',width=width, color = "0",edgecolor='black')
#ax.bar([y+0.2 for y in x],wcet_max,label='MAX',width=0.5,color='darkgray')
ax.set_ylabel('WCET / WCET of max partitioning',fontsize=12)
#ax.set_xlabel('benchmark',fontsize=12)
ax.set_xticks(x)
ax.set_xticklabels(label_order,rotation=80)
ax.legend(loc='upper left')
#plt.yscale('log')
plt.ylim(top=0.6)
unbound_ratio = [x for x in unbound_ratio]
ax1 = ax.twinx()
ax1.set_ylabel("percentage on unbounded events")
ax1.plot(x,unbound_ratio,'o-',color='black')
plt.subplots_adjust(bottom=0.17,top=0.70,right=0.965,left=0.042)
plt.yticks(fontsize=15)
"""
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=True, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False
) # labels along the bottom edge are off
"""
plt.show()
#ax = df.plot.scatter(x='evt',)
| 31.036364 | 104 | 0.670767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,643 | 0.320836 |
b2b2c142b45b87b8147bfd47d58eb146d6e75472 | 610 | py | Python | schoolport/app_core/migrations/0024_auto_20210513_1020.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0024_auto_20210513_1020.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | schoolport/app_core/migrations/0024_auto_20210513_1020.py | yotink522/schoolport | c6cfd0230ca05fb44f77c2f27c7e200828547bd5 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-13 02:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_core', '0023_auto_20210513_1004'),
]
operations = [
migrations.AlterField(
model_name='tb_course',
name='pricing_standard_nos',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='tb_course',
name='remarks',
field=models.CharField(blank=True, max_length=1024, null=True),
),
]
| 25.416667 | 75 | 0.603279 | 517 | 0.847541 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.221311 |
b2b4b4908763c3a4a83c42ca39a61f42cc6d7104 | 800 | py | Python | galaxy_kickstart/binary_datatypes_to_be_added_in_galaxy.py | pajanne/galaxy-kickstart | a2e8a9d5f4e610f00548deab691d71290aa3a3b7 | [
"MIT"
] | 2 | 2016-08-04T19:16:17.000Z | 2016-08-04T19:45:58.000Z | galaxy_kickstart/binary_datatypes_to_be_added_in_galaxy.py | pajanne/galaxy-kickstart | a2e8a9d5f4e610f00548deab691d71290aa3a3b7 | [
"MIT"
] | null | null | null | galaxy_kickstart/binary_datatypes_to_be_added_in_galaxy.py | pajanne/galaxy-kickstart | a2e8a9d5f4e610f00548deab691d71290aa3a3b7 | [
"MIT"
] | null | null | null | class CompressedFastq( CompressedArchive ):
"""
Class describing an compressed fastq file
This class can be sublass'ed to implement archive filetypes that will not be unpacked by upload.py.
"""
file_ext = "fq.gz"
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
dataset.peek = "Compressed fastq file"
dataset.blurb = nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Compressed fastq file (%s)" % ( nice_size( dataset.get_size() ) )
Binary.register_unsniffable_binary_ext("fq.gz")
| 33.333333 | 107 | 0.62625 | 749 | 0.93625 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.3475 |
b2b541552dee04f9e9bcd11e4c109a74ce0c81b7 | 1,697 | py | Python | timesketch/lib/cypher/insertable_string.py | Marwolf/timesketch | 8fbbb3d0a5a50dc0214fc56a9bbec82050908103 | [
"Apache-2.0"
] | null | null | null | timesketch/lib/cypher/insertable_string.py | Marwolf/timesketch | 8fbbb3d0a5a50dc0214fc56a9bbec82050908103 | [
"Apache-2.0"
] | null | null | null | timesketch/lib/cypher/insertable_string.py | Marwolf/timesketch | 8fbbb3d0a5a50dc0214fc56a9bbec82050908103 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the InsertableString class."""
class InsertableString(object):
"""Class that accumulates insert and replace operations for a string and
later performs them all at once so that positions in the original string
can be used in all of the operations.
"""
def __init__(self, input_string):
self.input_string = input_string
self.to_insert = []
def insert_at(self, pos, s):
"""Add an insert operation at given position."""
self.to_insert.append((pos, pos, s))
def replace_range(self, start, end, s):
"""Add a replace operation for given range. Assume that all
replace_range operations are disjoint, otherwise undefined behavior.
"""
self.to_insert.append((start, end, s))
def apply_insertions(self):
"""Return a string obtained by performing all accumulated operations."""
to_insert = reversed(sorted(self.to_insert))
result = self.input_string
for start, end, s in to_insert:
result = result[:start] + s + result[end:]
return result
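# Usage sketch (editor's addition, not in the original file): positions always
# refer to the ORIGINAL string, and apply_insertions() applies the edits
# back-to-front so earlier offsets stay valid.
# s = InsertableString('MATCH (a)')
# s.insert_at(5, ' //')
# s.replace_range(7, 8, 'b')
# s.apply_insertions()  # -> 'MATCH // (b)'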
| 39.465116 | 80 | 0.696523 | 1,046 | 0.616382 | 0 | 0 | 0 | 0 | 0 | 0 | 1,101 | 0.648792 |
b2b6aed7dde137dbec9d46784b9eb3493640ecc8 | 1,322 | py | Python | networks/rnn.py | uchikun2493/nn_modules | ad3486b842fc543561d39227de5daaa475d3513a | ["MIT"] | null | null | null | networks/rnn.py | uchikun2493/nn_modules | ad3486b842fc543561d39227de5daaa475d3513a | ["MIT"] | null | null | null | networks/rnn.py | uchikun2493/nn_modules | ad3486b842fc543561d39227de5daaa475d3513a | ["MIT"] | null | null | null | # pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
# **********************************************
# rnn class
# **********************************************
class BasicRNNCell(nn.Module):
def __init__(self, inputs, hidden, outputs):
super(BasicRNNCell, self).__init__()
self.l1 = nn.LSTMCell(inputs, hidden)
self.l2 = nn.Linear(hidden, outputs)
self.hidden = hidden
# Forward
def __call__(self, x, h):
h = self.l1(x, h)
y = self.l2(h)
return y, h
def reset_hidden(self):
return torch.zeros(1, self.hidden)
class BasicRNN(nn.Module):
def __init__(self, inputs, hidden, outputs):
super(BasicRNN, self).__init__()
self.l1 = nn.LSTM(inputs, hidden, num_layers=1)
self.l2 = nn.Linear(hidden, outputs)
self.n_layer = 1
self.hidden = hidden
# Forward
def __call__(self, x, h_req=False):
n_sample = 1
        # initial state of the hidden layer
h0 = torch.zeros(self.n_layer, n_sample, self.hidden)
        # initial state of the memory cell
c0 = torch.zeros(self.n_layer, n_sample, self.hidden)
# out, h = self.l1(x, (h0, c0))
out, h = self.l1(x)
y = self.l2(out[-1])
if h_req is False:
return y
else:
return y, h
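# Usage sketch (editor's addition; assumed shapes - nn.LSTM defaults to
# batch_first=False, so the input layout is (seq_len, batch, inputs)):
# net = BasicRNN(inputs=8, hidden=16, outputs=2)
# x = torch.randn(5, 1, 8)   # seq_len=5, batch=1
# y = net(x)                 # -> shape (1, 2)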
| 24.481481 | 61 | 0.533283 | 1,164 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.164212 |
b2b6b4e0d84391cba2fed8691df94512a2cc5b7a | 1,141 | py | Python | config/settings/production.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | ["MIT"] | 1 | 2020-11-26T05:25:46.000Z | 2020-11-26T05:25:46.000Z | config/settings/production.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | ["MIT"] | null | null | null | config/settings/production.py | rimphyd/Django-OPAC | d86f2e28fee7f2ec551aeeb98ec67caefc06a3fb | ["MIT"] | null | null | null | import django_heroku
from config.settings.base import *
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s %(module)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'opac': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': False,
},
},
}
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
X_FRAME_OPTIONS = 'DENY'
django_heroku.settings(locals())
| 20.017544 | 72 | 0.576687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.325153 |
b2b7e4ac8602126a7252025c382dc07c1f558b19 | 1,181 | py | Python | fabrikApi/views/assembly/list.py | demokratiefabrik/fabrikApi | a56bb57d59a5e7cbbeeb77889c02d82f2a04c682 | [
"MIT"
] | null | null | null | fabrikApi/views/assembly/list.py | demokratiefabrik/fabrikApi | a56bb57d59a5e7cbbeeb77889c02d82f2a04c682 | [
"MIT"
] | null | null | null | fabrikApi/views/assembly/list.py | demokratiefabrik/fabrikApi | a56bb57d59a5e7cbbeeb77889c02d82f2a04c682 | [
"MIT"
] | null | null | null | """ Assemblies List View. """
import logging
from datetime import datetime
from cornice.service import Service
from fabrikApi.models.assembly import DBAssembly
from fabrikApi.models.mixins import arrow
# from fabrikApi.util.cors import CORS_LOCATION, CORS_MAX_AGE
logger = logging.getLogger(__name__)
# SERVICES
assemblies = Service(cors_origins=('*',),
name='assemblies',
description='List Assemblies.',
path='/assemblies')
@assemblies.get(permission='public')
def get_assemblies(request):
"""Returns all assemblies which are either public or accessible by the current user.
"""
# load all active assemblies
# TODO: filter only active assemblies
assemblies = request.dbsession.query(DBAssembly).all()
for assembly in assemblies:
# assembly.patch()
assembly.setup_lineage(request)
# show only assemblies with at least view permission.
assemblies = list(
filter(lambda assembly: request.has_public_permission(assembly),
assemblies)
)
assemblies = {v.identifier: v for v in assemblies}
return({
'assemblies': assemblies,
'access_date': arrow.utcnow()
})
| 25.673913 | 88 | 0.702794 | 0 | 0 | 0 | 0 | 734 | 0.621507 | 0 | 0 | 407 | 0.344623 |
b2b8a861bf96a35529dc0c381016dc12ddf8518f | 7,217 | py | Python | layers/layers.py | yangzonglin1994/yangzl-deep-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | ["MIT"] | 2 | 2018-08-10T20:02:44.000Z | 2018-08-10T20:02:50.000Z | layers/layers.py | yangzonglin1994/yangzl-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | ["MIT"] | 1 | 2018-07-30T08:54:35.000Z | 2018-07-30T08:54:35.000Z | layers/layers.py | yangzonglin1994/yangzl-text-matching | 2beadd1c2ebf2b169558b9978e0cbc66d1d25fc6 | ["MIT"] | null | null | null | import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
from keras.initializers import Ones, Zeros
from layers import transformer
class LayerNormalization(Layer):
def __init__(self, eps=1e-6, **kwargs):
super(LayerNormalization, self).__init__(**kwargs)
self.eps = eps
self.gamma = None
self.beta = None
def build(self, input_shape):
# Create trainable weight variables for this layer.
        # gamma and beta are shared across rows
self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
initializer=Ones(), trainable=True)
self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
initializer=Zeros(), trainable=True)
super(LayerNormalization, self).build(input_shape) # Be sure to call this at the end
def call(self, inputs, **kwargs):
mean = K.mean(inputs, axis=-1, keepdims=True)
std = K.std(inputs, axis=-1, keepdims=True)
        # like BN, LN also applies a scale and shift after normalizing each sample
        # broadcasting in Python
return self.gamma * (inputs - mean) / (std + self.eps) + self.beta
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'eps': self.eps}
base_config = super(LayerNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class AvgEmb(Layer):
def __init__(self, word_vec_dim, **kwargs):
super(AvgEmb, self).__init__(**kwargs)
self.word_vec_dim = word_vec_dim
    # template method pattern
def call(self, inputs, **kwargs):
inputs = tf.reduce_mean(inputs, axis=1, keepdims=True)
# return Reshape([self.word_vec_dim])(X)
return tf.reshape(inputs, [-1, self.word_vec_dim])
    # since this is a static computation graph framework, shapes are unreliable and may not be the expected values
    # prefer known values wherever possible
def compute_output_shape(self, input_shape, **kwargs):
return input_shape[0], self.word_vec_dim
    # methods related to saving
    # config = layer.get_config() or model.get_config() => a dict holding this layer's configuration
    # layer = Layer.from_config(config) or
    # model = Model.from_config(config) or Sequential.from_config(config)
    # since Keras' other layers don't override from_config, my custom layer doesn't either
def get_config(self):
config = {'word_vec_dim': self.word_vec_dim}
base_config = super(AvgEmb, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Repeat(Layer):
def __init__(self, rep, axis, batch_size, **kwargs):
Layer.__init__(self, **kwargs)
self.rep = rep
self.axis = axis
self.batch_size = batch_size
def call(self, inputs, **kwargs):
return K.repeat_elements(inputs, self.rep, self.axis)
def compute_output_shape(self, input_shape):
axis = self.axis
if axis == 0:
return self.rep*self.batch_size, input_shape[1], input_shape[2]
elif axis == 1:
return self.batch_size, self.rep*input_shape[1], input_shape[2]
elif axis == 2:
return self.batch_size, input_shape[1], self.rep*input_shape[2]
else:
raise ValueError('axis not in [0, 1, 2]')
def get_config(self):
config = {'rep': self.rep,
'axis': self.axis,
'batch_size': self.batch_size}
base_config = Layer.get_config(self)
# dict.items() will return a set-like object
return dict(base_config.items() | config.items())
class ScaledDotProduct(Layer):
def __init__(self, temper, **kwargs):
super(ScaledDotProduct, self).__init__(**kwargs)
self.temper = temper
def call(self, inputs, **kwargs):
q, k = inputs[0], inputs[1]
# K.batch_dot, batch-wise dot product
# batch-wise operation, element-wise operation, point-wise operation
        # axes=[2, 2] means "chopping off" the third dim of the batch data tensor's shape
return K.batch_dot(q, k, axes=[2, 2]) / self.temper
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
return input_shape[0], input_shape[1], input_shape[1]
def get_config(self):
config = {'temper': self.temper}
base_config = super(ScaledDotProduct, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MMask(Layer):
def __init__(self, **kwargs):
super(MMask, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
return (-1e+10) * (1-inputs)
def compute_output_shape(self, input_shape):
return input_shape
class WeightedSum(Layer):
def __init__(self, **kwargs):
super(WeightedSum, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
attn, v = inputs[0], inputs[1]
        # axes defaults to [2, 1]
return K.batch_dot(attn, v)
class Reshape1(Layer):
def __init__(self, n_head, d_k, batch_size, **kwargs):
super(Reshape1, self).__init__(**kwargs)
self.n_head = n_head
self.d_k = d_k
self.batch_size = batch_size
def call(self, inputs, **kwargs):
n_head = self.n_head
d_k = self.d_k
s = tf.shape(inputs) # [batch_size, len_q, n_head * d_k]
inputs = tf.reshape(inputs, [s[0], s[1], n_head, d_k])
inputs = tf.transpose(inputs, [2, 0, 1, 3])
        # -1 means this dim is inferred automatically
inputs = tf.reshape(inputs, [-1, s[1], d_k]) # [n_head * batch_size, len_q, d_k]
return inputs
def compute_output_shape(self, input_shape):
return self.n_head*self.batch_size, input_shape[1], self.d_k
def get_config(self):
config = {'n_head': self.n_head,
'd_k': self.d_k,
'batch_size': self.batch_size}
base_config = super(Reshape1, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Reshape2(Layer):
def __init__(self, n_head, d_v, batch_size, **kwargs):
super(Reshape2, self).__init__(**kwargs)
self.n_head = n_head
self.d_v = d_v
self.batch_size = batch_size
def call(self, inputs, **kwargs):
n_head = self.n_head
d_v = self.d_v
s = tf.shape(inputs) # [n_head * batch_size, seq_len, d_v]
inputs = tf.reshape(inputs, [n_head, -1, s[1], s[2]])
inputs = tf.transpose(inputs, [1, 2, 0, 3])
        # n_head * s[2] would be wrong here!
inputs = tf.reshape(inputs, [-1, s[1], n_head * d_v]) # [batch_size, seq_len, n_head * d_v]
return inputs
def compute_output_shape(self, input_shape):
return self.batch_size, input_shape[1], self.n_head * self.d_v
def get_config(self):
config = {'n_head': self.n_head,
'd_v': self.d_v,
'batch_size': self.batch_size}
base_config = super(Reshape2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GetPadMask(Layer):
def __init__(self, **kwargs):
super(GetPadMask, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
return transformer.get_pad_mask(inputs, inputs)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[1], input_shape[1]
| 35.033981 | 100 | 0.620479 | 7,290 | 0.973688 | 0 | 0 | 0 | 0 | 0 | 0 | 1,248 | 0.166689 |
b2b9128938a7476610fbf31df937ff94978048ae | 1,514 | py | Python | tests/TestMetrics.py | gr33ndata/irlib | 4a518fec994b1a89cdc7d09a8170efec3d7e6615 | [
"MIT"
] | 80 | 2015-02-16T18:33:57.000Z | 2021-05-06T02:03:22.000Z | tests/TestMetrics.py | gr33ndata/irlib | 4a518fec994b1a89cdc7d09a8170efec3d7e6615 | [
"MIT"
] | 2 | 2016-02-05T06:30:21.000Z | 2017-09-24T17:42:58.000Z | tests/TestMetrics.py | gr33ndata/irlib | 4a518fec994b1a89cdc7d09a8170efec3d7e6615 | [
"MIT"
] | 25 | 2015-05-13T17:35:41.000Z | 2020-06-04T01:52:11.000Z | from unittest import TestCase
from irlib.metrics import Metrics
class TestMetrics(TestCase):
def setUp(self):
self.m = Metrics()
def test_jaccard_same_len(self):
with self.assertRaises(ValueError):
self.m.jaccard_vectors(
[0, 1],
[0, 1, 2, 3]
)
def test_jaccard_empty(self):
e = self.m.jaccard_vectors([],[])
self.assertEqual(e,1)
def test_jaccard_int(self):
e = self.m.jaccard_vectors(
[0, 2, 1, 3],
[0, 1, 2, 3]
)
self.assertEqual(e,0.75)
def test_jaccard_bool(self):
e = self.m.jaccard_vectors(
[False, False, True, True, True ],
[False, True , True, True, False]
)
self.assertEqual(e,0.4)
def test_euclid_same_len(self):
with self.assertRaises(ValueError):
self.m.euclid_vectors(
[0, 1, 2, 3],
[0, 1]
)
def test_euclid(self):
e = self.m.euclid_vectors([1,1],[4,5])
self.assertEqual(e,5)
def test_cos_same_len(self):
with self.assertRaises(ValueError):
self.m.cos_vectors(
[0, 1, 2],
[1, 1]
)
def test_cos_0(self):
c = self.m.cos_vectors([1,0,1],[0,1,0])
self.assertEqual(round(c,5),float(0))
def test_cos_1(self):
c = self.m.cos_vectors([1,1,1],[1,1,1])
self.assertEqual(round(c,5),float(1))
| 24.819672 | 47 | 0.515192 | 1,445 | 0.954425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b2bb1c7a2af64e0803771a48f87683d4a4a1c0d2 | 50,483 | py | Python | cottonformation/res/lookoutmetrics.py | gitter-badger/cottonformation-project | 354f1dce7ea106e209af2d5d818b6033a27c193c | [
"BSD-2-Clause"
] | null | null | null | cottonformation/res/lookoutmetrics.py | gitter-badger/cottonformation-project | 354f1dce7ea106e209af2d5d818b6033a27c193c | [
"BSD-2-Clause"
] | null | null | null | cottonformation/res/lookoutmetrics.py | gitter-badger/cottonformation-project | 354f1dce7ea106e209af2d5d818b6033a27c193c | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class AnomalyDetectorCsvFormatDescriptor(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.CsvFormatDescriptor"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html
Property Document:
- ``p_Charset``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-charset
- ``p_ContainsHeader``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-containsheader
- ``p_Delimiter``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-delimiter
- ``p_FileCompression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-filecompression
- ``p_HeaderList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-headerlist
- ``p_QuoteSymbol``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-quotesymbol
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.CsvFormatDescriptor"
p_Charset: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Charset"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-charset"""
p_ContainsHeader: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "ContainsHeader"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-containsheader"""
p_Delimiter: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Delimiter"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-delimiter"""
p_FileCompression: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "FileCompression"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-filecompression"""
p_HeaderList: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "HeaderList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-headerlist"""
p_QuoteSymbol: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "QuoteSymbol"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-csvformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-csvformatdescriptor-quotesymbol"""
@attr.s
class AnomalyDetectorVpcConfiguration(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.VpcConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-vpcconfiguration.html
Property Document:
- ``rp_SecurityGroupIdList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-vpcconfiguration.html#cfn-lookoutmetrics-anomalydetector-vpcconfiguration-securitygroupidlist
- ``rp_SubnetIdList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-vpcconfiguration.html#cfn-lookoutmetrics-anomalydetector-vpcconfiguration-subnetidlist
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.VpcConfiguration"
rp_SecurityGroupIdList: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "SecurityGroupIdList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-vpcconfiguration.html#cfn-lookoutmetrics-anomalydetector-vpcconfiguration-securitygroupidlist"""
rp_SubnetIdList: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "SubnetIdList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-vpcconfiguration.html#cfn-lookoutmetrics-anomalydetector-vpcconfiguration-subnetidlist"""
@attr.s
class AnomalyDetectorRDSSourceConfig(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.RDSSourceConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html
Property Document:
- ``rp_DBInstanceIdentifier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-dbinstanceidentifier
- ``rp_DatabaseHost``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-databasehost
- ``rp_DatabaseName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-databasename
- ``rp_DatabasePort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-databaseport
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-rolearn
- ``rp_SecretManagerArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-secretmanagerarn
- ``rp_TableName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-tablename
- ``rp_VpcConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-vpcconfiguration
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.RDSSourceConfig"
rp_DBInstanceIdentifier: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DBInstanceIdentifier"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-dbinstanceidentifier"""
rp_DatabaseHost: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DatabaseHost"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-databasehost"""
rp_DatabaseName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DatabaseName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-databasename"""
rp_DatabasePort: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "DatabasePort"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-databaseport"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-rolearn"""
rp_SecretManagerArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "SecretManagerArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-secretmanagerarn"""
rp_TableName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TableName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-tablename"""
rp_VpcConfiguration: typing.Union['AnomalyDetectorVpcConfiguration', dict] = attr.ib(
default=None,
converter=AnomalyDetectorVpcConfiguration.from_dict,
validator=attr.validators.instance_of(AnomalyDetectorVpcConfiguration),
metadata={AttrMeta.PROPERTY_NAME: "VpcConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-rdssourceconfig.html#cfn-lookoutmetrics-anomalydetector-rdssourceconfig-vpcconfiguration"""
@attr.s
class AnomalyDetectorTimestampColumn(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.TimestampColumn"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-timestampcolumn.html
Property Document:
- ``p_ColumnFormat``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-timestampcolumn.html#cfn-lookoutmetrics-anomalydetector-timestampcolumn-columnformat
- ``p_ColumnName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-timestampcolumn.html#cfn-lookoutmetrics-anomalydetector-timestampcolumn-columnname
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.TimestampColumn"
p_ColumnFormat: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ColumnFormat"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-timestampcolumn.html#cfn-lookoutmetrics-anomalydetector-timestampcolumn-columnformat"""
p_ColumnName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ColumnName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-timestampcolumn.html#cfn-lookoutmetrics-anomalydetector-timestampcolumn-columnname"""
@attr.s
class AnomalyDetectorJsonFormatDescriptor(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.JsonFormatDescriptor"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-jsonformatdescriptor.html
Property Document:
- ``p_Charset``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-jsonformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-jsonformatdescriptor-charset
- ``p_FileCompression``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-jsonformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-jsonformatdescriptor-filecompression
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.JsonFormatDescriptor"
p_Charset: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Charset"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-jsonformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-jsonformatdescriptor-charset"""
p_FileCompression: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "FileCompression"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-jsonformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-jsonformatdescriptor-filecompression"""
@attr.s
class AnomalyDetectorAppFlowConfig(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.AppFlowConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-appflowconfig.html
Property Document:
- ``rp_FlowName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-appflowconfig.html#cfn-lookoutmetrics-anomalydetector-appflowconfig-flowname
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-appflowconfig.html#cfn-lookoutmetrics-anomalydetector-appflowconfig-rolearn
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.AppFlowConfig"
rp_FlowName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "FlowName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-appflowconfig.html#cfn-lookoutmetrics-anomalydetector-appflowconfig-flowname"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-appflowconfig.html#cfn-lookoutmetrics-anomalydetector-appflowconfig-rolearn"""
@attr.s
class AnomalyDetectorRedshiftSourceConfig(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.RedshiftSourceConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html
Property Document:
- ``rp_ClusterIdentifier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-clusteridentifier
- ``rp_DatabaseHost``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-databasehost
- ``rp_DatabaseName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-databasename
- ``rp_DatabasePort``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-databaseport
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-rolearn
- ``rp_SecretManagerArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-secretmanagerarn
- ``rp_TableName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-tablename
- ``rp_VpcConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-vpcconfiguration
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.RedshiftSourceConfig"
rp_ClusterIdentifier: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ClusterIdentifier"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-clusteridentifier"""
rp_DatabaseHost: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DatabaseHost"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-databasehost"""
rp_DatabaseName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "DatabaseName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-databasename"""
rp_DatabasePort: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "DatabasePort"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-databaseport"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-rolearn"""
rp_SecretManagerArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "SecretManagerArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-secretmanagerarn"""
rp_TableName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TableName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-tablename"""
rp_VpcConfiguration: typing.Union['AnomalyDetectorVpcConfiguration', dict] = attr.ib(
default=None,
converter=AnomalyDetectorVpcConfiguration.from_dict,
validator=attr.validators.instance_of(AnomalyDetectorVpcConfiguration),
metadata={AttrMeta.PROPERTY_NAME: "VpcConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-redshiftsourceconfig.html#cfn-lookoutmetrics-anomalydetector-redshiftsourceconfig-vpcconfiguration"""
@attr.s
class AnomalyDetectorMetric(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.Metric"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html
Property Document:
- ``rp_AggregationFunction``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html#cfn-lookoutmetrics-anomalydetector-metric-aggregationfunction
- ``rp_MetricName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html#cfn-lookoutmetrics-anomalydetector-metric-metricname
- ``p_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html#cfn-lookoutmetrics-anomalydetector-metric-namespace
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.Metric"
rp_AggregationFunction: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "AggregationFunction"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html#cfn-lookoutmetrics-anomalydetector-metric-aggregationfunction"""
rp_MetricName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "MetricName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html#cfn-lookoutmetrics-anomalydetector-metric-metricname"""
p_Namespace: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Namespace"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metric.html#cfn-lookoutmetrics-anomalydetector-metric-namespace"""
@attr.s
class AnomalyDetectorCloudwatchConfig(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.CloudwatchConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-cloudwatchconfig.html
Property Document:
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-cloudwatchconfig.html#cfn-lookoutmetrics-anomalydetector-cloudwatchconfig-rolearn
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.CloudwatchConfig"
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-cloudwatchconfig.html#cfn-lookoutmetrics-anomalydetector-cloudwatchconfig-rolearn"""
@attr.s
class AnomalyDetectorFileFormatDescriptor(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.FileFormatDescriptor"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-fileformatdescriptor.html
Property Document:
- ``p_CsvFormatDescriptor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-fileformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-fileformatdescriptor-csvformatdescriptor
- ``p_JsonFormatDescriptor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-fileformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-fileformatdescriptor-jsonformatdescriptor
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.FileFormatDescriptor"
p_CsvFormatDescriptor: typing.Union['AnomalyDetectorCsvFormatDescriptor', dict] = attr.ib(
default=None,
converter=AnomalyDetectorCsvFormatDescriptor.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorCsvFormatDescriptor)),
metadata={AttrMeta.PROPERTY_NAME: "CsvFormatDescriptor"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-fileformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-fileformatdescriptor-csvformatdescriptor"""
p_JsonFormatDescriptor: typing.Union['AnomalyDetectorJsonFormatDescriptor', dict] = attr.ib(
default=None,
converter=AnomalyDetectorJsonFormatDescriptor.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorJsonFormatDescriptor)),
metadata={AttrMeta.PROPERTY_NAME: "JsonFormatDescriptor"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-fileformatdescriptor.html#cfn-lookoutmetrics-anomalydetector-fileformatdescriptor-jsonformatdescriptor"""
@attr.s
class AnomalyDetectorS3SourceConfig(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.S3SourceConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html
Property Document:
- ``rp_FileFormatDescriptor``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-fileformatdescriptor
- ``rp_RoleArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-rolearn
- ``p_HistoricalDataPathList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-historicaldatapathlist
- ``p_TemplatedPathList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-templatedpathlist
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.S3SourceConfig"
rp_FileFormatDescriptor: typing.Union['AnomalyDetectorFileFormatDescriptor', dict] = attr.ib(
default=None,
converter=AnomalyDetectorFileFormatDescriptor.from_dict,
validator=attr.validators.instance_of(AnomalyDetectorFileFormatDescriptor),
metadata={AttrMeta.PROPERTY_NAME: "FileFormatDescriptor"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-fileformatdescriptor"""
rp_RoleArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "RoleArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-rolearn"""
p_HistoricalDataPathList: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "HistoricalDataPathList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-historicaldatapathlist"""
p_TemplatedPathList: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "TemplatedPathList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-s3sourceconfig.html#cfn-lookoutmetrics-anomalydetector-s3sourceconfig-templatedpathlist"""
@attr.s
class AnomalyDetectorMetricSource(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.MetricSource"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html
Property Document:
- ``p_AppFlowConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-appflowconfig
- ``p_CloudwatchConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-cloudwatchconfig
- ``p_RDSSourceConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-rdssourceconfig
- ``p_RedshiftSourceConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-redshiftsourceconfig
- ``p_S3SourceConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-s3sourceconfig
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.MetricSource"
p_AppFlowConfig: typing.Union['AnomalyDetectorAppFlowConfig', dict] = attr.ib(
default=None,
converter=AnomalyDetectorAppFlowConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorAppFlowConfig)),
metadata={AttrMeta.PROPERTY_NAME: "AppFlowConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-appflowconfig"""
p_CloudwatchConfig: typing.Union['AnomalyDetectorCloudwatchConfig', dict] = attr.ib(
default=None,
converter=AnomalyDetectorCloudwatchConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorCloudwatchConfig)),
metadata={AttrMeta.PROPERTY_NAME: "CloudwatchConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-cloudwatchconfig"""
p_RDSSourceConfig: typing.Union['AnomalyDetectorRDSSourceConfig', dict] = attr.ib(
default=None,
converter=AnomalyDetectorRDSSourceConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorRDSSourceConfig)),
metadata={AttrMeta.PROPERTY_NAME: "RDSSourceConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-rdssourceconfig"""
p_RedshiftSourceConfig: typing.Union['AnomalyDetectorRedshiftSourceConfig', dict] = attr.ib(
default=None,
converter=AnomalyDetectorRedshiftSourceConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorRedshiftSourceConfig)),
metadata={AttrMeta.PROPERTY_NAME: "RedshiftSourceConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-redshiftsourceconfig"""
p_S3SourceConfig: typing.Union['AnomalyDetectorS3SourceConfig', dict] = attr.ib(
default=None,
converter=AnomalyDetectorS3SourceConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorS3SourceConfig)),
metadata={AttrMeta.PROPERTY_NAME: "S3SourceConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricsource.html#cfn-lookoutmetrics-anomalydetector-metricsource-s3sourceconfig"""
@attr.s
class AnomalyDetectorMetricSet(Property):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector.MetricSet"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html
Property Document:
- ``rp_MetricList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metriclist
- ``rp_MetricSetName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsetname
- ``rp_MetricSource``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsource
- ``p_DimensionList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-dimensionlist
- ``p_MetricSetDescription``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsetdescription
- ``p_MetricSetFrequency``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsetfrequency
- ``p_Offset``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-offset
- ``p_TimestampColumn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-timestampcolumn
- ``p_Timezone``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-timezone
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector.MetricSet"
rp_MetricList: typing.List[typing.Union['AnomalyDetectorMetric', dict]] = attr.ib(
default=None,
converter=AnomalyDetectorMetric.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(AnomalyDetectorMetric), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "MetricList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metriclist"""
rp_MetricSetName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "MetricSetName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsetname"""
rp_MetricSource: typing.Union['AnomalyDetectorMetricSource', dict] = attr.ib(
default=None,
converter=AnomalyDetectorMetricSource.from_dict,
validator=attr.validators.instance_of(AnomalyDetectorMetricSource),
metadata={AttrMeta.PROPERTY_NAME: "MetricSource"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsource"""
p_DimensionList: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "DimensionList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-dimensionlist"""
p_MetricSetDescription: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "MetricSetDescription"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsetdescription"""
p_MetricSetFrequency: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "MetricSetFrequency"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-metricsetfrequency"""
p_Offset: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "Offset"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-offset"""
p_TimestampColumn: typing.Union['AnomalyDetectorTimestampColumn', dict] = attr.ib(
default=None,
converter=AnomalyDetectorTimestampColumn.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(AnomalyDetectorTimestampColumn)),
metadata={AttrMeta.PROPERTY_NAME: "TimestampColumn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-timestampcolumn"""
p_Timezone: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Timezone"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lookoutmetrics-anomalydetector-metricset.html#cfn-lookoutmetrics-anomalydetector-metricset-timezone"""
#--- Resource declaration ---
@attr.s
class Alert(Resource):
"""
AWS Object Type = "AWS::LookoutMetrics::Alert"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html
Property Document:
- ``rp_Action``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-action
- ``rp_AlertSensitivityThreshold``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-alertsensitivitythreshold
- ``rp_AnomalyDetectorArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-anomalydetectorarn
- ``p_AlertDescription``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-alertdescription
- ``p_AlertName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-alertname
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::Alert"
rp_Action: dict = attr.ib(
default=None,
validator=attr.validators.instance_of(dict),
metadata={AttrMeta.PROPERTY_NAME: "Action"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-action"""
rp_AlertSensitivityThreshold: int = attr.ib(
default=None,
validator=attr.validators.instance_of(int),
metadata={AttrMeta.PROPERTY_NAME: "AlertSensitivityThreshold"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-alertsensitivitythreshold"""
rp_AnomalyDetectorArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "AnomalyDetectorArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-anomalydetectorarn"""
p_AlertDescription: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AlertDescription"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-alertdescription"""
p_AlertName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AlertName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#cfn-lookoutmetrics-alert-alertname"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-alert.html#aws-resource-lookoutmetrics-alert-return-values"""
return GetAtt(resource=self, attr_name="Arn")
@attr.s
class AnomalyDetector(Resource):
"""
AWS Object Type = "AWS::LookoutMetrics::AnomalyDetector"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html
Property Document:
- ``rp_AnomalyDetectorConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-anomalydetectorconfig
- ``rp_MetricSetList``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-metricsetlist
- ``p_AnomalyDetectorDescription``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-anomalydetectordescription
- ``p_AnomalyDetectorName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-anomalydetectorname
- ``p_KmsKeyArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-kmskeyarn
"""
AWS_OBJECT_TYPE = "AWS::LookoutMetrics::AnomalyDetector"
rp_AnomalyDetectorConfig: dict = attr.ib(
default=None,
validator=attr.validators.instance_of(dict),
metadata={AttrMeta.PROPERTY_NAME: "AnomalyDetectorConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-anomalydetectorconfig"""
rp_MetricSetList: typing.List[typing.Union['AnomalyDetectorMetricSet', dict]] = attr.ib(
default=None,
converter=AnomalyDetectorMetricSet.from_list,
validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(AnomalyDetectorMetricSet), iterable_validator=attr.validators.instance_of(list)),
metadata={AttrMeta.PROPERTY_NAME: "MetricSetList"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-metricsetlist"""
p_AnomalyDetectorDescription: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AnomalyDetectorDescription"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-anomalydetectordescription"""
p_AnomalyDetectorName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "AnomalyDetectorName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-anomalydetectorname"""
p_KmsKeyArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "KmsKeyArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#cfn-lookoutmetrics-anomalydetector-kmskeyarn"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lookoutmetrics-anomalydetector.html#aws-resource-lookoutmetrics-anomalydetector-return-values"""
return GetAtt(resource=self, attr_name="Arn")
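#--- Usage sketch (editor's illustration, not generated code) ---
# A minimal, hedged example of composing the declarations above into an
# S3-backed detector. It assumes the Resource base class exposes the
# logical id as ``id=``; every ARN and field value is a placeholder.
if __name__ == "__main__":
    example_detector = AnomalyDetector(
        id="ExampleDetector",  # assumed logical-id field of Resource
        rp_AnomalyDetectorConfig={"AnomalyDetectorFrequency": "PT1H"},
        rp_MetricSetList=[
            AnomalyDetectorMetricSet(
                rp_MetricSetName="revenue",
                rp_MetricList=[
                    AnomalyDetectorMetric(
                        rp_MetricName="revenue",
                        rp_AggregationFunction="SUM",
                    ),
                ],
                rp_MetricSource=AnomalyDetectorMetricSource(
                    p_S3SourceConfig=AnomalyDetectorS3SourceConfig(
                        rp_RoleArn="arn:aws:iam::123456789012:role/placeholder",
                        rp_FileFormatDescriptor=AnomalyDetectorFileFormatDescriptor(
                            p_JsonFormatDescriptor=AnomalyDetectorJsonFormatDescriptor(
                                p_Charset="UTF-8",
                            ),
                        ),
                    ),
                ),
            ),
        ],
    )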
| 72.952312 | 244 | 0.792247 | 50,063 | 0.99168 | 0 | 0 | 50,183 | 0.994057 | 0 | 0 | 33,008 | 0.653844 |
b2bb4d62eb2627e400fa61b892f36a5ac1c442b5 | 654 | py | Python | mlcomp/db/core/options.py | sUeharaE4/mlcomp | 1e0c23f84622ea4f72eb4e2354cef5ee16b767f4 | [
"Apache-2.0"
] | 166 | 2019-08-21T20:00:04.000Z | 2020-05-14T16:13:57.000Z | mlcomp/db/core/options.py | sUeharaE4/mlcomp | 1e0c23f84622ea4f72eb4e2354cef5ee16b767f4 | [
"Apache-2.0"
] | 14 | 2019-08-22T07:58:39.000Z | 2020-04-13T13:59:07.000Z | mlcomp/db/core/options.py | sUeharaE4/mlcomp | 1e0c23f84622ea4f72eb4e2354cef5ee16b767f4 | [
"Apache-2.0"
] | 22 | 2019-08-23T12:37:20.000Z | 2020-04-20T10:06:29.000Z | class PaginatorOptions:
def __init__(
self,
page_number: int,
page_size: int,
sort_column: str = None,
sort_descending: bool = None
):
self.sort_column = sort_column
self.sort_descending = sort_descending
self.page_number = page_number
self.page_size = page_size
        assert (page_number is not None and page_size is not None) \
            or (page_number is None and page_size is None), \
            'Specify both page_number and page_size, or neither'
if not sort_column:
self.sort_column = 'id'
self.sort_descending = True
__all__ = ['PaginatorOptions']
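if __name__ == '__main__':
    # Editor's sketch (illustrative only): first page of 20 rows with the
    # default 'id' descending ordering.
    demo = PaginatorOptions(page_number=0, page_size=20)
    print(demo.sort_column, demo.sort_descending)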
| 27.25 | 61 | 0.610092 | 620 | 0.948012 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.094801 |
b2bd1fc6f7777c13168c679b65bd978ef82ec6d2 | 164 | py | Python | pbxproj/pbxsections/PBXResourcesBuildPhase.py | JoliChen/mod-pbxproj | 24994416eec9cec838dce696c3cc9262c01ba883 | [
"MIT"
] | 1 | 2020-01-16T08:33:38.000Z | 2020-01-16T08:33:38.000Z | pbxproj/pbxsections/PBXResourcesBuildPhase.py | JoliChen/mod-pbxproj | 24994416eec9cec838dce696c3cc9262c01ba883 | [
"MIT"
] | null | null | null | pbxproj/pbxsections/PBXResourcesBuildPhase.py | JoliChen/mod-pbxproj | 24994416eec9cec838dce696c3cc9262c01ba883 | [
"MIT"
] | null | null | null | from pbxproj.pbxsections.PBXGenericBuildPhase import *
class PBXResourcesBuildPhase(PBXGenericBuildPhase):
def _get_comment(self):
return 'Resources'
| 23.428571 | 54 | 0.786585 | 106 | 0.646341 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.067073 |
b2bd5b9242c3d57e4f9ef3633085d5a608db500a | 1,014 | py | Python | Week-4/points_and_segments.py | AbhiSaphire/Algorithmic-Toolbox | abc2b9f25b3c473b93b7d8905e7da0b38cd24062 | [
"MIT"
] | 3 | 2020-06-04T09:37:57.000Z | 2020-06-15T22:55:55.000Z | Week-4/points_and_segments.py | AbhiSaphire/Algorithmic-Toolbox | abc2b9f25b3c473b93b7d8905e7da0b38cd24062 | [
"MIT"
] | 1 | 2020-06-23T13:04:43.000Z | 2020-06-23T13:06:25.000Z | Week-4/points_and_segments.py | AbhiSaphire/Algorithmic-Toolbox | abc2b9f25b3c473b93b7d8905e7da0b38cd24062 | [
"MIT"
] | 1 | 2020-10-08T13:06:05.000Z | 2020-10-08T13:06:05.000Z | import sys
from itertools import chain
def fast_count_segments(starts, ends, points):
    # Sweep line in O((n + m) log(n + m)): tag segment starts 'l', segment
    # ends 'r' and query points 'p', then sort all events by coordinate.
    # Ties sort 'l' < 'p' < 'r', so a point lying on a segment boundary is
    # counted as covered.
    cnt = [0] * len(points)
    start_points = zip(starts, ['l'] * len(starts), range(len(starts)))
    end_points = zip(ends, ['r'] * len(ends), range(len(ends)))
    point_points = zip(points, ['p'] * len(points), range(len(points)))
    sort_list = chain(start_points, end_points, point_points)
    sort_list = sorted(sort_list, key=lambda a: (a[0], a[1]))
    segment_count = 0  # number of segments currently open
    for num, letter, index in sort_list:
        if letter == 'l':
            segment_count += 1
        elif letter == 'r':
            segment_count -= 1
        else:
            cnt[index] = segment_count
    return cnt
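# Quick sanity check (editor's example): segments [0, 5] and [7, 10] with
# query points 1, 6 and 11 give counts [1, 0, 0], i.e.
# fast_count_segments([0, 7], [5, 10], [1, 6, 11]) == [1, 0, 0]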
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
n = data[0]
m = data[1]
starts = data[2:2 * n + 2:2]
ends = data[3:2 * n + 2:2]
points = data[2 * n + 2:]
cnt = fast_count_segments(starts, ends, points)
for x in cnt:
print(x, end=' ') | 31.6875 | 71 | 0.580868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.027613 |
b2bda88384a662721955747a1c788333f427aa38 | 6,822 | py | Python | main.py | anurendra/Web_IE | 4ba95320fd46d3c6fc090f3f095c7c7de78453bb | [
"Apache-2.0"
] | null | null | null | main.py | anurendra/Web_IE | 4ba95320fd46d3c6fc090f3f095c7c7de78453bb | [
"Apache-2.0"
] | null | null | null | main.py | anurendra/Web_IE | 4ba95320fd46d3c6fc090f3f095c7c7de78453bb | [
"Apache-2.0"
] | null | null | null | import argparse
import numpy as np
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import custom_collate_fn, load_data, WebDataset
from models import WebObjExtractionNet
from train import train_model, evaluate_model
from utils import print_and_log
########## CMDLINE ARGS ##########
parser = argparse.ArgumentParser('Train Model')
parser.add_argument('-d', '--device', type=int, default=0)
parser.add_argument('-e', '--n_epochs', type=int, default=100)
parser.add_argument('-bb', '--backbone', type=str, default='alexnet', choices=['alexnet', 'resnet'])
parser.add_argument('-tc', '--trainable_convnet', type=int, default=1, choices=[0,1])
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005)
parser.add_argument('-bs', '--batch_size', type=int, default=25)
parser.add_argument('-cs', '--context_size', type=int, default=6)
parser.add_argument('-att', '--attention', type=int, default=1, choices=[0,1])
parser.add_argument('-hd', '--hidden_dim', type=int, default=300)
parser.add_argument('-r', '--roi', type=int, default=1)
parser.add_argument('-bbf', '--bbox_feat', type=int, default=1, choices=[0,1])
parser.add_argument('-wd', '--weight_decay', type=float, default=0)
parser.add_argument('-dp', '--drop_prob', type=float, default=0.5)
parser.add_argument('-mbb', '--max_bg_boxes', type=int, default=-1)
parser.add_argument('-nw', '--num_workers', type=int, default=8)
args = parser.parse_args()
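# Example invocation (editor's illustration, using the flags defined above):
#   python main.py -d 0 -e 100 -bb resnet -bs 16 -att 1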
device = torch.device('cuda:%d' % args.device if torch.cuda.is_available() else 'cpu')
########## MAKING RESULTS REPRODUCIBLE ##########
seed = 1
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
########## PARAMETERS ##########
N_CLASSES = 4
CLASS_NAMES = ['BG', 'Price', 'Title', 'Image']
IMG_HEIGHT = 1280 # Image assumed to have same height and width
EVAL_INTERVAL = 3 # Number of Epochs after which model is evaluated
NUM_WORKERS = args.num_workers # multithreaded data loading
DATA_DIR = '/shared/data_product_info/v2_8.3k/' # Contains .png and .pkl files for train and test data
OUTPUT_DIR = 'results_attn' # logs are saved here!
# NOTE: if same hyperparameter configuration is run again, previous log file and saved model will be overwritten
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
SPLIT_DIR = 'splits'
train_img_ids = np.loadtxt('%s/train_imgs.txt' % SPLIT_DIR, dtype=np.int32)
val_img_ids = np.loadtxt('%s/val_imgs.txt' % SPLIT_DIR, dtype=np.int32)
test_img_ids = np.loadtxt('%s/test_imgs.txt' % SPLIT_DIR, dtype=np.int32)
test_domains = np.loadtxt('%s/test_domains.txt' % SPLIT_DIR, dtype=str) # for calculating macro accuracy
########## HYPERPARAMETERS ##########
N_EPOCHS = args.n_epochs
BACKBONE = args.backbone
TRAINABLE_CONVNET = bool(args.trainable_convnet)
LEARNING_RATE = args.learning_rate
BATCH_SIZE = args.batch_size
CONTEXT_SIZE = args.context_size
USE_ATTENTION = bool(args.attention)
HIDDEN_DIM = args.hidden_dim
ROI_POOL_OUTPUT_SIZE = (args.roi, args.roi)
USE_BBOX_FEAT = bool(args.bbox_feat)
WEIGHT_DECAY = args.weight_decay
DROP_PROB = args.drop_prob
MAX_BG_BOXES = args.max_bg_boxes if args.max_bg_boxes > 0 else -1
params = '%s lr-%.0e batch-%d cs-%d att-%d hd-%d roi-%d bbf-%d wd-%.0e dp-%.2f mbb-%d' % (BACKBONE, LEARNING_RATE, BATCH_SIZE, CONTEXT_SIZE, USE_ATTENTION,
HIDDEN_DIM, ROI_POOL_OUTPUT_SIZE[0], USE_BBOX_FEAT, WEIGHT_DECAY, DROP_PROB, MAX_BG_BOXES)
log_file = '%s/%s logs.txt' % (OUTPUT_DIR, params)
test_acc_domainwise_file = '%s/%s test_acc_domainwise.csv' % (OUTPUT_DIR, params)
model_save_file = '%s/%s saved_model.pth' % (OUTPUT_DIR, params)
print('logs will be saved in \"%s\"' % (log_file))
print_and_log('Backbone Convnet: %s' % (BACKBONE), log_file, 'w')
print_and_log('Trainable Convnet: %s' % (TRAINABLE_CONVNET), log_file)
print_and_log('Learning Rate: %.0e' % (LEARNING_RATE), log_file)
print_and_log('Batch Size: %d' % (BATCH_SIZE), log_file)
print_and_log('Context Size: %d' % (CONTEXT_SIZE), log_file)
print_and_log('Attention: %s' % (USE_ATTENTION), log_file)
print_and_log('Hidden Dim: %d' % (HIDDEN_DIM), log_file)
print_and_log('RoI Pool Output Size: (%d, %d)' % ROI_POOL_OUTPUT_SIZE, log_file)
print_and_log('BBox Features: %s' % (USE_BBOX_FEAT), log_file)
print_and_log('Weight Decay: %.0e' % (WEIGHT_DECAY), log_file)
print_and_log('Dropout Probability: %.2f' % (DROP_PROB), log_file)
print_and_log('Max BG Boxes: %d\n' % (MAX_BG_BOXES), log_file)
########## DATA LOADERS ##########
train_loader, val_loader, test_loader = load_data(DATA_DIR, train_img_ids, val_img_ids, test_img_ids, CONTEXT_SIZE, BATCH_SIZE, NUM_WORKERS, MAX_BG_BOXES)
########## CREATE MODEL & LOSS FN ##########
model = WebObjExtractionNet(ROI_POOL_OUTPUT_SIZE, IMG_HEIGHT, N_CLASSES, BACKBONE, USE_ATTENTION, HIDDEN_DIM, TRAINABLE_CONVNET, DROP_PROB,
USE_BBOX_FEAT, CLASS_NAMES).to(device)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
criterion = nn.CrossEntropyLoss(reduction='sum').to(device)
########## TRAIN MODEL ##########
train_model(model, train_loader, optimizer, criterion, N_EPOCHS, device, val_loader, EVAL_INTERVAL, log_file, 'ckpt_%d.pth' % args.device)
########## EVALUATE TEST PERFORMANCE ##########
print('Evaluating test data class wise accuracies...')
evaluate_model(model, test_loader, criterion, device, 'TEST', log_file)
with open(test_acc_domainwise_file, 'w') as f:
f.write('Domain,N_examples,%s,%s,%s\n' % (CLASS_NAMES[1], CLASS_NAMES[2], CLASS_NAMES[3]))
print('Evaluating per domain accuracy for %d test domains...' % len(test_domains))
for domain in test_domains:
print('\n---> Domain:', domain)
test_dataset = WebDataset(DATA_DIR, np.loadtxt('%s/domain_wise_imgs/%s.txt' % (SPLIT_DIR, domain), np.int32).reshape(-1), CONTEXT_SIZE, max_bg_boxes=-1)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=NUM_WORKERS, collate_fn=custom_collate_fn, drop_last=False)
per_class_acc = evaluate_model(model, test_loader, criterion, device, 'TEST')
    with open(test_acc_domainwise_file, 'a') as f:
f.write('%s,%d,%.2f,%.2f,%.2f\n' % (domain, len(test_dataset), 100*per_class_acc[1], 100*per_class_acc[2], 100*per_class_acc[3]))
macro_acc_test = np.loadtxt(test_acc_domainwise_file, delimiter=',', skiprows=1, dtype=str)[:,2:].astype(np.float32).mean(0)
for i in range(1, len(CLASS_NAMES)):
print_and_log('%s Macro Acc: %.2f%%' % (CLASS_NAMES[i], macro_acc_test[i-1]), log_file)
########## SAVE MODEL ##########
torch.save(model.state_dict(), model_save_file)
print_and_log('Model can be restored from \"%s\"' % (model_save_file), log_file)
| 48.728571 | 156 | 0.726327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,987 | 0.291264 |
b2be278f644c23228acf1f8fcc520ef3e2a07fe5 | 2,846 | py | Python | rclpy/actions/minimal_action_client/examples_rclpy_minimal_action_client/client_cancel.py | emersonknapp/examples | 36522787da5de2a2ff322d8953e3ae4b8e8ee9e7 | [
"Apache-2.0"
] | 1 | 2020-03-17T18:19:55.000Z | 2020-03-17T18:19:55.000Z | rclpy/actions/minimal_action_client/examples_rclpy_minimal_action_client/client_cancel.py | emersonknapp/examples | 36522787da5de2a2ff322d8953e3ae4b8e8ee9e7 | [
"Apache-2.0"
] | null | null | null | rclpy/actions/minimal_action_client/examples_rclpy_minimal_action_client/client_cancel.py | emersonknapp/examples | 36522787da5de2a2ff322d8953e3ae4b8e8ee9e7 | [
"Apache-2.0"
] | 1 | 2020-07-11T08:59:03.000Z | 2020-07-11T08:59:03.000Z | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from example_interfaces.action import Fibonacci
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class MinimalActionClient(Node):
def __init__(self):
super().__init__('minimal_action_client')
self._action_client = ActionClient(self, Fibonacci, 'fibonacci')
def cancel_done(self, future):
cancel_response = future.result()
if len(cancel_response.goals_canceling) > 0:
self.get_logger().info('Goal successfully canceled')
else:
self.get_logger().info('Goal failed to cancel')
rclpy.shutdown()
def goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected :(')
return
self._goal_handle = goal_handle
self.get_logger().info('Goal accepted :)')
# Start a 2 second timer
self._timer = self.create_timer(2.0, self.timer_callback)
def feedback_callback(self, feedback):
self.get_logger().info('Received feedback: {0}'.format(feedback.feedback.sequence))
def timer_callback(self):
self.get_logger().info('Canceling goal')
# Cancel the goal
future = self._goal_handle.cancel_goal_async()
future.add_done_callback(self.cancel_done)
# Cancel the timer
self._timer.cancel()
def send_goal(self):
self.get_logger().info('Waiting for action server...')
self._action_client.wait_for_server()
goal_msg = Fibonacci.Goal()
goal_msg.order = 10
self.get_logger().info('Sending goal request...')
self._send_goal_future = self._action_client.send_goal_async(
goal_msg,
feedback_callback=self.feedback_callback)
self._send_goal_future.add_done_callback(self.goal_response_callback)
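# Editor's note: run this client against a matching 'fibonacci' action
# server (e.g. the minimal_action_server example from the same repository);
# the timer started on goal acceptance cancels the goal after two seconds.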
def main(args=None):
rclpy.init(args=args)
action_client = MinimalActionClient()
action_client.send_goal()
rclpy.spin(action_client)
action_client.destroy_node()
if __name__ == '__main__':
main()
| 29.957895 | 91 | 0.697119 | 1,755 | 0.616655 | 0 | 0 | 0 | 0 | 0 | 0 | 873 | 0.306746 |
b2bf04c3b73ed2e5d7a5d3616651ad7a3f22eac7 | 1,170 | py | Python | buffers/introspective_buffer.py | GittiHab/mbrl-thesis-code | 10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e | [
"MIT"
] | null | null | null | buffers/introspective_buffer.py | GittiHab/mbrl-thesis-code | 10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e | [
"MIT"
] | null | null | null | buffers/introspective_buffer.py | GittiHab/mbrl-thesis-code | 10ecd6ef7cbb2df4bd03ce9928e344eab4238a2e | [
"MIT"
] | null | null | null | import numpy as np
from typing import Union, Optional, List, Dict, Any
from buffers.chunk_buffer import ChunkReplayBuffer
class IntrospectiveChunkReplayBuffer(ChunkReplayBuffer):
    def __init__(self, buffer_size: int, *args, **kwargs):
        super().__init__(buffer_size, *args, **kwargs)
        # Per-slot bookkeeping: how often each slot was sampled, and the
        # buffer write position when a slot was first sampled (-1 = never).
        # Plain int is used because np.int is deprecated and removed in
        # recent NumPy releases.
        self.sample_counts = np.zeros((buffer_size,), dtype=int)
        self.first_access = np.zeros((buffer_size,), dtype=int) - 1
    def _log_indices(self, indices):
        self.sample_counts[indices] += 1
        # Remember the current write position for slots sampled for the
        # first time.
        mask = np.zeros_like(self.first_access, dtype=bool)
        mask[indices] = 1
        self.first_access[(self.first_access == -1) & mask] = self.pos
def add(self,
obs: np.ndarray,
next_obs: np.ndarray,
action: np.ndarray,
reward: np.ndarray,
done: np.ndarray,
infos: List[Dict[str, Any]]
):
super().add(obs, next_obs, action, reward, done, infos)
def _get_chunk_batches(self, beginnings):
sampled_indices = super()._get_chunk_batches(beginnings)
self._log_indices(sampled_indices.flatten())
return sampled_indices
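# Usage sketch (editor's illustration): after training, the bookkeeping can
# be inspected directly, assuming ``buf`` is a populated instance:
#   never_sampled = (buf.sample_counts == 0).sum()
#   hottest_slot = buf.sample_counts.argmax()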
| 35.454545 | 70 | 0.638462 | 1,045 | 0.893162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
b2bff192f3852a8121825cff9ab0d2dc48bcad15 | 999 | py | Python | esp8266/boot.py | AlexGolovko/UltrasonicDeeper | 598020854a1bff433bce1582bf05625a6cb646c8 | [
"MIT"
] | 3 | 2020-04-21T10:51:38.000Z | 2022-03-10T18:23:56.000Z | esp8266/boot.py | AlexGolovko/UltrasonicDeeper | 598020854a1bff433bce1582bf05625a6cb646c8 | [
"MIT"
] | 5 | 2020-09-05T22:53:54.000Z | 2021-05-05T14:31:35.000Z | esp8266/boot.py | AlexGolovko/UltrasonicDeeper | 598020854a1bff433bce1582bf05625a6cb646c8 | [
"MIT"
] | 2 | 2021-01-24T19:18:42.000Z | 2021-02-26T09:41:54.000Z | # This file is executed on every boot (including wake-boot from deepsleep)
import esp
import gc
import machine
import network
esp.osdebug(None)
# machine.freq(160000000)
def do_connect(wifi_name, wifi_pass):
ssid = 'microsonar'
password = 'microsonar'
ap_if = network.WLAN(network.AP_IF)
ap_if.active(True)
# ap_if.config(essid=ssid, password=password)
ap_if.config(essid=ssid, authmode=network.AUTH_OPEN)
while not ap_if.active():
pass
print('Access Point created')
print(ap_if.ifconfig())
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
wlans = wlan.scan()
if wifi_name in str(wlans):
print('connecting to network...')
wlan.connect(wifi_name, wifi_pass)
while not wlan.isconnected():
pass
print('network config:', wlan.ifconfig())
else:
wlan.active(False)
        machine.Pin(2, machine.Pin.OUT).off()  # on many ESP8266 boards the GPIO2 LED is active-low, so off() lights it
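# Editor's note: do_connect() always brings up an open 'microsonar' access
# point first, then joins the given home network only if a scan sees it;
# otherwise the station interface is disabled and the on-board LED is lit.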
do_connect('royter', 'traveller22')
gc.collect()
print('wifi connected')  # printed unconditionally, even if no station connection was made
| 23.785714 | 74 | 0.672673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 270 | 0.27027 |
b2c0e6ac73650986189a517a410915048cd910a4 | 3,326 | py | Python | bin/check_ysim.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 1 | 2021-01-04T14:51:44.000Z | 2021-01-04T14:51:44.000Z | bin/check_ysim.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 4 | 2019-09-03T22:19:16.000Z | 2020-07-13T12:38:08.000Z | bin/check_ysim.py | ACTCollaboration/tilec | 11ed8d027ad6ffac09b3e291a047f33e97673f14 | [
"BSD-3-Clause"
] | 1 | 2020-08-10T14:51:11.000Z | 2020-08-10T14:51:11.000Z | from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap
import numpy as np
import os,sys
from tilec import utils as tutils
region = 'deep56'
#region = 'boss'
solution = 'comptony'
tdir = "/scratch/r/rbond/msyriac/data/depot/tilec/v1.2.0_20200324"
dcomb = 'joint'
dfile = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=None)
dbeam = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=None,beam=True)
sfile = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=0)
sbeam = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=0,beam=True)
tfile = tutils.get_generic_fname(tdir,region,solution,deproject=None,data_comb=dcomb,version=None,sim_index=0)
cdfile = tutils.get_generic_fname(tdir,region,"cmb",deproject=None,data_comb=dcomb,version=None,sim_index=None)
cdbeam = tutils.get_generic_fname(tdir,region,"cmb",deproject=None,data_comb=dcomb,version=None,sim_index=None,beam=True)
csfile = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=None,sim_index=0)
csbeam = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=None,sim_index=0,beam=True)
ctfile = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=None,sim_index=0)
dmap = enmap.read_map(dfile)
smap = enmap.read_map(sfile)
tmap = enmap.read_map(tfile)
cdmap = enmap.read_map(cdfile)
csmap = enmap.read_map(csfile)
ctmap = enmap.read_map(ctfile)
modlmap = dmap.modlmap()
ls,db = np.loadtxt(dbeam,unpack=True)
dbeam = maps.interp(ls,db)(modlmap)
ls,cdb = np.loadtxt(cdbeam,unpack=True)
cdbeam = maps.interp(ls,cdb)(modlmap)
ls,sb = np.loadtxt(sbeam,unpack=True)
sbeam = maps.interp(ls,sb)(modlmap)
ls,csb = np.loadtxt(csbeam,unpack=True)
csbeam = maps.interp(ls,csb)(modlmap)
#io.hplot(smap,"simmap")
bin_edges = np.arange(20,6000,20)
binner = stats.bin2D(modlmap,bin_edges)
p = lambda x: binner.bin((x*x.conj()).real)  # (bin centers, azimuthally binned |FFT|^2 power)
dk = enmap.fft(dmap,normalize='phys')/dbeam
sk = enmap.fft(smap,normalize='phys')/sbeam
# tk = enmap.fft(tmap,normalize='phys')/sbeam
cdk = enmap.fft(cdmap,normalize='phys')/cdbeam
csk = enmap.fft(csmap,normalize='phys')/csbeam
# ctk = enmap.fft(ctmap,normalize='phys')/ctbeam
cents,d1d = p(dk)
cents,s1d = p(sk)
# cents,t1d = p(tk)
cents,cd1d = p(cdk)
cents,cs1d = p(csk)
# cents,ct1d = p(ctk)
pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='$D^{yy}_l$' ,scalefn = lambda x: x**2./2./np.pi)
#pl = io.Plotter('Dell')
pl.add(cents,d1d,label='data')
pl.add(cents,s1d,label='sim')
# pl.add(cents,t1d,label='new sim')
#pl._ax.set_ylim(1e-14,5e-10)
pl.done("dcomp.png")
pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='$D^{\\rm{CMB}}_l$' ,scalefn = lambda x: x**2./2./np.pi)
pl.add(cents[cents>5000],cd1d[cents>5000],label='data') # blinding ACT CMB data ell<5000
pl.add(cents,cs1d,label='sim')
# pl.add(cents,ct1d,label='new sim')
#pl._ax.set_ylim(1e-14,5e-10)
pl.done("cdcomp.png")
# pl = io.Plotter(xyscale='linlin',xlabel='l',ylabel='$D^{\\rm{CMB-new}}_l / D^{\\rm{CMB-old}}_l$')
# pl.add(cents,ct1d/cs1d)
# pl.hline(y=1)
# pl._ax.set_ylim(0.85,1.05)
# pl.done("cdcompdiff.png")
| 35.382979 | 123 | 0.746242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 783 | 0.235418 |
b2c0fe0d284c1df72e6a811ac09a5b401ed7fb9b | 509 | py | Python | tests/schema_mapping/expected/generated_example3.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 14 | 2018-02-14T13:28:47.000Z | 2022-02-12T08:03:21.000Z | tests/schema_mapping/expected/generated_example3.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 142 | 2017-11-22T14:02:33.000Z | 2022-03-23T21:26:29.000Z | tests/schema_mapping/expected/generated_example3.py | loyada/typed-py | 8f946ed0cddb38bf7fd463a4c8111a592ccae31a | [
"MIT"
] | 4 | 2017-12-14T16:46:45.000Z | 2021-12-15T16:33:31.000Z | from typedpy import *
class Person(Structure):
first_name = String()
last_name = String()
age = Integer(minimum=1)
_required = ['first_name', 'last_name']
class Groups(Structure):
groups = Array(items=Person)
_required = ['groups']
# ********************
class Example1(Structure):
people = Array(items=Person)
id = Integer()
i = Integer()
s = String()
m = Map(items=[String(), Person])
groups = Groups
_required = ['groups', 'id', 'm', 'people']
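# Hypothetical usage sketch (assumes typedpy's standard validation behaviour):
#   p = Person(first_name='Ada', last_name='Lovelace', age=36)    # ok
#   Person(first_name='Ada', last_name='Lovelace', age=0)         # raises: minimum is 1
#   Example1(groups=Groups(groups=[p]), id=1, m={'ada': p}, people=[p])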
| 17.551724 | 47 | 0.581532 | 454 | 0.891945 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.149312 |
b2c10ffac29f7bdf64553c51d96d725e726e49a1 | 3,114 | py | Python | rpesk/morse_code.py | LukeJVinton/pi-projects | 9dfa110bb027b0fb281e3dca831f1547bc15faa5 | [
"MIT"
] | null | null | null | rpesk/morse_code.py | LukeJVinton/pi-projects | 9dfa110bb027b0fb281e3dca831f1547bc15faa5 | [
"MIT"
] | null | null | null | rpesk/morse_code.py | LukeJVinton/pi-projects | 9dfa110bb027b0fb281e3dca831f1547bc15faa5 | [
"MIT"
] | null | null | null |
# 02_blink_twice.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
import RPi.GPIO as GPIO
import time
def word_separation(pin):
sleep_time = 7
    GPIO.output(pin, False)  # False turns the LED off for the 7-unit word gap
time.sleep(sleep_time)
def pulse(pin, length = "dot"):
pulse_time = 0
sleep_time = 1
if length == "dash":
pulse_time = 3
elif length == "dot":
pulse_time = 1
elif length == "stop":
sleep_time = 3
if length != 'stop':
GPIO.output(pin, True) # True means that LED turns on
        time.sleep(pulse_time)  # hold the LED on for 1 unit (dot) or 3 units (dash)
    GPIO.output(pin, False)  # False turns the LED off again
time.sleep(sleep_time)
def get_morse_dictionary(letter):
morse_dict = {'a':['dot','dash','stop'],
'b':['dash','dot','dot','dot','stop'],
'c':['dash','dot','dash','dot','stop'],
'd':['dash','dot','dot','stop'],
'e':['dot','stop'],
'f':['dot','dot','dash','dot','stop'],
'g':['dash','dash','dot','stop'],
'h':['dot','dot','dot','dot','stop'],
'i':['dot','dot','stop'],
'j':['dot','dash','dash','dash','stop'],
'k':['dash','dot','dash','stop'],
'l':['dot','dash','dot','dot','stop'],
'm':['dash','dash','stop'],
'n':['dash','dot','stop'],
'o':['dash','dash','dash','stop'],
'p':['dot','dash','dash','dot','stop'],
'q':['dash','dash','dot','dash','stop'],
'r':['dot','dash','dot','stop'],
's':['dot','dot','dot','stop'],
't':['dash','stop'],
'u':['dot','dot','dash','stop'],
'v':['dot','dot','dot','dash','stop'],
'w':['dot','dash','dash','stop'],
'x':['dash','dot','dot','dash','stop'],
'y':['dash','dot','dash','dash','stop'],
'z':['dash','dash','dot','dot','stop'],
}
return morse_dict[letter]
def pulse_letter(letter, pin):
if letter == ' ':
word_separation(pin)
else:
pulse_list = get_morse_dictionary(letter)
for beep in pulse_list:
print(beep)
pulse(pin, beep)
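# Dry-run illustration (no GPIO needed): the timings follow standard Morse
# units -- dot = 1 unit on, dash = 3 units on, 1 unit off between symbols,
# 3 units off after each letter (the 'stop' entry), 7 units between words.
# For example:
#   get_morse_dictionary('s')  ->  ['dot', 'dot', 'dot', 'stop']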
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
red_pin1 = 18
GPIO.setup(red_pin1, GPIO.OUT)
try:
words = input('Enter a word: ')
for letter in words:
pulse_letter(letter, red_pin1)
finally:
print("Cleaning up")
GPIO.cleanup()
# You could get rid of the try: finally: code and just have the while loop
# and its contents. However, the try: finally: construct makes sure that
# when you CTRL-c the program to end it, all the pins are set back to
# being inputs. This helps protect your Pi from accidental shorts-circuits
# if something metal touches the GPIO pins.
| 33.483871 | 85 | 0.495825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,373 | 0.440912 |
b2c14d3bb32a9d0a97a9d773d034e8784a7e69a4 | 5,641 | py | Python | lljs.py | Peter9192/wind_analytics | 604136be1c2ef1155bdb7579c7d123525dbe10d8 | [
"Apache-2.0"
] | null | null | null | lljs.py | Peter9192/wind_analytics | 604136be1c2ef1155bdb7579c7d123525dbe10d8 | [
"Apache-2.0"
] | null | null | null | lljs.py | Peter9192/wind_analytics | 604136be1c2ef1155bdb7579c7d123525dbe10d8 | [
"Apache-2.0"
] | null | null | null | """ Identify low-level jets in wind profile data.
Peter Kalverla
December 2020
"""
import numpy as np
import xarray as xr
def detect_llj(x, axis=None, falloff=0, output='strength', inverse=False):
""" Identify maxima in wind profiles.
args:
- x : ndarray with wind profile data
- axis : specifies the vertical dimension
is internally used with np.apply_along_axis
- falloff : threshold for labeling as low-level jet
default 0; can be masked later, e.g. llj[falloff>2.0]
    - output : specify return type: 'strength' or 'index'
returns (depending on <output> argument):
- strength : 0 if no maximum identified, otherwise falloff strength
    - index : 0 if no maximum identified, otherwise index along
<axis>, to get the height of the jet etc.
"""
def inner(x, output):
if inverse:
x = x[::-1, ...]
# Identify local maxima
x = x[~np.isnan(x)]
dx = x[1:] - x[:-1]
ind = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
# Last value of x cannot be llj
if ind.size and ind[-1] == x.size - 1:
ind = ind[:-1]
# Compute the falloff strength for each local maxima
if ind.size: # this assumes height increases along axis!!!
strength = np.array([x[i] - min(x[i:]) for i in ind])
imax = np.argmax(strength)
# Return jet_strength and index of maximum:
if output == 'strength':
r = max(strength) if ind.size else 0
elif output == 'index':
r = ind[imax] if ind.size else 0
return r
# Wrapper interface to apply 1d function to ndarray
return np.apply_along_axis(inner, axis, x, output=output)
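# Minimal usage sketch (synthetic data, shapes assumed): a profile with a wind
# maximum at the second level yields a positive falloff strength.
#   wind = np.array([[3.0, 6.0, 4.0, 5.0, 7.0]])    # (time, height), height ascending
#   detect_llj(wind, axis=1)                    # -> array([2.])  (6 - min(6, 4, 5, 7))
#   detect_llj(wind, axis=1, output='index')    # -> array([1])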
def detect_llj_vectorized(xs,
axis=-1,
output='falloff',
mask_inv=False,
inverse=False):
""" Identify local maxima in wind profiles.
args:
- x : ndarray with wind profile data
- axis : specifies the vertical dimension
    - output : specify return type: 'falloff', 'strength' or 'index'
- mask_inv : use np.ma to mask nan values
returns (depending on <output> argument and whether llj is identified):
- falloff : 0 or largest difference between local max and subseq min
- strength : 0 or wind speed at jet height
- index : -1 or index along <axis>
"""
# Move <axis> to first dimension, to easily index and iterate over it.
xv = np.rollaxis(xs, axis)
if inverse:
xv = xv[::-1, ...]
if mask_inv:
xv = np.ma.masked_invalid(xv)
# Set initial arrays
min_elem = xv[-1].copy()
max_elem = np.zeros(min_elem.shape)
max_diff = np.zeros(min_elem.shape)
max_idx = np.ones(min_elem.shape, dtype=int) * (-1)
# Start at end of array and search backwards for larger differences.
for i, elem in reversed(list(enumerate(xv))):
min_elem = np.minimum(elem, min_elem)
new_max_identified = elem - min_elem > max_diff
max_diff = np.where(new_max_identified, elem - min_elem, max_diff)
max_elem = np.where(new_max_identified, elem, max_elem)
max_idx = np.where(new_max_identified, i, max_idx)
if output == 'falloff':
r = max_diff
elif output == 'strength':
r = max_elem
elif output == 'index':
r = max_idx
else:
raise ValueError('Invalid argument for <output>: %s' % output)
return r
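# The vectorized variant scans the profiles once from the top down instead of
# using np.apply_along_axis; on the example above it agrees with detect_llj:
#   detect_llj_vectorized(wind, axis=1)                    # -> array([2.])
#   detect_llj_vectorized(wind, axis=1, output='index')    # -> array([1])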
def detect_llj_xarray(da, inverse=False):
""" Identify local maxima in wind profiles.
args:
- da : xarray.DataArray with wind profile data
- inverse : to flip the array if the data is stored upside down
returns: : xarray.Dataset with vertical dimension removed containing:
- falloff : 0 or largest difference between local max and subseq min
- strength : 0 or wind speed at jet height
- index : -1 or index along <axis>
Note: vertical dimension should be labeled 'level' and axis=1
"""
# Move <axis> to first dimension, to easily index and iterate over it.
xv = np.rollaxis(da.values, 1)
if inverse:
xv = xv[::-1, ...]
# Set initial arrays
min_elem = xv[-1].copy()
max_elem = np.zeros(min_elem.shape)
max_diff = np.zeros(min_elem.shape)
max_idx = np.ones(min_elem.shape, dtype=int) * (-1)
# Start at end of array and search backwards for larger differences.
for i, elem in reversed(list(enumerate(xv))):
min_elem = np.minimum(elem, min_elem)
new_max_identified = elem - min_elem > max_diff
max_diff = np.where(new_max_identified, elem - min_elem, max_diff)
max_elem = np.where(new_max_identified, elem, max_elem)
max_idx = np.where(new_max_identified, i, max_idx)
# Combine the results in a dataframe
    # i >= 0 is a valid jet index (index 0 included); -1 flags "no jet detected"
    get_height = lambda i: np.where(i >= 0, da.level.values[i], da.level.values[-1])
dims = da.isel(level=0).drop('level').dims
coords = da.isel(level=0).drop('level').coords
lljs = xr.Dataset(
{
'falloff': (dims, max_diff),
'strength': (dims, max_elem),
'level': (dims, get_height(max_idx)),
},
coords=coords)
print(
'Beware! Level is also filled if no jet is detected! '
'Use ds.sel(level=lljs.level).where(lljs.falloff>0) to get rid of them'
)
return lljs | 34.820988 | 80 | 0.591916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,740 | 0.485729 |
b2c4382563bdc135f87a0336d22aa149de5f9c44 | 9,203 | py | Python | rollon_erpnext/hooks_property_setter.py | santoshbb/rhplrepo | 8ce4792ea47b66ab2b7aed9da468104a2d37ae2b | [
"MIT"
] | null | null | null | rollon_erpnext/hooks_property_setter.py | santoshbb/rhplrepo | 8ce4792ea47b66ab2b7aed9da468104a2d37ae2b | [
"MIT"
] | null | null | null | rollon_erpnext/hooks_property_setter.py | santoshbb/rhplrepo | 8ce4792ea47b66ab2b7aed9da468104a2d37ae2b | [
"MIT"
] | null | null | null | property_setter = {
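    # Frappe fixture filter (assumed usage): listed as `fixtures = [property_setter]`
    # in the app's hooks.py, so that `bench export-fixtures` dumps only the
    # Property Setter records named below and re-imports them on migrate.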
"dt": "Property Setter",
"filters": [
["name", "in", [
'Purchase Order-read_only_onload',
'Purchase Order-default_print_format',
'Purchase Invoice-naming_series-options',
'Purchase Invoice-naming_series-default',
'Delivery Note-naming_series-options',
'Delivery Note-naming_series-default',
'Sales Order-naming_series-options',
'Sales Order-naming_series-default',
'Purchase Receipt-naming_series-options',
'Purchase Receipt-naming_series-default',
'Production Order-naming_series-options',
'Production Order-naming_series-default',
'Stock Entry-naming_series-options',
'Stock Entry-naming_series-default',
'Purchase Order-naming_series-options',
'Purchase Order-naming_series-default',
'Sales Invoice-naming_series-options',
'Sales Invoice-naming_series-default',
'Purchase Invoice-read_only_onload',
'Stock Reconciliation-read_only_onload',
'Delivery Note-read_only_onload',
'Stock Entry-read_only_onload',
'Sales Invoice-po_no-read_only',
'Sales Invoice-read_only_onload',
'Purchase Receipt Item-read_only_onload',
'Custom Field-fieldname-width',
'Custom Field-dt-width',
'Sales Invoice Item-read_only_onload',
'Sales Invoice Item-warehouse-default',
'Sales Order-po_no-read_only',
'Sales Order-read_only_onload',
'Item-read_only_onload',
'User-read_only_onload',
'User-sort_field',
'Asset Maintenance Task-periodicity-options',
'Asset Maintenance Task-read_only_onload',
'Asset-read_only_onload',
'Sales Invoice Item-customer_item_code-print_hide',
'Sales Invoice Item-customer_item_code-hidden',
'Sales Order Item-read_only_onload',
'BOM-with_operations-default',
'BOM-read_only_onload',
'Stock Entry-default_print_format',
'Purchase Receipt-read_only_onload',
'Production Order-skip_transfer-default',
'Production Order-skip_transfer-read_only',
'Production Order-use_multi_level_bom-default',
'Production Order-use_multi_level_bom-read_only',
'Production Order-read_only_onload',
'Purchase Order Item-amount-precision',
'Purchase Order Item-read_only_onload',
'Purchase Order Item-rate-precision',
'Stock Entry-use_multi_level_bom-default',
'Stock Entry-use_multi_level_bom-read_only',
'Stock Entry-from_bom-read_only',
'Stock Entry-from_bom-default',
'Stock Entry Detail-barcode-read_only',
'Stock Entry Detail-read_only_onload',
'Stock Entry-to_warehouse-read_only',
'Stock Entry-from_warehouse-read_only',
'Stock Entry-remarks-reqd',
'Purchase Receipt-in_words-print_hide',
'Purchase Receipt-in_words-hidden',
'Purchase Invoice-in_words-print_hide',
'Purchase Invoice-in_words-hidden',
'Purchase Order-in_words-print_hide',
'Purchase Order-in_words-hidden',
'Supplier Quotation-in_words-print_hide',
'Supplier Quotation-in_words-hidden',
'Delivery Note-in_words-print_hide',
'Delivery Note-in_words-hidden',
'Sales Invoice-in_words-print_hide',
'Sales Invoice-in_words-hidden',
'Sales Order-in_words-print_hide',
'Sales Order-in_words-hidden',
'Quotation-in_words-print_hide',
'Quotation-in_words-hidden',
'Purchase Order-rounded_total-print_hide',
'Purchase Order-rounded_total-hidden',
'Purchase Order-base_rounded_total-print_hide',
'Purchase Order-base_rounded_total-hidden',
'Supplier Quotation-rounded_total-print_hide',
'Supplier Quotation-rounded_total-hidden',
'Supplier Quotation-base_rounded_total-print_hide',
'Supplier Quotation-base_rounded_total-hidden',
'Delivery Note-rounded_total-print_hide',
'Delivery Note-rounded_total-hidden',
'Delivery Note-base_rounded_total-print_hide',
'Delivery Note-base_rounded_total-hidden',
'Sales Invoice-rounded_total-print_hide',
'Sales Invoice-rounded_total-hidden',
'Sales Invoice-base_rounded_total-print_hide',
'Sales Invoice-base_rounded_total-hidden',
'Sales Order-rounded_total-print_hide',
'Sales Order-rounded_total-hidden',
'Sales Order-base_rounded_total-print_hide',
'Sales Order-base_rounded_total-hidden',
'Quotation-rounded_total-print_hide',
'Quotation-rounded_total-hidden',
'Quotation-base_rounded_total-print_hide',
'Quotation-base_rounded_total-hidden',
'Dropbox Settings-dropbox_setup_via_site_config-hidden',
'Dropbox Settings-read_only_onload',
'Dropbox Settings-dropbox_access_token-hidden',
'Activity Log-subject-width',
'Employee-employee_number-hidden',
'Employee-employee_number-reqd',
'Employee-naming_series-reqd',
'Employee-naming_series-hidden',
'Supplier-naming_series-hidden',
'Supplier-naming_series-reqd',
'Delivery Note-tax_id-print_hide',
'Delivery Note-tax_id-hidden',
'Sales Invoice-tax_id-print_hide',
'Sales Invoice-tax_id-hidden',
'Sales Order-tax_id-print_hide',
'Sales Order-tax_id-hidden',
'Customer-naming_series-hidden',
'Customer-naming_series-reqd',
'Stock Entry Detail-barcode-hidden',
'Stock Reconciliation Item-barcode-hidden',
'Item-barcode-hidden',
'Delivery Note Item-barcode-hidden',
'Sales Invoice Item-barcode-hidden',
'Purchase Receipt Item-barcode-hidden',
'Item-item_code-reqd',
'Item-item_code-hidden',
'Item-naming_series-hidden',
'Item-naming_series-reqd',
'Item-manufacturing-collapsible_depends_on',
'Purchase Invoice-payment_schedule-print_hide',
'Purchase Invoice-due_date-print_hide',
'Purchase Order-payment_schedule-print_hide',
'Purchase Order-due_date-print_hide',
'Sales Invoice-payment_schedule-print_hide',
'Sales Invoice-due_date-print_hide',
'Sales Order-payment_schedule-print_hide',
'Sales Order-due_date-print_hide',
'Journal Entry Account-sort_order',
'Journal Entry Account-account_currency-print_hide',
'Sales Invoice-taxes_and_charges-reqd',
'Sales Taxes and Charges-sort_order',
'Sales Invoice Item-customer_item_code-label',
'Sales Invoice-default_print_format',
'Purchase Taxes and Charges Template-sort_order',
'Serial No-company-in_standard_filter',
'Serial No-amc_expiry_date-in_standard_filter',
'Serial No-warranty_expiry_date-in_standard_filter',
'Serial No-maintenance_status-in_standard_filter',
'Serial No-customer_name-in_standard_filter',
'Serial No-customer_name-bold',
'Serial No-customer-in_standard_filter',
'Serial No-delivery_document_no-in_standard_filter',
'Serial No-delivery_document_type-in_standard_filter',
'Serial No-supplier_name-bold',
'Serial No-supplier_name-in_standard_filter',
'Serial No-supplier-in_standard_filter',
'Serial No-purchase_date-in_standard_filter',
'Serial No-description-in_standard_filter',
'Delivery Note-section_break1-hidden',
'Delivery Note-sales_team_section_break-hidden',
'Delivery Note-project-hidden',
'Delivery Note-taxes-hidden',
'Delivery Note-taxes_and_charges-hidden',
'Delivery Note-taxes_section-hidden',
'Delivery Note-posting_time-print_hide',
'Delivery Note-posting_time-description',
'Delivery Note Item-warehouse-default',
'Item-income_account-default',
'Item-income_account-depends_on',
'Purchase Receipt-remarks-reqd',
'Purchase Receipt-taxes-hidden',
'Purchase Receipt-taxes_and_charges-hidden',
'Purchase Receipt Item-base_rate-fieldtype',
'Purchase Receipt Item-amount-in_list_view',
'Purchase Receipt Item-rate-fieldtype',
'Purchase Receipt Item-base_price_list_rate-fieldtype',
'Purchase Receipt Item-price_list_rate-fieldtype',
'Purchase Receipt Item-qty-in_list_view',
'Stock Entry-title_field',
'Stock Entry-search_fields',
'Stock Entry-project-hidden',
'Stock Entry-supplier-in_list_view',
'Stock Entry-from_warehouse-in_list_view',
'Stock Entry-to_warehouse-in_list_view',
'Stock Entry-purpose-default',
'ToDo-sort_order',
'Currency Exchange-sort_order',
'Company-abbr-in_list_view',
'Stock Reconciliation-expense_account-in_standard_filter',
'Stock Reconciliation-expense_account-depends_on',
'Sales Order-taxes-hidden',
'Warehouse-sort_order',
'Address-fax-hidden',
'Address-fax-read_only',
'Address-phone-hidden',
'Address-email_id-hidden',
'Address-city-reqd',
'BOM Operation-sort_order',
'BOM Item-scrap-read_only',
'BOM-operations_section-read_only',
'BOM-operations-read_only',
'BOM-rm_cost_as_per-reqd',
'Journal Entry-pay_to_recd_from-allow_on_submit',
'Journal Entry-remark-in_global_search',
'Journal Entry-total_amount-bold',
'Journal Entry-total_amount-print_hide',
'Journal Entry-total_amount-in_list_view',
'Journal Entry-total_credit-print_hide',
'Journal Entry-total_debit-print_hide',
'Journal Entry-total_debit-in_list_view',
'Journal Entry-user_remark-print_hide',
'Stock Entry-to_warehouse-hidden',
'Purchase Order Item-rate-fieldtype',
'Journal Entry Account-exchange_rate-print_hide',
'Sales Invoice Item-item_code-label',
'BOM-rm_cost_as_per-options',
'Purchase Order Item-price_list_rate-fieldtype',
'Reconciliation-expense_account-read_only',
'Customer-tax_id-read_only',
'Purchase Order Item-amount-fieldtype',
'Stock Entry-project-hidden'
]
]
]
} | 40.013043 | 61 | 0.757688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,042 | 0.873845 |
b2c5e558b71549ec4885e41eca936b455678ffaf | 1,555 | py | Python | api/client/src/pcluster_client/sigv4_auth.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 415 | 2018-11-13T15:02:15.000Z | 2022-03-31T15:26:06.000Z | api/client/src/pcluster_client/sigv4_auth.py | maclema/aws-parallelcluster | ade6e5e76201ee43c6e222fcd1c2891aba938838 | [
"Apache-2.0"
] | 2,522 | 2018-11-13T16:16:27.000Z | 2022-03-31T13:57:10.000Z | api/client/src/pcluster_client/sigv4_auth.py | yuleiwan/aws-parallelcluster | aad2a3019ef4ad08d702f5acf41b152b3f7a0b46 | [
"Apache-2.0"
] | 164 | 2018-11-14T22:47:46.000Z | 2022-03-22T11:33:22.000Z | """Sigv4 Signing Support"""
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy
# of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import botocore
import json
def sigv4_auth(method, host, path, querys, body, headers):
"Adds authorization headers for sigv4 to headers parameter."
endpoint = host.replace('https://', '').replace('http://', '')
_api_id, _service, region, _domain = endpoint.split('.', maxsplit=3)
request_parameters = '&'.join([f"{k}={v}" for k, v in querys])
url = f"{host}{path}?{request_parameters}"
session = botocore.session.Session()
request = botocore.awsrequest.AWSRequest(method=method,
url=url,
data=json.dumps(body) if body else None)
botocore.auth.SigV4Auth(session.get_credentials(),
"execute-api", region).add_auth(request)
prepared_request = request.prepare()
headers['host'] = endpoint.split('/', maxsplit=1)[0]
for k, value in prepared_request.headers.items():
headers[k] = value
| 39.871795 | 85 | 0.659807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 730 | 0.469453 |
b2c664ce7bd387984bca2a25d6741d8d39b481e1 | 2,624 | py | Python | django/contrib/sessions/tests.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | 1 | 2016-05-08T12:24:22.000Z | 2016-05-08T12:24:22.000Z | django/contrib/sessions/tests.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | null | null | null | django/contrib/sessions/tests.py | rawwell/django | 6b3264671ead4604f26cbd2b71e8d6a02945bf0c | [
"BSD-3-Clause"
] | 1 | 2015-11-19T14:45:16.000Z | 2015-11-19T14:45:16.000Z | r"""
>>> from django.conf import settings
>>> from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
>>> from django.contrib.sessions.backends.cache import SessionStore as CacheSession
>>> from django.contrib.sessions.backends.file import SessionStore as FileSession
>>> from django.contrib.sessions.backends.base import SessionBase
>>> db_session = DatabaseSession()
>>> db_session.modified
False
>>> db_session['cat'] = "dog"
>>> db_session.modified
True
>>> db_session.pop('cat')
'dog'
>>> db_session.pop('some key', 'does not exist')
'does not exist'
>>> db_session.save()
>>> db_session.exists(db_session.session_key)
True
>>> db_session.delete(db_session.session_key)
>>> db_session.exists(db_session.session_key)
False
>>> file_session = FileSession()
>>> file_session.modified
False
>>> file_session['cat'] = "dog"
>>> file_session.modified
True
>>> file_session.pop('cat')
'dog'
>>> file_session.pop('some key', 'does not exist')
'does not exist'
>>> file_session.save()
>>> file_session.exists(file_session.session_key)
True
>>> file_session.delete(file_session.session_key)
>>> file_session.exists(file_session.session_key)
False
# Make sure the file backend checks for a good storage dir
>>> settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
>>> FileSession()
Traceback (innermost last):
...
ImproperlyConfigured: The session storage path '/if/this/directory/exists/you/have/a/weird/computer' doesn't exist. Please set your SESSION_FILE_PATH setting to an existing directory in which Django can store session data.
>>> cache_session = CacheSession()
>>> cache_session.modified
False
>>> cache_session['cat'] = "dog"
>>> cache_session.modified
True
>>> cache_session.pop('cat')
'dog'
>>> cache_session.pop('some key', 'does not exist')
'does not exist'
>>> cache_session.save()
>>> cache_session.delete(cache_session.session_key)
>>> cache_session.exists(cache_session.session_key)
False
>>> s = SessionBase()
>>> s._session['some key'] = 'exists' # Pre-populate the session with some data
>>> s.accessed = False # Reset to pretend this wasn't accessed previously
>>> s.accessed, s.modified
(False, False)
>>> s.pop('non existant key', 'does not exist')
'does not exist'
>>> s.accessed, s.modified
(True, False)
>>> s.setdefault('foo', 'bar')
'bar'
>>> s.setdefault('foo', 'baz')
'bar'
>>> s.accessed = False # Reset the accessed flag
>>> s.pop('some key')
'exists'
>>> s.accessed, s.modified
(True, True)
>>> s.pop('some key', 'does not exist')
'does not exist'
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
| 27.333333 | 222 | 0.722942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,564 | 0.977134 |
b2c83a9626d327c18df6c74ffc572fe2774106fd | 1,504 | py | Python | gopage/web_helper.py | wavegu/gopage | ff83cea34a82570627c74c5bad45ebc02ecaaff6 | [
"MIT"
] | 1 | 2017-02-03T10:24:00.000Z | 2017-02-03T10:24:00.000Z | gopage/web_helper.py | wavegu/gopage | ff83cea34a82570627c74c5bad45ebc02ecaaff6 | [
"MIT"
] | null | null | null | gopage/web_helper.py | wavegu/gopage | ff83cea34a82570627c74c5bad45ebc02ecaaff6 | [
"MIT"
] | null | null | null | # encoding: utf-8
import urllib2
from proxy_helper import ProxyHelper
proxyHelper = ProxyHelper()
class WebHelper:
def __init__(self):
pass
@classmethod
def get_page_content_from_url(cls, page_url):
"""
get html content from web page with given url
:param page_url: url of the page to be read
:return: page_content
"""
try:
proxy_ip = 'http://:@' + proxyHelper.choose_proxy()
print 'getting content from [' + page_url + ']', 'ip=' + proxy_ip
# print 'getting content from [' + page_url.decode('utf-8').encode('cp936') + ']', 'ip=' + proxy_ip
            proxy = urllib2.ProxyHandler({'http': proxy_ip})  # proxy_ip already includes the 'http://:@' prefix
auth = urllib2.HTTPBasicAuthHandler()
opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1 WOW64 rv:23.0) Gecko/20130406 Firefox/23.0')]
conn = opener.open(page_url)
page_content = conn.read()
return page_content
        except urllib2.URLError as e:  # HTTPError subclasses URLError; "A or B" would only catch A
print '[Error]@WebHelper.get_page_content_from_url:', page_url
print e
return None
if __name__ == '__main__':
page_content = WebHelper.get_page_content_from_url('https://www.google.com/search?hl=en&safe=off&q=wave')
with open('test_result.html', 'w') as test_result:
test_result.write(page_content) | 36.682927 | 122 | 0.621011 | 1,168 | 0.776596 | 0 | 0 | 1,108 | 0.736702 | 0 | 0 | 541 | 0.359707 |
b2cabd96c3fc001d2729753488a402fc76f755f0 | 8,187 | py | Python | tests/test_skipping.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | 41 | 2020-07-24T15:19:19.000Z | 2022-03-17T17:40:57.000Z | tests/test_skipping.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | 240 | 2020-06-26T21:37:49.000Z | 2022-03-31T08:56:56.000Z | tests/test_skipping.py | pytask-dev/pytask | b6769b48abda44c6261b9a7b58865f8844423c13 | [
"MIT"
] | null | null | null | import textwrap
from contextlib import ExitStack as does_not_raise # noqa: N813
import pytest
from _pytask.mark import Mark
from _pytask.outcomes import Skipped
from _pytask.outcomes import SkippedAncestorFailed
from _pytask.outcomes import SkippedUnchanged
from _pytask.skipping import pytask_execute_task_setup
from pytask import cli
from pytask import main
class DummyClass:
pass
@pytest.mark.end_to_end
def test_skip_unchanged(tmp_path):
source = """
def task_dummy():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
session = main({"paths": tmp_path})
assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged)
@pytest.mark.end_to_end
def test_skip_unchanged_w_dependencies_and_products(tmp_path):
source = """
import pytask
@pytask.mark.depends_on("in.txt")
@pytask.mark.produces("out.txt")
def task_dummy(depends_on, produces):
produces.write_text(depends_on.read_text())
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
tmp_path.joinpath("in.txt").write_text("Original content of in.txt.")
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
assert tmp_path.joinpath("out.txt").read_text() == "Original content of in.txt."
session = main({"paths": tmp_path})
assert isinstance(session.execution_reports[0].exc_info[1], SkippedUnchanged)
assert tmp_path.joinpath("out.txt").read_text() == "Original content of in.txt."
@pytest.mark.end_to_end
def test_skipif_ancestor_failed(tmp_path):
source = """
import pytask
@pytask.mark.produces("out.txt")
def task_first():
assert 0
@pytask.mark.depends_on("out.txt")
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert not session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Exception)
assert not session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], SkippedAncestorFailed)
@pytest.mark.end_to_end
def test_if_skip_decorator_is_applied_to_following_tasks(tmp_path):
source = """
import pytask
@pytask.mark.skip
@pytask.mark.produces("out.txt")
def task_first():
assert 0
@pytask.mark.depends_on("out.txt")
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
"mark_string", ["@pytask.mark.skip", "@pytask.mark.skipif(True, reason='bla')"]
)
def test_skip_if_dependency_is_missing(tmp_path, mark_string):
source = f"""
import pytask
{mark_string}
@pytask.mark.depends_on("in.txt")
def task_first():
assert 0
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
@pytest.mark.end_to_end
@pytest.mark.parametrize(
"mark_string", ["@pytask.mark.skip", "@pytask.mark.skipif(True, reason='bla')"]
)
def test_skip_if_dependency_is_missing_only_for_one_task(runner, tmp_path, mark_string):
source = f"""
import pytask
{mark_string}
@pytask.mark.depends_on("in.txt")
def task_first():
assert 0
@pytask.mark.depends_on("in.txt")
def task_second():
assert 0
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
assert result.exit_code == 4
assert "in.txt" in result.output
assert "task_first" not in result.output
assert "task_second" in result.output
@pytest.mark.end_to_end
def test_if_skipif_decorator_is_applied_skipping(tmp_path):
source = """
import pytask
@pytask.mark.skipif(condition=True, reason="bla")
@pytask.mark.produces("out.txt")
def task_first():
assert False
@pytask.mark.depends_on("out.txt")
def task_second():
assert False
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
node = session.collection_reports[0].node
assert len(node.markers) == 1
assert node.markers[0].name == "skipif"
assert node.markers[0].args == ()
assert node.markers[0].kwargs == {"condition": True, "reason": "bla"}
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
assert session.execution_reports[0].exc_info[1].args[0] == "bla"
@pytest.mark.end_to_end
def test_if_skipif_decorator_is_applied_execute(tmp_path):
source = """
import pytask
@pytask.mark.skipif(False, reason="bla")
@pytask.mark.produces("out.txt")
def task_first(produces):
with open(produces, "w") as f:
f.write("hello world.")
@pytask.mark.depends_on("out.txt")
def task_second():
pass
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
node = session.collection_reports[0].node
assert len(node.markers) == 1
assert node.markers[0].name == "skipif"
assert node.markers[0].args == (False,)
assert node.markers[0].kwargs == {"reason": "bla"}
assert session.execution_reports[0].success
assert session.execution_reports[0].exc_info is None
assert session.execution_reports[1].success
assert session.execution_reports[1].exc_info is None
@pytest.mark.end_to_end
def test_if_skipif_decorator_is_applied_any_condition_matches(tmp_path):
"""Any condition of skipif has to be True and only their message is shown."""
source = """
import pytask
@pytask.mark.skipif(condition=False, reason="I am fine")
@pytask.mark.skipif(condition=True, reason="No, I am not.")
@pytask.mark.produces("out.txt")
def task_first():
assert False
@pytask.mark.depends_on("out.txt")
def task_second():
assert False
"""
tmp_path.joinpath("task_dummy.py").write_text(textwrap.dedent(source))
session = main({"paths": tmp_path})
node = session.collection_reports[0].node
assert len(node.markers) == 2
assert node.markers[0].name == "skipif"
assert node.markers[0].args == ()
assert node.markers[0].kwargs == {"condition": True, "reason": "No, I am not."}
assert node.markers[1].name == "skipif"
assert node.markers[1].args == ()
assert node.markers[1].kwargs == {"condition": False, "reason": "I am fine"}
assert session.execution_reports[0].success
assert isinstance(session.execution_reports[0].exc_info[1], Skipped)
assert session.execution_reports[1].success
assert isinstance(session.execution_reports[1].exc_info[1], Skipped)
assert session.execution_reports[0].exc_info[1].args[0] == "No, I am not."
@pytest.mark.unit
@pytest.mark.parametrize(
("marker_name", "expectation"),
[
("skip_unchanged", pytest.raises(SkippedUnchanged)),
("skip_ancestor_failed", pytest.raises(SkippedAncestorFailed)),
("skip", pytest.raises(Skipped)),
("", does_not_raise()),
],
)
def test_pytask_execute_task_setup(marker_name, expectation):
class Task:
pass
task = Task()
kwargs = {"reason": ""} if marker_name == "skip_ancestor_failed" else {}
task.markers = [Mark(marker_name, (), kwargs)]
with expectation:
pytask_execute_task_setup(task)
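# Illustration: the parametrization above feeds marker records such as
# Mark("skip", (), {}) into task.markers; pytask_execute_task_setup then raises
# the matching outcome (e.g. Skipped), which the executor reports as a
# successful skip rather than a failure -- see the end-to-end tests above.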
| 30.662921 | 88 | 0.696836 | 50 | 0.006107 | 0 | 0 | 7,766 | 0.948577 | 0 | 0 | 2,665 | 0.325516 |
b2cacdeef0561546d139a9bec5f6cfde666b19a3 | 156 | py | Python | basic/server.py | spinico/django-projects-boilerplates | 22d47f60d282d0edb9c0f1b84bb3e9e84949bd25 | [
"MIT"
] | null | null | null | basic/server.py | spinico/django-projects-boilerplates | 22d47f60d282d0edb9c0f1b84bb3e9e84949bd25 | [
"MIT"
] | null | null | null | basic/server.py | spinico/django-projects-boilerplates | 22d47f60d282d0edb9c0f1b84bb3e9e84949bd25 | [
"MIT"
] | null | null | null | from waitress import serve
from conf.wsgi import application
if __name__ == '__main__':
serve(application, listen='0.0.0.0:8000', url_scheme='https')
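# Note: url_scheme='https' only tells the WSGI app that requests arrived over
# HTTPS (useful behind a TLS-terminating proxy); waitress itself still serves
# plain HTTP on 0.0.0.0:8000.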
| 22.285714 | 65 | 0.737179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.198718 |
b2cacff06725bd2d9718bd414438fed14a74ef43 | 589 | py | Python | src/django_version_checks/apps.py | adamchainz/django-version-checks | 94f6d696f5279a7bb579c7d2a177a231d6b61e45 | [
"MIT"
] | 33 | 2020-12-13T23:02:39.000Z | 2022-03-28T06:19:09.000Z | src/django_version_checks/apps.py | adamchainz/django-version-checks | 94f6d696f5279a7bb579c7d2a177a231d6b61e45 | [
"MIT"
] | 47 | 2020-12-14T01:33:56.000Z | 2021-11-06T09:17:38.000Z | src/django_version_checks/apps.py | adamchainz/django-version-checks | 94f6d696f5279a7bb579c7d2a177a231d6b61e45 | [
"MIT"
] | 2 | 2021-11-13T22:56:21.000Z | 2022-02-15T14:24:53.000Z | from django.apps import AppConfig
from django.core.checks import Tags, register
from django_version_checks import checks
class DjangoVersionChecksAppConfig(AppConfig):
name = "django_version_checks"
verbose_name = "django-version-checks"
def ready(self) -> None:
register(Tags.compatibility)(checks.check_config)
register(Tags.compatibility)(checks.check_python_version)
register(Tags.database)(checks.check_postgresql_version)
register(Tags.database)(checks.check_mysql_version)
register(Tags.database)(checks.check_sqlite_version)
| 34.647059 | 65 | 0.7691 | 464 | 0.787776 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.078098 |
b2ccd20ece8fce408fc21dd559ba9fc865804c11 | 3,471 | py | Python | app/ocr.py | noahnisbet/human-rights-first-asylum-ds-noahnisbet | c329045e5967253d8b5ac729e315ed03325744b2 | [
"MIT"
] | 1 | 2021-02-25T21:26:08.000Z | 2021-02-25T21:26:08.000Z | app/ocr.py | noahnisbet/human-rights-first-asylum-ds-noahnisbet | c329045e5967253d8b5ac729e315ed03325744b2 | [
"MIT"
] | null | null | null | app/ocr.py | noahnisbet/human-rights-first-asylum-ds-noahnisbet | c329045e5967253d8b5ac729e315ed03325744b2 | [
"MIT"
] | null | null | null | import os
os.environ["OMP_NUM_THREADS"]= '1'
os.environ["OMP_THREAD_LIMIT"] = '1'
os.environ["MKL_NUM_THREADS"] = '1'
os.environ["NUMEXPR_NUM_THREADS"] = '1'
os.environ["OMP_NUM_THREADS"] = '1'
os.environ["PAPERLESS_AVX2_AVAILABLE"]="false"
os.environ["OCR_THREADS"] = '1'
import poppler
import pytesseract
from pdf2image import convert_from_bytes
from fastapi import APIRouter, File
import sqlalchemy
from dotenv import load_dotenv, find_dotenv
from sqlalchemy import create_engine
from app.BIA_Scraper import BIACase
import requests
import pandas as pd
import numpy as np
from PIL import Image
router = APIRouter()
load_dotenv(find_dotenv())
database_url = os.getenv('DATABASE_URL')
engine = sqlalchemy.create_engine(database_url)
@router.post('/get_text')
async def get_text_from_case_file(file: bytes = File(...)):
'''
    This function OCR-converts the uploaded PDF and returns the extracted text.
'''
text = []
    ### Converts the bytes object received from fastapi
    pages = convert_from_bytes(file, 200, fmt='png', thread_count=2)
    ### Uses pytesseract to convert each page of the pdf to txt
    ### (image_to_string takes one image at a time, so loop over the pages)
    for page in pages:
        text.append(pytesseract.image_to_string(page))
### Joins the list to an output string
string_to_return = " ".join(text)
return {'Text': string_to_return}
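# Hypothetical client call for the endpoint above (host and port assumed):
#   import requests
#   resp = requests.post("http://localhost:8000/get_text",
#                        files={"file": open("case.pdf", "rb")})
#   resp.json()["Text"]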
@router.post('/get_fields')
async def get_fields_from_case_file(file: bytes = File(...)):
text = []
    ### Converts the bytes object received from fastapi
pages = convert_from_bytes(file,200,fmt='png',thread_count=2)
### Uses pytesseract to convert each page of pdf to txt
for item in pages:
text.append(pytesseract.image_to_string(item))
### Joins the list to an output string
string = " ".join(text)
### Using the BIACase Class to populate fields
case = BIACase(string)
### Json object / dictionary to be returned
case_data = {}
### Application field
app = case.get_application()
app = [ap for ap, b in app.items() if b]
case_data['application'] = '; '.join(app) if app else None
### Date field
case_data['date'] = case.get_date()
### Country of origin
case_data['country_of_origin'] = case.get_country_of_origin()
### Getting Panel members
panel = case.get_panel()
case_data['panel_members'] = '; '.join(panel) if panel else None
### Getting case outcome
case_data['outcome'] = case.get_outcome()
### Getting protected grounds
pgs = case.get_protected_grounds()
case_data['protected_grounds'] = '; '.join(pgs) if pgs else None
### Getting the violence type on the asylum seeker
based_violence = case.get_based_violence()
violence = '; '.join([k for k, v in based_violence.items() if v]) \
if based_violence \
else None
### Getting keywords
keywords = '; '.join(['; '.join(v) for v in based_violence.values()]) \
if based_violence \
else None
case_data['based_violence'] = violence
case_data['keywords'] = keywords
### Getting references / sex of applicant
references = [
'Matter of AB, 27 I&N Dec. 316 (A.G. 2018)'
if case.references_AB27_216() else None,
'Matter of L-E-A-, 27 I&N Dec. 581 (A.G. 2019)'
if case.references_LEA27_581() else None
]
case_data['references'] = '; '.join([r for r in references if r])
case_data['sex_of_applicant'] = case.get_seeker_sex()
return case_data
| 28.219512 | 75 | 0.669548 | 0 | 0 | 0 | 0 | 2,728 | 0.785941 | 2,674 | 0.770383 | 1,178 | 0.339383 |
b2cd1412230dab0559fa3bfa9b195e544581cd4a | 3,889 | py | Python | lp_local_search.py | cddoyle/div-k-median | fa2e3dc01f257602aa83e151c3bc268a76f8075e | [
"MIT"
] | null | null | null | lp_local_search.py | cddoyle/div-k-median | fa2e3dc01f257602aa83e151c3bc268a76f8075e | [
"MIT"
] | null | null | null | lp_local_search.py | cddoyle/div-k-median | fa2e3dc01f257602aa83e151c3bc268a76f8075e | [
"MIT"
] | null | null | null | import time
import sys
import numpy as np
from local_search import kmedian_local_search
import feasibility
from kmedkpm import k_median_k_partitions_LS
import psutil
from sklearn.datasets import make_blobs
import generator
import random
test = False
def lp_ls_complete(data, color_mat, rvec, k, logfile):
############################################################################
# INPUT
# data: N X d numpy array
# color_mat: N*t numpy array representing groups' memberships
# rvec: requirements vector of t size
# k: number of clusters
# OUTPUT
# Object with stats, i.e, with "cost" that is a cost of the solution
############################################################################
(N, d) = data.shape
many_solutions_lp_only = True
command = 'linear-program'
return_solution = False
process = psutil.Process()
tstart = time.time()
time_buf = time.time()
perf_stats = feasibility.calculate(k, rvec, color_mat,
command, return_solution,
logfile, many_solutions_lp_only)
set_mappings = perf_stats["subset_map"]
solution = perf_stats["solution"]
set_to_indices = {}
for (idx, _id) in enumerate(sorted(set_mappings.keys())):
set_to_indices[idx] = _id
#end for
unique_solutions = solution if len(solution) == 0 else np.unique(np.stack(solution, axis=0), axis=0)
print('solutions: ', 0 if len(solution) == 0 else unique_solutions.shape[0])
total_cost = sys.maxsize
time_buf = time.time()
for (_, s) in enumerate(unique_solutions):
E = {}
i = 0
for (idx, e) in enumerate(s):
for _ in range(e):
#E[i] = (idx, set_to_indices[idx])
E[i] = data[set_mappings[set_to_indices[idx]], :]
i = i + 1
#end for
#end for
if k > i:
continue
statc = k_median_k_partitions_LS(E, data, None, N, d, k, is_coreset=False)
total_cost = min(total_cost, statc["cost"])
# print(set_to_indices)
kmedkpmtime = time.time() - time_buf
total_time = time.time() - tstart
stats_total = {}
opt_ls_cost = kmedian_local_search(data, k)["cost"]
stats_total['opt_ls_cost'] = opt_ls_cost
stats_total["lp_time"] = perf_stats["total_time"]
stats_total["total_time"] = total_time
stats_total["ls_time"] = kmedkpmtime
stats_total['peak_memory'] = process.memory_info().rss/(1024*1024)
stats_total['virtual_memory'] = process.memory_info().vms/(1024*1024)
stats_total['cost'] = total_cost
return stats_total
#end lp_ls_complete()
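# Minimal usage sketch (mirrors test_lp_ls_complete below; inputs assumed):
#   stats = lp_ls_complete(data, color_mat, rvec, k, sys.stdout)
#   print(stats["cost"], stats["opt_ls_cost"], stats["total_time"])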
def test_lp_ls_complete():
#random number generator seeds
gen_seed = 12312321
dist_matrix_seed = random.randint(1, int(pow(2, 32)-1))
local_search_seed = random.randint(1, int(pow(2, 32)-1))
#initialize
logfile = sys.stdout
n = 100
t = 3
k = 3
d = 2
r_max = 3
r_min = 1
max_freq = 3
data, _ = make_blobs(n_samples=n, centers=k, n_features=d,
random_state=12312, cluster_std=0.8)
#generate instance and time it
time_buf = time.time()
color_mat, rvec, _ = generator.get_feasible_instance(
t,
n,
r_max,
r_min,
max_freq,
k,
gen_seed,
unique=False)
lp_ls_complete(data, color_mat, rvec, k, logfile)
#end es_fpt_3apx_complete_test()
################################################################################
if __name__ == '__main__':
test_lp_ls_complete()
| 32.408333 | 104 | 0.54487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.213165 |
b2cdb0c942905c9f4fb6dbf73dca96d1a9a5f768 | 415 | py | Python | dms/v2/meal/__init__.py | moreal/DMS-api | 9624e28764ec4535002677671e10a09d762d19a8 | [
"MIT"
] | null | null | null | dms/v2/meal/__init__.py | moreal/DMS-api | 9624e28764ec4535002677671e10a09d762d19a8 | [
"MIT"
] | null | null | null | dms/v2/meal/__init__.py | moreal/DMS-api | 9624e28764ec4535002677671e10a09d762d19a8 | [
"MIT"
] | 1 | 2018-09-29T14:35:20.000Z | 2018-09-29T14:35:20.000Z | import datetime
import requests
import json
from dms.v2.config import DMS_URL
class Meal():
@staticmethod
    def get(date: Union[datetime.date, str, None] = None):
        # Resolve the default at call time; a signature default of
        # datetime.date.today() would be frozen at import time, and
        # `datetime.date or str` just evaluates to datetime.date.
        if date is None:
            date = datetime.date.today()
        if not isinstance(date, str):
            date = str(date)
resp = requests.get(f"http://{DMS_URL}/v2/meal/{date}")
meals = json.loads(resp.text)
return meals['breakfast'], meals['lunch'], meals['dinner']
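# Hypothetical usage (date may be a datetime.date or an ISO 'YYYY-MM-DD' string):
#   breakfast, lunch, dinner = Meal.get('2018-09-29')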
| 21.842105 | 66 | 0.636145 | 333 | 0.80241 | 0 | 0 | 315 | 0.759036 | 0 | 0 | 60 | 0.144578 |
b2cf11ab3d7e9318bb55599575d25a729b83ace2 | 319 | py | Python | mayan/apps/dependencies/permissions.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/dependencies/permissions.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/dependencies/permissions.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from django.utils.translation import ugettext_lazy as _
from mayan.apps.permissions import PermissionNamespace
namespace = PermissionNamespace(label=_('Dependencies'), name='dependencies')
permission_dependencies_view = namespace.add_permission(
label=_('View dependencies'), name='dependencies_view'
)
| 31.9 | 78 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.206897 |
b2d171ee084b4ded299d8d9b2d8e8e0fa604218a | 213 | py | Python | src/about.py | jukeboxroundtable/JukeboxRoundtable | 06670d2e8511848829b68fddac5bc77806606f98 | [
"MIT"
] | 1 | 2019-02-15T17:33:51.000Z | 2019-02-15T17:33:51.000Z | src/about.py | jukeboxroundtable/JukeboxRoundtable | 06670d2e8511848829b68fddac5bc77806606f98 | [
"MIT"
] | 37 | 2019-01-30T18:32:43.000Z | 2019-06-11T18:00:11.000Z | src/about.py | jukeboxroundtable/JukeboxRoundtable | 06670d2e8511848829b68fddac5bc77806606f98 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template
about_blueprint = Blueprint('about', __name__)
@about_blueprint.route('/about')
def about():
"""Show the about page."""
return render_template('about.html')
| 21.3 | 46 | 0.7277 | 0 | 0 | 0 | 0 | 117 | 0.549296 | 0 | 0 | 53 | 0.248826 |