hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d7c741b81d484e97159c53ba61d889aa64fb4322 | 402 | py | Python | calibration/capture.py | dangthanhan507/odcl | 60b7d67bbeabda58090c76f6bdbfae4559eea9ac | [
"MIT"
] | null | null | null | calibration/capture.py | dangthanhan507/odcl | 60b7d67bbeabda58090c76f6bdbfae4559eea9ac | [
"MIT"
] | 7 | 2021-10-30T02:16:46.000Z | 2021-11-17T21:23:12.000Z | calibration/capture.py | dangthanhan507/odcl | 60b7d67bbeabda58090c76f6bdbfae4559eea9ac | [
"MIT"
] | null | null | null | import cv2
import os
import argparse
if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument('--o', required=True, help='output_folder')
    opt = arg.parse_args()
    # Grab one frame from the default camera (device 0) for chessboard
    # calibration images.
    cap = cv2.VideoCapture(0)
    try:
        ret, img = cap.read()
        # BUG FIX: cap.read() can fail (no camera / busy device); previously
        # `img` would be None and imwrite below would write garbage or raise.
        if not ret:
            raise RuntimeError('Failed to capture a frame from camera 0')
    finally:
        # Always release the camera device, even if capture failed.
        cap.release()
    print('Writing to chessboard file')
    # Pick the first unused img<N>.jpg filename in the output folder so
    # repeated runs never overwrite earlier captures.
    files = os.listdir(opt.o)
    index = 0
    while f'img{index}.jpg' in files:
        index += 1
    cv2.imwrite(os.path.join(opt.o, f'img{index}.jpg'), img)
| 23.647059 | 61 | 0.691542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.248756 |
d7c7d36b08b7910b33e0fece9387522df88e2cd2 | 3,105 | py | Python | commands/FBTextInputCommands.py | zddd/chisel | 7782bdde3062e15ccbdc5f617aa3a8f096b6751b | [
"MIT"
] | 1 | 2020-03-04T20:24:33.000Z | 2020-03-04T20:24:33.000Z | commands/FBTextInputCommands.py | zddd/chisel | 7782bdde3062e15ccbdc5f617aa3a8f096b6751b | [
"MIT"
] | null | null | null | commands/FBTextInputCommands.py | zddd/chisel | 7782bdde3062e15ccbdc5f617aa3a8f096b6751b | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import lldb
import fblldbbase as fb
import fblldbviewhelpers as viewHelpers
ACCESSIBILITY_ID = 0
REPLACEMENT_TEXT = 1
INPUT_TEXT = 0
def lldbcommands():
  """Return the chisel command objects this module contributes."""
  registered = [
      FBInputTexByAccessibilityIdCommand(),
      FBInputTexToFirstResponderCommand(),
  ]
  return registered
class FBInputTexByAccessibilityIdCommand(fb.FBCommand):
  """'settext' command: set text on a view found by accessibility id."""

  def name(self):
    return 'settext'

  def description(self):
    return 'Set text on text on a view by accessibility id.'

  def args(self):
    return [
      fb.FBCommandArgument(arg='accessibilityId', type='string', help='The accessibility ID of the input view.'),
      fb.FBCommandArgument(arg='replacementText', type='string', help='The text to set.')
    ]

  def run(self, arguments, options):
    self.findView(rootView(), arguments[ACCESSIBILITY_ID], arguments[REPLACEMENT_TEXT])

  def findView(self, view, searchIdentifier, replacementText):
    # Post-order walk: descend into every subview first, then test this
    # view's own identifier (matches the original for/else traversal).
    children = subviewsOfView(view)
    total = viewsCount(children)
    position = 0
    while position < total:
      child = subviewAtIndex(children, position)
      self.findView(child, searchIdentifier, replacementText)
      position += 1
    identifier = accessibilityIdentifier(view)
    if isEqualToString(identifier, searchIdentifier):
      setTextInView(view, replacementText)
class FBInputTexToFirstResponderCommand(fb.FBCommand):
  """'setinput' command: type text into the current first responder."""

  def name(self):
    return 'setinput'

  def description(self):
    return 'Input text into text field or text view that is first responder.'

  def args(self):
    return [
      fb.FBCommandArgument(arg='inputText', type='string', help='The text to input.')
    ]

  def run(self, arguments, options):
    self.findFirstResponder(rootView(), arguments[INPUT_TEXT])

  def findFirstResponder(self, view, replacementText):
    views = subviewsOfView(view)
    # Stop at the first responder; otherwise keep searching the subtree.
    if isFirstResponder(view):
      setTextInView(view, replacementText)
      return
    for position in range(viewsCount(views)):
      self.findFirstResponder(subviewAtIndex(views, position), replacementText)
# Some helpers
def rootView():
  """Evaluate and return the application's key window."""
  keyWindowExpr = '[[UIApplication sharedApplication] keyWindow]'
  return fb.evaluateObjectExpression(keyWindowExpr)
def subviewsOfView(view):
  """Return the subviews array of *view* as an expression result."""
  subviewsExpr = '[%s subviews]' % view
  return fb.evaluateObjectExpression(subviewsExpr)
def subviewAtIndex(views, index):
  """Return the element of the *views* array at *index*."""
  elementExpr = '[%s objectAtIndex:%i]' % (views, index)
  return fb.evaluateObjectExpression(elementExpr)
def viewsCount(views):
  """Return the number of elements in the *views* array as a Python int."""
  countExpr = '(int)[%s count]' % views
  return int(fb.evaluateExpression(countExpr))
def accessibilityIdentifier(view):
  """Return the accessibility identifier of *view*."""
  identifierExpr = '[%s accessibilityIdentifier]' % view
  return fb.evaluateObjectExpression(identifierExpr)
def isEqualToString(identifier, needle):
  """Return True if the NSString *identifier* equals the literal *needle*."""
  comparisonExpr = '[%s isEqualToString:@"%s"]' % (identifier, needle)
  return fb.evaluateBooleanExpression(comparisonExpr)
def setTextInView(view, text):
  """Set *text* on *view* and flush Core Animation so the UI updates."""
  setterExpr = '[%s setText:@"%s"]' % (view, text)
  fb.evaluateObjectExpression(setterExpr)
  viewHelpers.flushCoreAnimationTransaction()
def isFirstResponder(view):
  """Return True if *view* is currently the first responder."""
  responderExpr = '[%s isFirstResponder]' % view
  return fb.evaluateBooleanExpression(responderExpr)
| 30.145631 | 113 | 0.732689 | 1,720 | 0.553945 | 0 | 0 | 0 | 0 | 0 | 0 | 709 | 0.228341 |
d7c8a43a00510f5f2e6fd644ecd312c0d8026e5e | 3,105 | py | Python | tests/test_operator_web_site.py | luigi-riefolo/network_crawler | 376fb5860c573416ac71a0dfe5437011858398b6 | [
"MIT"
] | null | null | null | tests/test_operator_web_site.py | luigi-riefolo/network_crawler | 376fb5860c573416ac71a0dfe5437011858398b6 | [
"MIT"
] | null | null | null | tests/test_operator_web_site.py | luigi-riefolo/network_crawler | 376fb5860c573416ac71a0dfe5437011858398b6 | [
"MIT"
] | null | null | null | """OperatorWebSite class unit test."""
from __init__ import json, os, time, unittest, \
webdriver, WebDriverException, OperatorWebSite
class TestOperatorWebSite(unittest.TestCase):
    """Unit test class for OperatorWebSite."""

    def load_data(self):
        """Load the first operator entry from the JSON data file.

        Populates self.data; fails the test if the file is missing, and
        lets IOError/OSError/ValueError propagate on read/parse errors
        (same exception types the caller saw before).
        """
        self.data = None
        file_name = os.path.abspath("data/sites/operators.json")
        # Check whether the file exists
        self.assertTrue(os.path.isfile(file_name), 'Invalid data file')
        # 'with' guarantees the handle is closed even if JSON parsing
        # raises (replaces the manual open/try/finally bookkeeping).
        with open(file_name, 'r') as file_data:
            # Load the data file into a JSON object
            self.data = json.loads(file_data.read())["operators"][0]
        self.assertIsNotNone(self.data)

    def setUp(self):
        """Start the browser, load data and open the operator web site."""
        try:
            # Chrome driver
            self.driver = webdriver.Chrome()
            self.load_data()
            # Create the operator web site object
            self.operator_obj = OperatorWebSite(self.driver, self.data)
            # BUG FIX: message typo 'creat' -> 'create'.
            self.assertIsNotNone(
                self.operator_obj, 'Could not create OperatorWebSite object')
            self.driver.get(self.data["url"])
        except WebDriverException:
            # Make sure the browser process doesn't leak on startup failure.
            self.driver.quit()
            raise

    def tearDown(self):
        """Tear down."""
        # Close and quit the browser
        self.driver.close()

    def run_action(self, action_name, action_args=None):
        """Run a specific OperatorWebSite action and return its result."""
        # Execute the requested web driver action
        method = self.operator_obj.get_attr(
            self.operator_obj, action_name)
        self.assertIsNotNone(
            method, 'Failed to get method \'%s\'' % action_name)
        res = method(action_args)
        # BUG FIX: the failure message was a bare tuple
        # ('Action \'%s\' failed', name) which unittest would show as a
        # tuple repr; format it properly instead.
        self.assertIsNotNone(res, 'Action \'%s\' failed' % action_name)
        # NOTE(review): fixed pause presumably waits for the page to settle
        # after the action — confirm whether an explicit wait would do.
        time.sleep(3)
        return res

    def test_type_zone(self):
        """'type_zone' should succeed with the first configured zone."""
        args = {
            'path': self.data['actions'][0]['type_zone'],
            'zone': self.data['zones'][0]}
        action = 'type_zone'
        res = self.run_action(action, args)
        # BUG FIX: format the failure message (was an unformatted tuple).
        self.assertTrue(res, 'Action \'%s\' failed' % action)

    def test_click(self):
        """'click' should succeed after typing a zone."""
        self.test_type_zone()
        action = 'click'
        args = {'path': self.data['actions'][1][action]}
        res = self.run_action(action, args)
        self.assertTrue(res, 'Action \'%s\' failed' % action)

    def test_get_cost(self):
        """'get_cost' should succeed after typing a zone and clicking."""
        self.test_type_zone()
        self.test_click()
        action = 'get_cost'
        args = {'path': self.data['actions'][2][action]}
        res = self.run_action(action, args)
        self.assertTrue(res, 'Action \'%s\' failed' % action)

    def test_not_action(self):
        """Requesting an unknown action must raise an AssertionError."""
        self.assertRaises(AssertionError, self.run_action, "not_action")
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| 31.363636 | 76 | 0.5781 | 2,912 | 0.937842 | 0 | 0 | 0 | 0 | 0 | 0 | 744 | 0.239614 |
d7c96b0c8aeabc0eec3210749c777002aac7b033 | 32,365 | py | Python | app/tests/refs/ectyper_dict.py | superphy/spfy | 867e61b32ab00ec536378f96a63f0fb379f47c58 | [
"Apache-2.0"
] | 2 | 2019-05-22T14:29:37.000Z | 2020-02-13T11:30:46.000Z | app/tests/refs/ectyper_dict.py | superphy/backend | 867e61b32ab00ec536378f96a63f0fb379f47c58 | [
"Apache-2.0"
] | 88 | 2017-04-07T21:52:10.000Z | 2018-03-10T23:12:47.000Z | app/tests/refs/ectyper_dict.py | superphy/backend | 867e61b32ab00ec536378f96a63f0fb379f47c58 | [
"Apache-2.0"
] | 2 | 2017-02-10T21:30:13.000Z | 2017-06-05T22:30:17.000Z | # output from call_ectyper.py, to be sent to beautify.py
# example is from ECI-2866_lcl.fasta_ectyper.p
ectyper_dict = {'Virulence Factors': {'lcl|ECI-2866|NODE_56_length_6694_cov_33.7669_ID_111': [{'START': 4864, 'STOP': 5820, 'ORIENTATION': '+', 'GENE_NAME': 'stx1A'}, {'START': 4873, 'STOP': 5820, 'ORIENTATION': '+', 'GENE_NAME': 'stx1A'}, {'START': 4873, 'STOP': 5820, 'ORIENTATION': '+', 'GENE_NAME': 'stx1vA'}, {'START': 5830, 'STOP': 6099, 'ORIENTATION': '+', 'GENE_NAME': 'stx1B'}, {'START': 5830, 'STOP': 6099, 'ORIENTATION': '+', 'GENE_NAME': 'stx1vB'}], 'lcl|ECI-2866|NODE_144_length_772_cov_35.0868_ID_287': [{'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'epeA'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}], 'lcl|ECI-2866|NODE_37_length_34194_cov_30.2716_ID_73': [{'START': 202, 'STOP': 241, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_13_length_131517_cov_29.4639_ID_25': [{'START': 83949, 'STOP': 86025, 'ORIENTATION': '-', 'GENE_NAME': 'flhA'}, {'START': 94335, 'STOP': 96299, 'ORIENTATION': '-', 'GENE_NAME': 'cheA'}, {'START': 92005, 'STOP': 93666, 'ORIENTATION': '-', 'GENE_NAME': 'tar/cheM'}, {'START': 86018, 'STOP': 87166, 'ORIENTATION': '-', 'GENE_NAME': 'flhB'}, {'START': 88427, 'STOP': 89476, 'ORIENTATION': '-', 'GENE_NAME': 'cheB'}, {'START': 96304, 'STOP': 97230, 'ORIENTATION': '-', 'GENE_NAME': 'motB'}, {'START': 97227, 'STOP': 98114, 'ORIENTATION': '-', 'GENE_NAME': 'motA'}, {'START': 89479, 'STOP': 90339, 'ORIENTATION': '-', 'GENE_NAME': 'cheR'}, {'START': 119779, 'STOP': 120579, 'ORIENTATION': '-', 'GENE_NAME': 'fliY'}, {'START': 121264, 'STOP': 121983, 'ORIENTATION': '-', 'GENE_NAME': 'fliA'}, {'START': 87368, 'STOP': 88012, 'ORIENTATION': '-', 'GENE_NAME': 'cheZ'}, {'START': 98241, 'STOP': 98819, 'ORIENTATION': '-', 'GENE_NAME': 'flhC'}, {'START': 120667, 'STOP': 121254, 'ORIENTATION': '-', 'GENE_NAME': 'fliZ'}, {'START': 93811, 'STOP': 94314, 'ORIENTATION': 
'-', 'GENE_NAME': 'cheW'}, {'START': 88023, 'STOP': 88412, 'ORIENTATION': '-', 'GENE_NAME': 'cheY'}, {'START': 125330, 'STOP': 125718, 'ORIENTATION': '+', 'GENE_NAME': 'fliS'}, {'START': 83555, 'STOP': 83947, 'ORIENTATION': '-', 'GENE_NAME': 'flhE'}, {'START': 98822, 'STOP': 99181, 'ORIENTATION': '-', 'GENE_NAME': 'flhD'}, {'START': 125718, 'STOP': 126083, 'ORIENTATION': '+', 'GENE_NAME': 'fliT'}, {'START': 75403, 'STOP': 75517, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_49_length_12118_cov_18.277_ID_97': [{'START': 3814, 'STOP': 6414, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 3814, 'STOP': 6087, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 9423, 'STOP': 11510, 'ORIENTATION': '-', 'GENE_NAME': 'c3610'}, {'START': 9423, 'STOP': 11510, 'ORIENTATION': '-', 'GENE_NAME': 'iha'}, {'START': 8223, 'STOP': 8399, 'ORIENTATION': '+', 'GENE_NAME': 'aaiW'}], 'lcl|ECI-2866|NODE_55_length_6881_cov_29.4505_ID_109': [{'START': 1922, 'STOP': 3667, 'ORIENTATION': '-', 'GENE_NAME': 'cei'}], 'lcl|ECI-2866|NODE_33_length_43220_cov_31.1898_ID_65': [{'START': 37776, 'STOP': 39434, 'ORIENTATION': '-', 'GENE_NAME': 'fliF'}, {'START': 34736, 'STOP': 36109, 'ORIENTATION': '-', 'GENE_NAME': 'fliI'}, {'START': 33150, 'STOP': 34277, 'ORIENTATION': '-', 'GENE_NAME': 'fliK'}, {'START': 36788, 'STOP': 37783, 'ORIENTATION': '-', 'GENE_NAME': 'fliG'}, {'START': 31572, 'STOP': 32576, 'ORIENTATION': '-', 'GENE_NAME': 'fliM'}, {'START': 28985, 'STOP': 29770, 'ORIENTATION': '-', 'GENE_NAME': 'fliR'}, {'START': 30057, 'STOP': 30794, 'ORIENTATION': '-', 'GENE_NAME': 'fliP'}, {'START': 36109, 'STOP': 36795, 'ORIENTATION': '-', 'GENE_NAME': 'fliH'}, {'START': 32581, 'STOP': 33045, 'ORIENTATION': '-', 'GENE_NAME': 'fliL'}, {'START': 34274, 'STOP': 34717, 'ORIENTATION': '-', 'GENE_NAME': 'fliJ'}, {'START': 31162, 'STOP': 31575, 'ORIENTATION': '-', 'GENE_NAME': 'fliN'}, {'START': 30794, 'STOP': 31159, 'ORIENTATION': '-', 'GENE_NAME': 'fliO'}, {'START': 39649, 'STOP': 39963, 
'ORIENTATION': '+', 'GENE_NAME': 'fliE'}, {'START': 29778, 'STOP': 30047, 'ORIENTATION': '-', 'GENE_NAME': 'fliQ'}], 'lcl|ECI-2866|NODE_60_length_5406_cov_21.6393_ID_119': [{'START': 2729, 'STOP': 5406, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}, {'START': 5262, 'STOP': 5334, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 5262, 'STOP': 5334, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}], 'lcl|ECI-2866|NODE_9_length_157371_cov_34.6522_ID_17': [{'START': 51095, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51104, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51161, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51095, 'STOP': 53612, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51140, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51753, 'STOP': 53731, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 54807, 'STOP': 55709, 'ORIENTATION': '+', 'GENE_NAME': 'fimH'}, {'START': 54798, 'STOP': 55709, 'ORIENTATION': '+', 'GENE_NAME': 'fimH'}, {'START': 50303, 'STOP': 51028, 'ORIENTATION': '+', 'GENE_NAME': 'fimC'}, {'START': 50354, 'STOP': 51028, 'ORIENTATION': '+', 'GENE_NAME': 'fimC'}, {'START': 49604, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49621, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49628, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 50372, 'STOP': 51028, 'ORIENTATION': '+', 'GENE_NAME': 'fimC'}, {'START': 46957, 'STOP': 47559, 'ORIENTATION': '+', 'GENE_NAME': 'fimB'}, {'START': 48037, 'STOP': 48633, 'ORIENTATION': '+', 'GENE_NAME': 'fimE'}, {'START': 48037, 'STOP': 48631, 'ORIENTATION': '+', 'GENE_NAME': 'fimE'}, {'START': 53741, 'STOP': 54271, 'ORIENTATION': '+', 'GENE_NAME': 'fimF'}, {'START': 49727, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49727, 'STOP': 50255, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 54284, 'STOP': 54787, 'ORIENTATION': '+', 'GENE_NAME': 
'fimG'}, {'START': 49727, 'STOP': 50236, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 54278, 'STOP': 54787, 'ORIENTATION': '+', 'GENE_NAME': 'fimG'}, {'START': 53738, 'STOP': 54271, 'ORIENTATION': '+', 'GENE_NAME': 'fimF'}, {'START': 49769, 'STOP': 50266, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}, {'START': 49114, 'STOP': 49662, 'ORIENTATION': '+', 'GENE_NAME': 'fimA'}, {'START': 49057, 'STOP': 49662, 'ORIENTATION': '+', 'GENE_NAME': 'fimA'}, {'START': 47152, 'STOP': 47559, 'ORIENTATION': '+', 'GENE_NAME': 'fimB'}, {'START': 47380, 'STOP': 47559, 'ORIENTATION': '+', 'GENE_NAME': 'fimB'}], 'lcl|ECI-2866|NODE_26_length_62239_cov_34.3381_ID_51': [{'START': 60912, 'STOP': 61544, 'ORIENTATION': '-', 'GENE_NAME': 'gadX'}], 'lcl|ECI-2866|NODE_46_length_15742_cov_35.072_ID_91': [{'START': 3402, 'STOP': 4355, 'ORIENTATION': '-', 'GENE_NAME': 'ompt'}], 'lcl|ECI-2866|NODE_63_length_4414_cov_25.6513_ID_125': [{'START': 4054, 'STOP': 4414, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 4054, 'STOP': 4412, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 4054, 'STOP': 4412, 'ORIENTATION': '-', 'GENE_NAME': 'vgrG'}], 'lcl|ECI-2866|NODE_11_length_143533_cov_28.5907_ID_21': [{'START': 11462, 'STOP': 13360, 'ORIENTATION': '-', 'GENE_NAME': 'espL1'}, {'START': 11462, 'STOP': 13015, 'ORIENTATION': '-', 'GENE_NAME': 'espL1'}, {'START': 11462, 'STOP': 12952, 'ORIENTATION': '-', 'GENE_NAME': 'espL1'}], 'lcl|ECI-2866|NODE_15_length_124782_cov_33.4952_ID_29': [{'START': 80313, 'STOP': 81551, 'ORIENTATION': '+', 'GENE_NAME': 'hofq'}, {'START': 202, 'STOP': 247, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_39_length_33722_cov_30.3088_ID_77': [{'START': 32778, 'STOP': 33598, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 32778, 'STOP': 32867, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 32778, 'STOP': 32876, 'ORIENTATION': '+', 'GENE_NAME': 'upaC'}, {'START': 32778, 'STOP': 32876, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 32778, 'STOP': 
32858, 'ORIENTATION': '+', 'GENE_NAME': 'ehaB'}, {'START': 21352, 'STOP': 21387, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_18_length_100066_cov_32.3135_ID_35': [{'START': 97778, 'STOP': 99939, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99516, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99220, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99139, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 97778, 'STOP': 99028, 'ORIENTATION': '-', 'GENE_NAME': 'ehaB'}, {'START': 81029, 'STOP': 81063, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_16_length_121752_cov_30.7733_ID_31': [{'START': 75288, 'STOP': 76331, 'ORIENTATION': '-', 'GENE_NAME': 'nada'}, {'START': 25312, 'STOP': 25431, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_6_length_171861_cov_30.779_ID_11': [{'START': 77470, 'STOP': 80070, 'ORIENTATION': '+', 'GENE_NAME': 'ycbS'}, {'START': 97267, 'STOP': 98307, 'ORIENTATION': '-', 'GENE_NAME': 'Z1307'}, {'START': 97267, 'STOP': 98307, 'ORIENTATION': '-', 'GENE_NAME': 'ompA'}, {'START': 80061, 'STOP': 81038, 'ORIENTATION': '+', 'GENE_NAME': 'ycbT'}, {'START': 82174, 'STOP': 82911, 'ORIENTATION': '+', 'GENE_NAME': 'ycbF'}, {'START': 76744, 'STOP': 77445, 'ORIENTATION': '+', 'GENE_NAME': 'ycbR'}, {'START': 81645, 'STOP': 82208, 'ORIENTATION': '+', 'GENE_NAME': 'ycbV'}, {'START': 81209, 'STOP': 81685, 'ORIENTATION': '+', 'GENE_NAME': 'ycbU'}], 'lcl|ECI-2866|NODE_12_length_136264_cov_30.9614_ID_23': [{'START': 21634, 'STOP': 25457, 'ORIENTATION': '+', 'GENE_NAME': 'entF'}, {'START': 17731, 'STOP': 19971, 'ORIENTATION': '-', 'GENE_NAME': 'fepA'}, {'START': 33547, 'STOP': 35157, 'ORIENTATION': '+', 'GENE_NAME': 'entE'}, {'START': 4072, 'STOP': 5454, 'ORIENTATION': '+', 'GENE_NAME': 'ibeB'}, {'START': 4072, 'STOP': 5444, 'ORIENTATION': '+', 'GENE_NAME': 'ibeB'}, {'START': 4068, 'STOP': 5454, 'ORIENTATION': '+', 'GENE_NAME': 'ibeB'}, {'START': 32350, 
'STOP': 33537, 'ORIENTATION': '+', 'GENE_NAME': 'entC'}, {'START': 29777, 'STOP': 31027, 'ORIENTATION': '+', 'GENE_NAME': 'entS'}, {'START': 20214, 'STOP': 21416, 'ORIENTATION': '+', 'GENE_NAME': 'fes'}, {'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 25731, 'STOP': 26864, 'ORIENTATION': '+', 'GENE_NAME': 'fepE'}, {'START': 28662, 'STOP': 29678, 'ORIENTATION': '-', 'GENE_NAME': 'fepD'}, {'START': 31031, 'STOP': 31987, 'ORIENTATION': '-', 'GENE_NAME': 'fepB'}, {'START': 5, 'STOP': 1091, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 27673, 'STOP': 28665, 'ORIENTATION': '-', 'GENE_NAME': 'fepG'}, {'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'vgrG'}, {'START': 5, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 35171, 'STOP': 36028, 'ORIENTATION': '+', 'GENE_NAME': 'entB'}, {'START': 26861, 'STOP': 27676, 'ORIENTATION': '-', 'GENE_NAME': 'fepC'}, {'START': 36028, 'STOP': 36774, 'ORIENTATION': '+', 'GENE_NAME': 'entA'}, {'START': 16936, 'STOP': 17706, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_38_length_33984_cov_20.4854_ID_75': [{'START': 17702, 'STOP': 20698, 'ORIENTATION': '-', 'GENE_NAME': 'hlyA'}, {'START': 22471, 'STOP': 25848, 'ORIENTATION': '-', 'GENE_NAME': 'LH0147'}, {'START': 15532, 'STOP': 17652, 'ORIENTATION': '-', 'GENE_NAME': 'hlyB'}, {'START': 14089, 'STOP': 15528, 'ORIENTATION': '-', 'GENE_NAME': 'hlyD'}, {'START': 20700, 'STOP': 21215, 'ORIENTATION': '-', 'GENE_NAME': 'hlyC'}, {'START': 20700, 'STOP': 21191, 'ORIENTATION': '-', 'GENE_NAME': 'hlyC'}, {'START': 6324, 'STOP': 6628, 'ORIENTATION': '-', 'GENE_NAME': 'ccdb'}, {'START': 9127, 'STOP': 9443, 'ORIENTATION': '-', 'GENE_NAME': 'cia'}, {'START': 9127, 'STOP': 9443, 'ORIENTATION': '-', 'GENE_NAME': 'ECS88'}], 'lcl|ECI-2866|NODE_86_length_1960_cov_20.1937_ID_171': [{'START': 1, 'STOP': 938, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 3, 'STOP': 938, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}], 
'lcl|ECI-2866|NODE_73_length_2413_cov_26.5752_ID_145': [{'START': 472, 'STOP': 1266, 'ORIENTATION': '+', 'GENE_NAME': 'saa'}], 'lcl|ECI-2866|NODE_23_length_82758_cov_29.125_ID_45': [{'START': 21752, 'STOP': 21790, 'ORIENTATION': '-', 'GENE_NAME': 'focD'}], 'lcl|ECI-2866|NODE_14_length_130829_cov_35.5941_ID_27': [{'START': 31541, 'STOP': 34063, 'ORIENTATION': '+', 'GENE_NAME': 'stgC'}, {'START': 82988, 'STOP': 84865, 'ORIENTATION': '+', 'GENE_NAME': 'espL3'}, {'START': 34074, 'STOP': 35147, 'ORIENTATION': '+', 'GENE_NAME': 'stgD'}, {'START': 30785, 'STOP': 31516, 'ORIENTATION': '+', 'GENE_NAME': 'stgB'}, {'START': 30165, 'STOP': 30737, 'ORIENTATION': '+', 'GENE_NAME': 'stgA'}, {'START': 30165, 'STOP': 30737, 'ORIENTATION': '+', 'GENE_NAME': 'lpfao113'}, {'START': 130517, 'STOP': 130829, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}, {'START': 130658, 'STOP': 130829, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}], 'lcl|ECI-2866|NODE_21_length_94236_cov_29.797_ID_41': [{'START': 49622, 'STOP': 51265, 'ORIENTATION': '-', 'GENE_NAME': 'flgK'}, {'START': 55827, 'STOP': 57032, 'ORIENTATION': '-', 'GENE_NAME': 'flgE'}, {'START': 52272, 'STOP': 53369, 'ORIENTATION': '-', 'GENE_NAME': 'flgI'}, {'START': 51331, 'STOP': 52272, 'ORIENTATION': '-', 'GENE_NAME': 'flgJ'}, {'START': 48657, 'STOP': 49610, 'ORIENTATION': '-', 'GENE_NAME': 'flgL'}, {'START': 87924, 'STOP': 88757, 'ORIENTATION': '+', 'GENE_NAME': 'csgG'}, {'START': 54132, 'STOP': 54914, 'ORIENTATION': '-', 'GENE_NAME': 'flgG'}, {'START': 55052, 'STOP': 55807, 'ORIENTATION': '-', 'GENE_NAME': 'flgF'}, {'START': 53381, 'STOP': 54079, 'ORIENTATION': '-', 'GENE_NAME': 'flgH'}, {'START': 57057, 'STOP': 57752, 'ORIENTATION': '-', 'GENE_NAME': 'flgD'}, {'START': 86412, 'STOP': 87062, 'ORIENTATION': '+', 'GENE_NAME': 'csgD'}, {'START': 58743, 'STOP': 59402, 'ORIENTATION': '+', 'GENE_NAME': 'flgA'}, {'START': 8349, 'STOP': 9136, 'ORIENTATION': '+', 'GENE_NAME': 'ycfz'}, {'START': 85203, 'STOP': 85685, 'ORIENTATION': '-', 'GENE_NAME': 
'csgB'}, {'START': 85203, 'STOP': 85658, 'ORIENTATION': '-', 'GENE_NAME': 'csgB'}, {'START': 87481, 'STOP': 87897, 'ORIENTATION': '+', 'GENE_NAME': 'csgF'}, {'START': 58172, 'STOP': 58588, 'ORIENTATION': '-', 'GENE_NAME': 'flgB'}, {'START': 59776, 'STOP': 60192, 'ORIENTATION': '+', 'GENE_NAME': 'flgN'}, {'START': 57764, 'STOP': 58168, 'ORIENTATION': '-', 'GENE_NAME': 'flgC'}, {'START': 87067, 'STOP': 87456, 'ORIENTATION': '+', 'GENE_NAME': 'csgE'}, {'START': 84707, 'STOP': 85162, 'ORIENTATION': '-', 'GENE_NAME': 'csgA'}, {'START': 84316, 'STOP': 84648, 'ORIENTATION': '-', 'GENE_NAME': 'csgC'}], 'lcl|ECI-2866|NODE_36_length_35992_cov_31.0701_ID_71': [{'START': 13181, 'STOP': 16705, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 13181, 'STOP': 16645, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 13181, 'STOP': 16614, 'ORIENTATION': '-', 'GENE_NAME': 'icmF'}, {'START': 13181, 'STOP': 16288, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 13181, 'STOP': 16085, 'ORIENTATION': '-', 'GENE_NAME': 'aec30'}, {'START': 18881, 'STOP': 21646, 'ORIENTATION': '-', 'GENE_NAME': 'clpV'}, {'START': 18881, 'STOP': 21760, 'ORIENTATION': '-', 'GENE_NAME': 'clpV'}, {'START': 32611, 'STOP': 34752, 'ORIENTATION': '+', 'GENE_NAME': 'vgrG'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'UMNK88'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'ECO111'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'O3M'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'ECO103'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'G2583'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'ECs0229'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'Z0260'}, {'START': 26627, 'STOP': 28477, 'ORIENTATION': '-', 'GENE_NAME': 'EcE24377A'}, {'START': 20064, 'STOP': 21646, 'ORIENTATION': '-', 'GENE_NAME': 'clpV'}, {'START': 32611, 'STOP': 34675, 'ORIENTATION': '+', 'GENE_NAME': 
'aec15'}, {'START': 32600, 'STOP': 34675, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 28901, 'STOP': 30376, 'ORIENTATION': '-', 'GENE_NAME': 'aec18'}, {'START': 11818, 'STOP': 13260, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 16724, 'STOP': 18136, 'ORIENTATION': '-', 'GENE_NAME': 'aec29'}, {'START': 11861, 'STOP': 13260, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 28988, 'STOP': 30376, 'ORIENTATION': '-', 'GENE_NAME': 'aec18'}, {'START': 11818, 'STOP': 13170, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 22421, 'STOP': 23752, 'ORIENTATION': '-', 'GENE_NAME': 'aec25'}, {'START': 11861, 'STOP': 13170, 'ORIENTATION': '-', 'GENE_NAME': 'aec31'}, {'START': 24276, 'STOP': 25577, 'ORIENTATION': '-', 'GENE_NAME': 'aec23'}, {'START': 24276, 'STOP': 25556, 'ORIENTATION': '-', 'GENE_NAME': 'aec23'}, {'START': 25581, 'STOP': 26663, 'ORIENTATION': '-', 'GENE_NAME': 'aec22'}, {'START': 25581, 'STOP': 26669, 'ORIENTATION': '-', 'GENE_NAME': 'aec22'}, {'START': 24276, 'STOP': 25286, 'ORIENTATION': '-', 'GENE_NAME': 'aec23'}, {'START': 29488, 'STOP': 30376, 'ORIENTATION': '-', 'GENE_NAME': 'aec18'}, {'START': 21655, 'STOP': 22416, 'ORIENTATION': '-', 'GENE_NAME': 'aec26'}, {'START': 18141, 'STOP': 18884, 'ORIENTATION': '-', 'GENE_NAME': 'aec28'}, {'START': 16724, 'STOP': 17518, 'ORIENTATION': '-', 'GENE_NAME': 'aec29'}, {'START': 23755, 'STOP': 24279, 'ORIENTATION': '-', 'GENE_NAME': 'aec24'}, {'START': 30686, 'STOP': 31186, 'ORIENTATION': '-', 'GENE_NAME': 'aec17'}, {'START': 31883, 'STOP': 32401, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}, {'START': 31883, 'STOP': 32401, 'ORIENTATION': '+', 'GENE_NAME': 'aec16'}, {'START': 11312, 'STOP': 11794, 'ORIENTATION': '-', 'GENE_NAME': 'aec32'}, {'START': 28481, 'STOP': 28894, 'ORIENTATION': '-', 'GENE_NAME': 'aec19'}, {'START': 30427, 'STOP': 30651, 'ORIENTATION': '-', 'GENE_NAME': 'Z0263'}, {'START': 31613, 'STOP': 31756, 'ORIENTATION': '+', 'GENE_NAME': 'Z0265'}], 
'lcl|ECI-2866|NODE_22_length_88582_cov_33.0406_ID_43': [{'START': 37513, 'STOP': 37651, 'ORIENTATION': '-', 'GENE_NAME': 'aslA'}, {'START': 80711, 'STOP': 80745, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_2_length_413768_cov_33.1857_ID_3': [{'START': 398693, 'STOP': 400780, 'ORIENTATION': '+', 'GENE_NAME': 'c3610'}, {'START': 398693, 'STOP': 400780, 'ORIENTATION': '+', 'GENE_NAME': 'iha'}, {'START': 242573, 'STOP': 243949, 'ORIENTATION': '+', 'GENE_NAME': 'ygeH'}, {'START': 250669, 'STOP': 251789, 'ORIENTATION': '-', 'GENE_NAME': 'epaS'}, {'START': 253490, 'STOP': 254477, 'ORIENTATION': '-', 'GENE_NAME': 'epaO'}, {'START': 412793, 'STOP': 413696, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 412793, 'STOP': 413696, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}, {'START': 363054, 'STOP': 364034, 'ORIENTATION': '-', 'GENE_NAME': 'yggr'}, {'START': 247037, 'STOP': 247771, 'ORIENTATION': '-', 'GENE_NAME': 'eprK'}, {'START': 248373, 'STOP': 249107, 'ORIENTATION': '-', 'GENE_NAME': 'eprH'}, {'START': 252836, 'STOP': 253500, 'ORIENTATION': '-', 'GENE_NAME': 'epaP'}, {'START': 244939, 'STOP': 245571, 'ORIENTATION': '-', 'GENE_NAME': 'cs3'}, {'START': 384200, 'STOP': 384954, 'ORIENTATION': '-', 'GENE_NAME': 'tia'}, {'START': 213630, 'STOP': 214193, 'ORIENTATION': '-', 'GENE_NAME': 'ppdb'}, {'START': 384200, 'STOP': 384940, 'ORIENTATION': '-', 'GENE_NAME': 'hra1-3'}, {'START': 246440, 'STOP': 247021, 'ORIENTATION': '-', 'GENE_NAME': 'orgA'}, {'START': 384200, 'STOP': 384773, 'ORIENTATION': '-', 'GENE_NAME': 'tia'}, {'START': 244418, 'STOP': 244894, 'ORIENTATION': '+', 'GENE_NAME': 'b2854'}, {'START': 214184, 'STOP': 214654, 'ORIENTATION': '-', 'GENE_NAME': 'ppda'}, {'START': 249812, 'STOP': 250312, 'ORIENTATION': '+', 'GENE_NAME': 'etrA'}, {'START': 241752, 'STOP': 242238, 'ORIENTATION': '+', 'GENE_NAME': 'ygeG'}, {'START': 252115, 'STOP': 252578, 'ORIENTATION': '-', 'GENE_NAME': 'epaR'}, {'START': 245792, 'STOP': 246223, 'ORIENTATION': '-', 
'GENE_NAME': 'orgB'}, {'START': 213226, 'STOP': 213633, 'ORIENTATION': '-', 'GENE_NAME': 'ygdb'}, {'START': 244520, 'STOP': 244919, 'ORIENTATION': '+', 'GENE_NAME': 'iagB'}, {'START': 247768, 'STOP': 248099, 'ORIENTATION': '-', 'GENE_NAME': 'eprJ'}, {'START': 212918, 'STOP': 213241, 'ORIENTATION': '-', 'GENE_NAME': 'ppdc'}, {'START': 252567, 'STOP': 252826, 'ORIENTATION': '-', 'GENE_NAME': 'epaQ'}, {'START': 248119, 'STOP': 248359, 'ORIENTATION': '-', 'GENE_NAME': 'eprI'}, {'START': 413624, 'STOP': 413768, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}, {'START': 254437, 'STOP': 254564, 'ORIENTATION': '-', 'GENE_NAME': 'eivJ'}], 'lcl|ECI-2866|NODE_19_length_99613_cov_36.368_ID_37': [{'START': 83233, 'STOP': 86613, 'ORIENTATION': '-', 'GENE_NAME': 'upaG/ehaG'}, {'START': 83233, 'STOP': 85878, 'ORIENTATION': '-', 'GENE_NAME': 'upaG/ehaG'}, {'START': 1, 'STOP': 1695, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 323, 'STOP': 1695, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 18925, 'STOP': 19680, 'ORIENTATION': '+', 'GENE_NAME': 'tia'}, {'START': 18934, 'STOP': 19680, 'ORIENTATION': '+', 'GENE_NAME': 'tia'}], 'lcl|ECI-2866|NODE_10_length_145407_cov_31.9536_ID_19': [{'START': 53744, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 53744, 'STOP': 57934, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 27172, 'STOP': 31221, 'ORIENTATION': '-', 'GENE_NAME': 'ehaA'}, {'START': 27172, 'STOP': 31155, 'ORIENTATION': '-', 'GENE_NAME': 'ehaA'}, {'START': 53750, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 53787, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'eae'}, {'START': 53787, 'STOP': 57997, 'ORIENTATION': '-', 'GENE_NAME': 'fdeC'}, {'START': 63021, 'STOP': 65546, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 63021, 'STOP': 65546, 'ORIENTATION': '+', 'GENE_NAME': 'ecpC'}, {'START': 65536, 'STOP': 67179, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 65536, 'STOP': 67179, 'ORIENTATION': '+', 'GENE_NAME': 'ecpD'}, 
{'START': 65560, 'STOP': 67179, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67148, 'STOP': 67858, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67103, 'STOP': 67858, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67103, 'STOP': 67856, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67103, 'STOP': 67856, 'ORIENTATION': '+', 'GENE_NAME': 'ecpE'}, {'START': 62278, 'STOP': 62995, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 67148, 'STOP': 67856, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 62327, 'STOP': 62995, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 62327, 'STOP': 62995, 'ORIENTATION': '+', 'GENE_NAME': 'ecpB'}, {'START': 67195, 'STOP': 67858, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61682, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61017, 'STOP': 61607, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61682, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ecpA'}, {'START': 61653, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61017, 'STOP': 61607, 'ORIENTATION': '+', 'GENE_NAME': 'ecpR'}, {'START': 61065, 'STOP': 61607, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 61820, 'STOP': 62269, 'ORIENTATION': '+', 'GENE_NAME': 'ECP'}, {'START': 115613, 'STOP': 115664, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_65_length_4116_cov_46.3389_ID_129': [{'START': 1594, 'STOP': 4115, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 1, 'STOP': 1581, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}, {'START': 1594, 'STOP': 2431, 'ORIENTATION': '+', 'GENE_NAME': 'cdiA'}, {'START': 1270, 'STOP': 1581, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}], 'lcl|ECI-2866|NODE_4_length_261081_cov_33.6293_ID_7': [{'START': 121134, 'STOP': 123309, 'ORIENTATION': '+', 'GENE_NAME': 'cadA'}, {'START': 252571, 'STOP': 254144, 'ORIENTATION': '+', 'GENE_NAME': 'espL4'}, {'START': 219797, 'STOP': 221377, 'ORIENTATION': '-', 'GENE_NAME': 'espX4'}, {'START': 188969, 'STOP': 190261, 'ORIENTATION': 
'+', 'GENE_NAME': 'espX5'}, {'START': 252571, 'STOP': 253710, 'ORIENTATION': '+', 'GENE_NAME': 'espL4'}], 'lcl|ECI-2866|NODE_283_length_368_cov_35.9461_ID_565': [{'START': 1, 'STOP': 368, 'ORIENTATION': '+', 'GENE_NAME': 'aec15'}, {'START': 1, 'STOP': 368, 'ORIENTATION': '+', 'GENE_NAME': 'vgrG'}], 'lcl|ECI-2866|NODE_109_length_1364_cov_53.3694_ID_217': [{'START': 1, 'STOP': 1349, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}, {'START': 507, 'STOP': 1349, 'ORIENTATION': '+', 'GENE_NAME': 'epeA'}], 'lcl|ECI-2866|NODE_34_length_40896_cov_32.935_ID_67': [{'START': 23077, 'STOP': 24810, 'ORIENTATION': '-', 'GENE_NAME': 'ibeC'}, {'START': 23077, 'STOP': 24855, 'ORIENTATION': '-', 'GENE_NAME': 'ibeC'}, {'START': 23077, 'STOP': 24855, 'ORIENTATION': '-', 'GENE_NAME': 'yijP'}], 'lcl|ECI-2866|NODE_210_length_441_cov_0.780255_ID_419': [{'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'ECO103'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'ECNA114'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'ECP'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'APECO1'}, {'START': 1, 'STOP': 383, 'ORIENTATION': '-', 'GENE_NAME': 'c3401'}, {'START': 1, 'STOP': 352, 'ORIENTATION': '-', 'GENE_NAME': 'ECABU'}, {'START': 1, 'STOP': 212, 'ORIENTATION': '-', 'GENE_NAME': 'EcE24377A'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECO103'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'EC958'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'LF82'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECOK1'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECS88'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECNA114'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'i02'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'ECABU'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'APECO1'}, {'START': 364, 'STOP': 441, 
'ORIENTATION': '-', 'GENE_NAME': 'ECP'}, {'START': 364, 'STOP': 441, 'ORIENTATION': '-', 'GENE_NAME': 'c3400'}], 'lcl|ECI-2866|NODE_28_length_54599_cov_28.4892_ID_55': [{'START': 202, 'STOP': 252, 'ORIENTATION': '+', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_35_length_40076_cov_27.9792_ID_69': [{'START': 14025, 'STOP': 15355, 'ORIENTATION': '-', 'GENE_NAME': 'espR1'}, {'START': 14026, 'STOP': 15089, 'ORIENTATION': '-', 'GENE_NAME': 'espR1'}, {'START': 15070, 'STOP': 15394, 'ORIENTATION': '-', 'GENE_NAME': 'espR2'}], 'lcl|ECI-2866|NODE_24_length_71378_cov_29.2686_ID_47': [{'START': 51550, 'STOP': 54201, 'ORIENTATION': '+', 'GENE_NAME': 'fimD'}, {'START': 51550, 'STOP': 54201, 'ORIENTATION': '+', 'GENE_NAME': 'Z2203'}, {'START': 55320, 'STOP': 56234, 'ORIENTATION': '+', 'GENE_NAME': 'Z2206'}, {'START': 50789, 'STOP': 51508, 'ORIENTATION': '+', 'GENE_NAME': 'Z2201'}, {'START': 49874, 'STOP': 50437, 'ORIENTATION': '+', 'GENE_NAME': 'Z2200'}, {'START': 54215, 'STOP': 54745, 'ORIENTATION': '+', 'GENE_NAME': 'Z2204'}, {'START': 54215, 'STOP': 54745, 'ORIENTATION': '+', 'GENE_NAME': 'fimF'}, {'START': 54758, 'STOP': 55261, 'ORIENTATION': '+', 'GENE_NAME': 'fimG'}, {'START': 54758, 'STOP': 55261, 'ORIENTATION': '+', 'GENE_NAME': 'Z2205'}], 'lcl|ECI-2866|NODE_29_length_51952_cov_29.7413_ID_57': [{'START': 31250, 'STOP': 31981, 'ORIENTATION': '+', 'GENE_NAME': 'artj'}], 'lcl|ECI-2866|NODE_5_length_211409_cov_32.4567_ID_9': [{'START': 98098, 'STOP': 99483, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}, {'START': 194600, 'STOP': 196021, 'ORIENTATION': '+', 'GENE_NAME': 'espX1'}, {'START': 99473, 'STOP': 100675, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}, {'START': 97648, 'STOP': 98088, 'ORIENTATION': '+', 'GENE_NAME': 'hcp'}], 'lcl|ECI-2866|NODE_41_length_26475_cov_27.3951_ID_81': [{'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 5, 'STOP': 1093, 'ORIENTATION': '-', 'GENE_NAME': 'aec15'}, {'START': 5, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 
'aec15'}, {'START': 1, 'STOP': 1057, 'ORIENTATION': '-', 'GENE_NAME': 'vgrG'}], 'lcl|ECI-2866|NODE_1_length_488407_cov_30.2969_ID_1': [{'START': 14178, 'STOP': 15800, 'ORIENTATION': '-', 'GENE_NAME': 'nadb'}, {'START': 270497, 'STOP': 271491, 'ORIENTATION': '-', 'GENE_NAME': 'flk'}], 'lcl|ECI-2866|NODE_3_length_280483_cov_33.8271_ID_5': [{'START': 174238, 'STOP': 176940, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 174256, 'STOP': 176056, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 174559, 'STOP': 176940, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 176937, 'STOP': 178025, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 176991, 'STOP': 178025, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 107619, 'STOP': 108503, 'ORIENTATION': '+', 'GENE_NAME': 'ECS88'}, {'START': 172918, 'STOP': 173634, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 173664, 'STOP': 174164, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}, {'START': 177359, 'STOP': 178025, 'ORIENTATION': '+', 'GENE_NAME': 'cfa'}], 'lcl|ECI-2866|NODE_20_length_98076_cov_34.1972_ID_39': [{'START': 55393, 'STOP': 58512, 'ORIENTATION': '-', 'GENE_NAME': 'agn43'}, {'START': 34983, 'STOP': 37043, 'ORIENTATION': '+', 'GENE_NAME': 'gspD'}, {'START': 87782, 'STOP': 89569, 'ORIENTATION': '+', 'GENE_NAME': 'EC55989'}, {'START': 87782, 'STOP': 89569, 'ORIENTATION': '+', 'GENE_NAME': 'EC042'}, {'START': 87905, 'STOP': 89569, 'ORIENTATION': '+', 'GENE_NAME': 'O3M'}, {'START': 37043, 'STOP': 38536, 'ORIENTATION': '+', 'GENE_NAME': 'gspE'}, {'START': 38536, 'STOP': 39759, 'ORIENTATION': '+', 'GENE_NAME': 'gspF'}, {'START': 42739, 'STOP': 43917, 'ORIENTATION': '+', 'GENE_NAME': 'gspL'}, {'START': 55393, 'STOP': 56578, 'ORIENTATION': '-', 'GENE_NAME': 'EC958'}, {'START': 55393, 'STOP': 56706, 'ORIENTATION': '-', 'GENE_NAME': 'cah'}, {'START': 55393, 'STOP': 56578, 'ORIENTATION': '-', 'GENE_NAME': 'cah'}, {'START': 55393, 'STOP': 56565, 'ORIENTATION': '-', 'GENE_NAME': 'cah'}, {'START': 55393, 'STOP': 56706, 
'ORIENTATION': '-', 'GENE_NAME': 'EC958'}, {'START': 41765, 'STOP': 42742, 'ORIENTATION': '+', 'GENE_NAME': 'gspK'}, {'START': 33994, 'STOP': 34953, 'ORIENTATION': '+', 'GENE_NAME': 'gspC'}, {'START': 32691, 'STOP': 33500, 'ORIENTATION': '+', 'GENE_NAME': 'b2972'}, {'START': 89992, 'STOP': 90780, 'ORIENTATION': '+', 'GENE_NAME': 'EC55989'}, {'START': 41163, 'STOP': 41762, 'ORIENTATION': '+', 'GENE_NAME': 'gspJ'}, {'START': 40235, 'STOP': 40798, 'ORIENTATION': '+', 'GENE_NAME': 'gspH'}, {'START': 43919, 'STOP': 44455, 'ORIENTATION': '+', 'GENE_NAME': 'gspM'}, {'START': 39776, 'STOP': 40231, 'ORIENTATION': '+', 'GENE_NAME': 'gspG'}, {'START': 33566, 'STOP': 33976, 'ORIENTATION': '+', 'GENE_NAME': 'yghg'}, {'START': 40795, 'STOP': 41166, 'ORIENTATION': '+', 'GENE_NAME': 'gspI'}, {'START': 89644, 'STOP': 89940, 'ORIENTATION': '+', 'GENE_NAME': 'EC042'}, {'START': 89644, 'STOP': 89940, 'ORIENTATION': '+', 'GENE_NAME': 'O3M'}, {'START': 32553, 'STOP': 32597, 'ORIENTATION': '-', 'GENE_NAME': 'entD'}], 'lcl|ECI-2866|NODE_145_length_772_cov_18.1659_ID_289': [{'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'epeA'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'CAC39286'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espI'}, {'START': 1, 'STOP': 112, 'ORIENTATION': '+', 'GENE_NAME': 'espP'}], 'lcl|ECI-2866|NODE_30_length_50634_cov_30.1661_ID_59': [{'START': 21407, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21551, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21545, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21551, 'STOP': 22398, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 21551, 'STOP': 22061, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 22181, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}, {'START': 22202, 'STOP': 22462, 'ORIENTATION': '+', 'GENE_NAME': 'EC958'}, {'START': 22181, 'STOP': 22398, 'ORIENTATION': '+', 'GENE_NAME': 'hlye'}], 
'lcl|ECI-2866|NODE_51_length_8356_cov_18.7279_ID_101': [{'START': 8044, 'STOP': 8356, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}, {'START': 8185, 'STOP': 8356, 'ORIENTATION': '+', 'GENE_NAME': 'cdiB'}], 'lcl|ECI-2866|NODE_17_length_104608_cov_35.3736_ID_33': [{'START': 54782, 'STOP': 55306, 'ORIENTATION': '+', 'GENE_NAME': 'fimI'}]}, 'Serotype': {'O type': 'O22', 'H type': 'H8'}} | 10,788.333333 | 32,261 | 0.601792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21,173 | 0.654194 |
d7c9ba9ae60b31b077eadce9426f0130ab05c043 | 276 | py | Python | totality/test_main.py | str8d8a/totality-python | cebfd7df880dd42da86fb3094676dd62d2e6d99a | [
"MIT"
] | null | null | null | totality/test_main.py | str8d8a/totality-python | cebfd7df880dd42da86fb3094676dd62d2e6d99a | [
"MIT"
] | null | null | null | totality/test_main.py | str8d8a/totality-python | cebfd7df880dd42da86fb3094676dd62d2e6d99a | [
"MIT"
] | null | null | null | from totality import Totality, Node, NodeId
def test_basic():
    """Smoke test: a Totality with one collection and one node can be built."""
    totality = Totality()
    collection = totality.create_collection(username="system")
    facility_id = NodeId(node_type="facility")
    facility = Node(facility_id, 34, -120, collection=collection)
    print(facility.to_doc())
    assert totality is not None
d7ca58134511fa6a746383bef2993e9b0fa529e5 | 3,630 | py | Python | projects/DensePose/densepose/modeling/test_time_augmentation.py | bruce1408/detectron2_modify | 815df8c3cd68b1450e039fbafc27c6ab302d620b | [
"Apache-2.0"
] | 5 | 2020-01-17T12:27:00.000Z | 2020-05-06T11:47:25.000Z | projects/DensePose/densepose/modeling/test_time_augmentation.py | bruce1408/detectron2_modify | 815df8c3cd68b1450e039fbafc27c6ab302d620b | [
"Apache-2.0"
] | 3 | 2021-06-08T21:51:10.000Z | 2022-01-13T02:53:58.000Z | projects/DensePose/densepose/modeling/test_time_augmentation.py | bruce1408/detectron2_modify | 815df8c3cd68b1450e039fbafc27c6ab302d620b | [
"Apache-2.0"
] | 2 | 2020-01-17T12:49:55.000Z | 2020-02-25T12:17:31.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from fvcore.transforms import HFlipTransform
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA
class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
    """Test-time-augmentation (TTA) wrapper for a DensePose R-CNN.

    Extends detectron2's GeneralizedRCNNWithTTA so that DensePose outputs are
    also computed on each augmented copy of the image and averaged; horizontal
    flips are undone with the symmetry transforms in `transform_data`.
    """

    def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1):
        """
        Args:
            cfg (CfgNode):
            model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
            transform_data (DensePoseTransformData): contains symmetry label
                transforms used for horizontal flip
            tta_mapper (callable): takes a dataset dict and returns a list of
                augmented versions of the dataset dict. Defaults to
                `DatasetMapperTTA(cfg)`.
            batch_size (int): batch the augmented images into this batch size for inference.
        """
        # Kept before super().__init__ so it is available as soon as the base
        # class finishes constructing.
        self._transform_data = transform_data
        super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size)

    # the implementation follows closely the one from detectron2/modeling
    def _inference_one_image(self, input):
        """
        Run TTA inference for a single image.

        NOTE(review): `input` shadows the Python builtin; kept as-is to match
        the detectron2 base-class signature.

        Args:
            input (dict): one dataset dict with "image" field being a CHW tensor
        Returns:
            dict: one output dict
        """
        orig_shape = (input["height"], input["width"])
        # For some reason, resize with uint8 slightly increases box AP but decreases densepose AP
        input["image"] = input["image"].to(torch.uint8)
        augmented_inputs, tfms = self._get_augmented_inputs(input)
        # Detect boxes from all augmented versions
        with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]):
            # temporarily disable roi heads
            all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
        merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)

        if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON:
            # Use the detected boxes to obtain new fields
            augmented_instances = self._rescale_detected_boxes(
                augmented_inputs, merged_instances, tfms
            )
            # run forward on the detected boxes
            outputs = self._batch_inference(augmented_inputs, augmented_instances)
            # Delete now useless variables to avoid being out of memory
            del augmented_inputs, augmented_instances
            # average the predictions
            if self.cfg.MODEL.MASK_ON:
                merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
            if self.cfg.MODEL.DENSEPOSE_ON:
                merged_instances.pred_densepose = self._reduce_pred_densepose(outputs, tfms)
            # postprocess
            merged_instances = detector_postprocess(merged_instances, *orig_shape)
            # NOTE(review): only this branch applies detector_postprocess; the
            # box-only branch below returns unpostprocessed instances — confirm
            # that this asymmetry is intended.
            return {"instances": merged_instances}
        else:
            return {"instances": merged_instances}

    def _reduce_pred_densepose(self, outputs, tfms):
        """Average per-augmentation DensePose predictions into a single result."""
        # Undo horizontal flips so all predictions are in the same orientation.
        for output, tfm in zip(outputs, tfms):
            if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
                output.pred_densepose.hflip(self._transform_data)
        # Less memory-intensive averaging: write the mean of each attribute
        # (S, I, U, V) in place onto the first output instead of stacking.
        for attr in "SIUV":
            setattr(
                outputs[0].pred_densepose,
                attr,
                sum(getattr(o.pred_densepose, attr) for o in outputs) / len(outputs),
            )
        return outputs[0].pred_densepose
| 47.142857 | 98 | 0.662534 | 3,351 | 0.92314 | 0 | 0 | 0 | 0 | 0 | 0 | 1,303 | 0.358953 |
d7ca9ef5912d23f52e3b73b63d9991b17b4b8ce7 | 1,343 | py | Python | src/compath_resources/summarize.py | ComPath/resources | e8da7b511c2b558b8fd0bf38888b512008ac1ba3 | [
"MIT"
] | 3 | 2018-05-14T14:46:39.000Z | 2019-06-20T10:28:26.000Z | src/compath_resources/summarize.py | ComPath/compath-resources | e8da7b511c2b558b8fd0bf38888b512008ac1ba3 | [
"MIT"
] | 13 | 2020-03-28T13:36:32.000Z | 2021-01-19T15:00:07.000Z | src/compath_resources/summarize.py | ComPath/resources | e8da7b511c2b558b8fd0bf38888b512008ac1ba3 | [
"MIT"
] | 1 | 2021-12-01T09:49:59.000Z | 2021-12-01T09:49:59.000Z | # -*- coding: utf-8 -*-
"""Generate charts for the ComPath GitHub Pages site."""
import click
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from more_click import verbose_option
from compath_resources import get_df
from compath_resources.constants import DATA_DIRECTORY, IMG_DIRECTORY
__all__ = [
'charts',
]
@click.command()
@verbose_option
def charts():
    """Generate the summary for ComPath."""
    # Global seaborn theme, applied before any figure is created.
    sns.set_theme(style="darkgrid")
    # Full mapping table, including decopath and the "special" mappings, but
    # without the (large) Reactome hierarchy.
    df = get_df(include_reactome_hierarchy=False, include_decopath=True, include_special=True)
    df.to_csv(DATA_DIRECTORY.joinpath('compath.tsv'), sep='\t', index=False)
    # Stack source and target prefixes into a single column so one count plot
    # covers both ends of every mapping.
    prefix_df = pd.concat([df['source prefix'], df['target prefix']]).to_frame()
    prefix_df.columns = ['Prefix']
    # Two side-by-side bar charts sharing a y-axis: counts by prefix, by relation.
    fig, axes = plt.subplots(1, 2, figsize=(12, 4), sharey=True)
    sns.countplot(data=prefix_df, x='Prefix', ax=axes[0])
    sns.countplot(data=df, x='relation', ax=axes[1])
    axes[0].set_xlabel('')
    axes[0].set_title('By Prefix')
    axes[1].set_xlabel('')
    axes[1].set_title('By Type')
    axes[1].set_ylabel('')
    plt.suptitle(f'Summary of {len(df.index)} ComPath Mappings')
    plt.tight_layout()
    # Save both vector (site) and raster (preview) versions.
    plt.savefig(IMG_DIRECTORY / 'prefixes.svg')
    plt.savefig(IMG_DIRECTORY / 'prefixes.png', dpi=300)
    plt.close(fig)
if __name__ == '__main__':
    # Invoke the click command when run as a script.
    charts()
| 28.574468 | 94 | 0.693969 | 0 | 0 | 0 | 0 | 953 | 0.709605 | 0 | 0 | 319 | 0.237528 |
d7ceec6aeb525832eb8df2671f3394c484971cd3 | 3,681 | py | Python | src/database/insert_data.py | johnnychiuchiu/Music-Recommender | 6fd239e1b3bdd8f7687d393ae7eee9bc77d4fe3d | [
"FTL",
"Spencer-86",
"Spencer-94",
"Spencer-99"
] | null | null | null | src/database/insert_data.py | johnnychiuchiu/Music-Recommender | 6fd239e1b3bdd8f7687d393ae7eee9bc77d4fe3d | [
"FTL",
"Spencer-86",
"Spencer-94",
"Spencer-99"
] | 4 | 2018-02-10T02:04:12.000Z | 2018-03-22T23:58:01.000Z | src/database/insert_data.py | johnnychiuchiu/Music-Recommender | 6fd239e1b3bdd8f7687d393ae7eee9bc77d4fe3d | [
"FTL",
"Spencer-86",
"Spencer-94",
"Spencer-99"
] | 3 | 2018-03-22T13:10:20.000Z | 2019-05-27T21:48:14.000Z | import pandas as pd
import os
from schema import db
import random
class ReadData():
    """
    Acquire song data from the url provided by Turi and prepare it for loading
    into the local database.

    Downloads are cached as ../../data/song.pkl so subsequent runs skip the
    (slow) external fetch.
    """
    def __init__(self):
        # Fixed seed so random_select_user() is reproducible across runs.
        self.SEED = 12345

    def readSongData(self):
        """
        Read song data from the url provided by Turi, or from the local pickle
        cache if it already exists.

        Returns:
            pd.DataFrame: user/song listen counts joined with song metadata,
                restricted to users with more than 5 listened songs.
        """
        if 'song.pkl' in os.listdir('../../data'):
            song_df = pd.read_pickle('../../data/song.pkl')
        else:
            # Read userid-songid-listen_count triplets.
            # This step might take time to download data from external sources.
            triplets_file = 'https://static.turi.com/datasets/millionsong/10000.txt'
            songs_metadata_file = 'https://static.turi.com/datasets/millionsong/song_data.csv'
            song_df_1 = pd.read_table(triplets_file, header=None)
            song_df_1.columns = ['user_id', 'song_id', 'listen_count']

            # Read song metadata.
            song_df_2 = pd.read_csv(songs_metadata_file)

            # Merge the two dataframes above to create the input dataframe
            # for the recommender system.
            song_df = pd.merge(song_df_1, song_df_2.drop_duplicates(['song_id']), on="song_id", how="left")

            # Merge song title and artist_name columns to make a merged column.
            song_df['song'] = song_df['title'].map(str) + " - " + song_df['artist_name']

            n_users = song_df.user_id.unique().shape[0]
            n_items = song_df.song_id.unique().shape[0]
            print(str(n_users) + ' users')
            print(str(n_items) + ' items')

            # BUG FIX: cache into the same directory that is checked above
            # (this previously wrote '../data/song.pkl', so the cache was
            # never found on later runs).
            song_df.to_pickle('../../data/song.pkl')

        song_df = self.drop_freq_low(song_df)
        return song_df

    def drop_freq_low(self, song_df):
        """
        Remove users who listened to 5 or fewer songs.

        Args:
            song_df (pd.DataFrame): song data with 'user_id'/'song_id' columns.

        Returns:
            pd.DataFrame: only the rows of users with more than 5 songs.
        """
        freq_df = song_df.groupby(['user_id']).agg({'song_id': 'count'}).reset_index(level=['user_id'])
        # Users at or below the 5-song threshold are dropped.
        below_userid = freq_df[freq_df.song_id <= 5]['user_id']
        return song_df[~song_df.user_id.isin(below_userid)]

    def random_select_user(self, song_df, n):
        """
        Randomly select n users from the song dataframe (seeded, reproducible).

        Args:
            song_df (pd.DataFrame): song data.
            n (int): number of users to keep.

        Returns:
            pd.DataFrame: rows belonging to the n sampled users.
        """
        user_list = list(song_df.user_id.unique())
        random.seed(self.SEED)
        random.shuffle(user_list)
        return song_df[song_df.user_id.isin(user_list[0:n])]
if __name__=='__main__':
    # read song data as dataframe (downloads or loads the cached pickle)
    song_df = ReadData().readSongData()
    # random sample n users (demonstration only; the full frame is inserted below)
    randomsong_df = ReadData().random_select_user(song_df, 10)
    # # connect to sqlite database
    # conn = dbConn('../../data/song2.sqlite')
    #
    # # insert the dataframe into local database
    # song_df.to_sql(name='Song', con=conn, if_exists='replace', index=True)
    # insert the dataframe into RDS database via the Flask-SQLAlchemy engine
    song_df.to_sql("Song", db.engine, if_exists='replace', index=False)
    print("Song Data Inserted")
| 33.770642 | 117 | 0.626189 | 3,053 | 0.829394 | 0 | 0 | 0 | 0 | 0 | 0 | 1,904 | 0.517251 |
d7d0069da869d2dfb8343d1ac1da553ed1bc346a | 1,245 | py | Python | Codes/Prediction/RF_Prediction.py | sepehrgdr/Mode_Imputation | bd7c17d05beacdbdf2f4c9fdefa3062a253607c8 | [
"BSD-3-Clause"
] | null | null | null | Codes/Prediction/RF_Prediction.py | sepehrgdr/Mode_Imputation | bd7c17d05beacdbdf2f4c9fdefa3062a253607c8 | [
"BSD-3-Clause"
] | null | null | null | Codes/Prediction/RF_Prediction.py | sepehrgdr/Mode_Imputation | bd7c17d05beacdbdf2f4c9fdefa3062a253607c8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 14:24:27 2019
@author: adarzi
"""
#Loading the libraries
import pandas as pd
import os
from os import sys
import pickle
# Setting the directory relative to this script so the ../../Inputs paths resolve.
os.chdir(sys.path[0])

# Loading the long-distance trip features.
data = pd.read_csv('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance.csv')

# Mode codes: 0 = not yet classified, 4 = air.
data['mode'] = 0

# Rule-based air detection: very long trips with a high 75th-percentile speed.
data.loc[data.loc[(data['trip_dist'] >= 50000) & (data['speed_Q75'] >= 100)].index.values, 'mode'] = 4

# Separating air trips from trips that still need a model prediction.
# .copy() so the column assignment below targets this frame, not a view.
airtrips = data.loc[data['mode'] == 4]
df = data.loc[data['mode'] == 0].copy()

# Loading the data scaler model (with-block closes the file handle).
with open('data_scaler.sav', 'rb') as scaler_file:
    datascaler = pickle.load(scaler_file)

# Scaling the test data (feature columns 2..33).
test_data = df[df.columns[2:34]]
test_data_scaled = datascaler.transform(test_data)

# Loading the Random Forest model.
with open('Random_Forest.sav', 'rb') as model_file:
    RandomForest = pickle.load(model_file)

# Predicting the other modes.
prediction = RandomForest.predict(test_data_scaled)

# BUG FIX: `df.mode = prediction` shadowed the DataFrame.mode *method*
# attribute and never updated the 'mode' column, so non-air trips kept
# mode 0 in the output. Column assignment updates the actual column.
df['mode'] = prediction

# Combining all trips, restoring original row order, and saving.
alltrips = df.append(airtrips)
alltrips = alltrips.sort_index()
alltrips.to_csv('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance_with_mode.csv')
| 26.489362 | 96 | 0.73494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 650 | 0.522088 |
d7d0a76d7b4cce22fdb7bb20a546cca9f12ef41b | 340 | py | Python | abc153_e.py | tkt989/atcoder | d4f334f60c9d487a1d9c166cda3e2303d5db23e5 | [
"MIT"
] | null | null | null | abc153_e.py | tkt989/atcoder | d4f334f60c9d487a1d9c166cda3e2303d5db23e5 | [
"MIT"
] | null | null | null | abc153_e.py | tkt989/atcoder | d4f334f60c9d487a1d9c166cda3e2303d5db23e5 | [
"MIT"
] | null | null | null | import math
# Read H (total damage needed) and the N spells; each spell i deals A[i]
# damage per cast and costs B[i] magic points per cast.
H, N = [int(n) for n in input().split()]
A = []
B = []
for _ in range(N):
    a, b = [int(n) for n in input().split()]
    A.append(a)  # damage per cast
    B.append(b)  # magic-point cost per cast (parsed but unused here)

# Casts needed if only spell i is used: ceil(H / A[i]).
# Integer ceiling division avoids float-precision errors that
# math.ceil(H / A[i]) can introduce for large H.
maisu = [(H + a - 1) // a for a in A]
for m in maisu:
    print(m)
d7d12e722f235c4570fb348b9967c5ef1d812876 | 7,467 | py | Python | util.py | ChannyHong/ISREncoder | be8145756d50582c371378aa846906d6f94a45f6 | [
"Apache-2.0"
] | 6 | 2020-01-16T17:43:36.000Z | 2021-04-25T08:52:38.000Z | util.py | ChannyHong/ISREncoder | be8145756d50582c371378aa846906d6f94a45f6 | [
"Apache-2.0"
] | null | null | null | util.py | ChannyHong/ISREncoder | be8145756d50582c371378aa846906d6f94a45f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Superb AI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Authors: Channy Hong, Jaeyeon Lee, Jung Kwon Lee.
Description: Useful functions and class definitions.
'''
import numpy as np
import random
import os
import six
# NLI gold-label string -> integer class id.
LABEL_MAP = {
    "entailment": 0,
    "neutral": 1,
    "contradiction": 2,
}

# Full language name -> two-letter abbreviation for the 15 XNLI languages.
language_dict = {
    'English': 'en',
    'French': 'fr',
    'Spanish': 'es',
    'German': 'de',
    'Greek': 'el',
    'Bulgarian': 'bg',
    'Russian': 'ru',
    'Turkish': 'tr',
    'Arabic': 'ar',
    'Vietnamese': 'vi',
    'Thai': 'th',
    'Chinese': 'zh',
    'Hindi': 'hi',
    'Swahili': 'sw',
    'Urdu': 'ur',
}
# Converts `text` to Unicode (if it's not already), assuming utf-8 input. Copied from BERT implementation
def convert_to_unicode(text):
    """Convert `text` to unicode (if it's not already), assuming utf-8 input.

    Works under both Python 2 and Python 3 via six; raises ValueError for
    unsupported string types or interpreter versions. Copied from the BERT
    implementation.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        elif isinstance(text, unicode):
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def parse_languages_into_abbreviation_list(languages):
    """Turn a comma-separated string of language names into abbreviations."""
    abbreviations = []
    for language_name in languages.split(','):
        abbreviations.append(language_dict[language_name])
    return abbreviations
def create_language_reference(train_language_abbreviations):
    """Map each training-language abbreviation to its positional index."""
    return {abbrev: index for index, abbrev in enumerate(train_language_abbreviations)}
def convert_to_onehots(num_train_languages, labels):
    """Convert integer labels to one-hot lists of length `num_train_languages`."""
    return [
        [1 if position == label else 0 for position in range(num_train_languages)]
        for label in labels
    ]
def create_random_labels(num_train_languages, batch_size):
    """Draw `batch_size` language indices uniformly from [0, num_train_languages)."""
    return [random.randrange(num_train_languages) for _ in range(batch_size)]
def create_xhat_alphas(batch_size):
    """Draw one value uniformly between 0 and 1 for each example in the batch."""
    return [random.uniform(0, 1) for _ in range(batch_size)]
def get_mc_minibatch(train_examples, step_num, batch_size, language_reference):
    """Return the `step_num`-th (1-indexed) minibatch of MC sentences.

    Returns (sentences, language ids), each of length `batch_size`.
    """
    first = (step_num - 1) * batch_size
    # Index-based access (not a slice) so an out-of-range step still raises.
    batch = [train_examples[i] for i in range(first, first + batch_size)]
    sentences = [example.sentence for example in batch]
    languages = [language_reference[example.language] for example in batch]
    return sentences, languages
def get_xnli_minibatch(train_examples, step_num, batch_size, language_reference):
    """Return the `step_num`-th (1-indexed) XNLI minibatch.

    Returns (premise vectors, hypothesis vectors, NLI labels, language ids).
    """
    first = (step_num - 1) * batch_size
    # Index-based access (not a slice) so an out-of-range step still raises.
    batch = [train_examples[i] for i in range(first, first + batch_size)]
    premise_vectors = [example.sentence1 for example in batch]
    hypothesis_vectors = [example.sentence2 for example in batch]
    labels = [example.label for example in batch]
    languages = [language_reference[example.language] for example in batch]
    return premise_vectors, hypothesis_vectors, labels, languages
def convert_to_singles_from_pairs(train_example_in_pairs):
    """Flatten sentence pairs into individual InputSentence examples.

    Each pair contributes two sentences, in order (sentence1 then sentence2);
    NLI labels are discarded.
    """
    return [
        InputSentence(sentence=text, language=pair.language)
        for pair in train_example_in_pairs
        for text in (pair.sentence1, pair.sentence2)
    ]
def get_mc_train_examples(data_dir, train_language_abbreviations):
    """Load the per-language "mc_<lang>.npy" arrays from `data_dir` and wrap
    each entry as an InputSentence tagged with its language abbreviation."""
    train_examples = []
    for language_abbreviation in train_language_abbreviations:
        loaded_examples = np.load(os.path.join(data_dir, "mc_%s.npy" % language_abbreviation), allow_pickle=True)
        for example in loaded_examples:
            train_examples.append(InputSentence(sentence=example, language=language_abbreviation))
    return train_examples
def get_xnli_train_examples(data_dir, train_language_abbreviations):
    """Load the per-language "bse_<lang>.npy" arrays from `data_dir`.

    Each stored row is (sentence1, sentence2, label) and becomes an
    InputSentencePair tagged with its language abbreviation.
    """
    train_examples = []
    for language_abbreviation in train_language_abbreviations:
        loaded_examples = np.load(os.path.join(data_dir, "bse_%s.npy" % language_abbreviation), allow_pickle=True)
        for example in loaded_examples:
            train_examples.append(InputSentencePair(sentence1=example[0], sentence2=example[1], label=example[2], language=language_abbreviation))
    return train_examples
def get_xnli_dev_examples(data_dir, language_abbreviations, in_pairs=True):
    """Load dev examples from "DEV.npy" in `data_dir`, keeping only rows whose
    language (stored at index 3) is in `language_abbreviations`.

    Each stored row is (sentence1, sentence2, label, language). With
    `in_pairs=True` rows become InputSentencePair objects; otherwise each row
    yields two InputSentence objects (the pairing and NLI label are dropped).
    """
    dev_examples = []
    loaded_examples = np.load(os.path.join(data_dir, "DEV.npy"), allow_pickle=True)
    if in_pairs:
        for example in loaded_examples:
            if example[3] in language_abbreviations:
                dev_examples.append(InputSentencePair(sentence1=example[0], sentence2=example[1], label=example[2], language=example[3]))
    else:
        for example in loaded_examples:
            if example[3] in language_abbreviations:
                dev_examples.append(InputSentence(sentence=example[0], language=example[3]))
                dev_examples.append(InputSentence(sentence=example[1], language=example[3]))
    return dev_examples
def get_xnli_dev_examples_by_language(data_dir, language_abbreviations):
    """Group XNLI dev sentence pairs into {language abbreviation: [pairs]}."""
    dev_examples_by_lang_dict = {}
    dev_example_in_pairs = get_xnli_dev_examples(data_dir, language_abbreviations, True)
    for language_abbreviation in language_abbreviations:
        dev_examples_by_lang = []
        for dev_example_in_pair in dev_example_in_pairs:
            if dev_example_in_pair.language == language_abbreviation:
                dev_examples_by_lang.append(dev_example_in_pair)
        dev_examples_by_lang_dict[language_abbreviation] = dev_examples_by_lang
    return dev_examples_by_lang_dict
# One training minibatch: premise/hypothesis sentence vectors plus the one-hot
# language labels and per-example alphas used during training.
class Minibatch(object):
    def __init__(self, examples, num_train_languages, language_reference, with_ISR):
        # examples: list of InputSentencePair; language_reference: lang -> index.
        num_examples = len(examples)
        self.prem_sentences = [example.sentence1 for example in examples]
        self.hyp_sentences = [example.sentence2 for example in examples]
        # One-hot encodings of each example's actual language...
        original_labels = [language_reference[example.language] for example in examples]
        self.original_label_onehots = convert_to_onehots(num_train_languages, original_labels)
        # ...and of a randomly drawn target language per example.
        target_labels = create_random_labels(num_train_languages, num_examples)
        self.target_label_onehots = convert_to_onehots(num_train_languages, target_labels)
        # One value uniformly drawn between 0 and 1 per example; presumably
        # interpolation coefficients for x-hat construction — TODO confirm.
        self.xhat_alphas = create_xhat_alphas(num_examples)
        # NLI gold labels are only attached when training with ISR.
        self.nli_labels = None
        if with_ISR:
            self.nli_labels = [example.label for example in examples]
# A single training/eval/test sentence.
class InputSentence(object):
    def __init__(self, sentence, language):
        # `sentence` is the text or its stored encoding (varies by loader);
        # `language` is the two-letter abbreviation, e.g. 'en'.
        self.sentence = sentence
        self.language = language
# A single training/eval/test sentence pair (premise/hypothesis + NLI label).
class InputSentencePair(object):
    def __init__(self, sentence1, sentence2, language, label=None):
        self.sentence1 = sentence1  # premise
        self.sentence2 = sentence2  # hypothesis
        self.label = label  # NLI label; None when unknown or not needed
        self.language = language  # two-letter language abbreviation
| 33.78733 | 140 | 0.759207 | 1,127 | 0.150931 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.176912 |
d7d42afdeeec54d2a3cf68a4146560be3acb0b13 | 5,676 | py | Python | shortening_calculations.py | joferkington/oost_paper_code | 6c6cf242a3fa6f4373c8ed528f6510f5fdf96d4d | [
"MIT"
] | 20 | 2015-06-25T19:56:45.000Z | 2021-05-18T00:35:33.000Z | shortening_calculations.py | joferkington/oost_paper_code | 6c6cf242a3fa6f4373c8ed528f6510f5fdf96d4d | [
"MIT"
] | null | null | null | shortening_calculations.py | joferkington/oost_paper_code | 6c6cf242a3fa6f4373c8ed528f6510f5fdf96d4d | [
"MIT"
] | 19 | 2015-09-10T00:21:33.000Z | 2019-05-02T19:34:38.000Z | from uncertainties import ufloat
from utilities import min_value, max_value
def main():
print 'Plate motion rate parallel to section'
print plate_motion()
print 'Shortening (including ductile) from bed-length'
print bed_length_shortening()
print 'Estimated total shortening accomodated by OOSTS'
print oost_shortening()
print 'Shortening accommodated by seaward branch of OOSTS'
print seaward_shortening()
print 'Percentage of OOST shortening'
print total_oost_percentage()
print 'Landward Percentage'
print landward_percentage()
print 'Seaward Percentage'
print seaward_percentage()
def bed_length_balancing():
"""Summed fault heaves from bed-length balancing."""
present_length = 32
# 2km error from range in restored pin lines + 10% interpretation error
restored_length = ufloat(82, 10)
shortening = restored_length - present_length
return shortening
def bed_length_shortening():
"""Shortening estimate including volume loss."""
alpha = ufloat(0.35, 0.1)
heaves = bed_length_balancing()
return heaves * (1 + alpha)
def age():
"""
Age of the oldest in-sequence structures from Strasser, 2009.
Returns:
--------
avg_age : A ufloat with an assumed 2 sigma uncertainty
min_age : The "hard" minimum from Strasser, et al, 2009
max_age : The "hard" maximum from Strasser, et al, 2009
"""
min_age = 1.95 # Ma
max_age = 2.512 # Ma
# Strasser perfers an older age within this range, so we model this as
# 2.3 +/- 0.2, but provide mins and maxs
avg_age = ufloat(2.3, 0.2) # Ma
return avg_age, min_age, max_age
def plate_motion():
    """
    Plate motion rate (forearc relative to oceanic plate) _parallel_ _to_
    _section_ (not the full plate vector!) based on elastic block modeling
    (Loveless & Meade, 2010).

    Returns:
    --------
        rate : ufloat in mm/yr with a 2 sigma error
    """
    # Uses the block segment nearest the study area instead of the derived
    # euler pole; Loveless's reported errors are assumed to be 2 sigma.
    return ufloat(42.9, 2.1)
def total_convergence():
    """
    Total shortening parallel to section from plate motion and ages.

    Returns:
    --------
        shortening : ufloat, plate motion integrated over the age of
            deformation, with a 2 sigma confidence interval
        min_shortening : "hard" minimum (rate uncertainty + minimum age)
        max_shortening : "hard" maximum (rate uncertainty + maximum age)
    """
    preferred_age, hard_min_age, hard_max_age = age()
    rate = plate_motion()
    return (
        rate * preferred_age,
        min_value(rate * hard_min_age),
        max_value(rate * hard_max_age),
    )
def oost_shortening():
    """
    Shortening on the out-of-sequence thrust system based on integrated plate
    convergence minus the shortening predicted in the outer wedge from line
    balancing results.
    Returns:
    --------
        shortening : A ufloat with a 2 sigma error estimate
    """
    # Only the preferred estimate is needed here; the hard min/max from
    # total_convergence() were previously bound to unused locals.
    total_shortening, _, _ = total_convergence()
    return total_shortening - bed_length_shortening()
def seaward_shortening():
    """Shortening accomodated on the seaward branch of the OOSTS based on
    comparing the total (`oost_shortening()`) shortening with the shortening
    predicted on the landward branch from forearc uplift.
    Returns:
    --------
        shortening : a ufloat with 2 sigma error in kilometers.
    """
    from process_bootstrap_results import shortening_parallel_to_section
    # Bootstrap result is in meters; convert to kilometers.
    landward_km = shortening_parallel_to_section() / 1000
    return oost_shortening() - landward_km
def total_oost_percentage():
    """
    Percentage of shortening accommdated by out-of-sequence thrusting during
    the development of the present-day outer wedge.
    Returns:
    --------
        percentage : A ufloat with a 2 sigma error representing a unitless
            ratio (e.g. multiply by 100 to get percentage).
    """
    # Discard the unused hard min/max values that were previously bound.
    total_shortening, _, _ = total_convergence()
    return oost_shortening() / total_shortening
def seaward_percentage():
    """
    Percentage of total plate convergence accomodated by the seaward branch of
    the OOSTS during its period of activity.
    Returns:
    --------
        percentage : A ufloat with a 2 sigma error representing a unitless
            ratio (e.g. multiply by 100 to get percentage).
    """
    # Activity duration in Myr from Strasser, 2009.
    active_duration = 1.95 - 1.24
    convergence = plate_motion() * active_duration
    return seaward_shortening() / convergence
def landward_percentage():
    """
    Maximum percentage of total plate convergence accomodated by the landward
    branch of the OOSTS during its period of activity.
    Returns:
    --------
        percentage : A ufloat with a 2 sigma error representing a unitless
            ratio (e.g. multiply by 100 to get percentage).
    """
    from process_bootstrap_results import shortening_parallel_to_section
    # Bootstrap result is in meters; convert to kilometers.
    landward_km = shortening_parallel_to_section() / 1000
    active_duration = ufloat(0.97, 0.07) - ufloat(0.25, 0.25)  # Myr
    return landward_km / (plate_motion() * active_duration)
# Script entry point; main() is defined elsewhere in this module (not
# visible in this chunk).
if __name__ == '__main__':
    main()
| 32.434286 | 80 | 0.688161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,414 | 0.60148 |
d7d5e45498157f2f20cc7e26dda1b23ec9642455 | 1,822 | py | Python | untested/people_multi/config.py | vishalbelsare/neworder | 38635fca64f239a9e8eb1a671872c174e1814678 | [
"MIT"
] | 17 | 2017-12-08T10:21:18.000Z | 2022-01-13T09:29:43.000Z | untested/people_multi/config.py | vishalbelsare/neworder | 38635fca64f239a9e8eb1a671872c174e1814678 | [
"MIT"
] | 61 | 2018-07-21T21:37:12.000Z | 2021-07-10T12:49:15.000Z | untested/people_multi/config.py | vishalbelsare/neworder | 38635fca64f239a9e8eb1a671872c174e1814678 | [
"MIT"
] | 6 | 2019-06-06T18:29:31.000Z | 2021-08-20T13:32:17.000Z |
""" config.py
Microsimulation config for mulit-LAD MPI simulation
"""
import numpy as np
import glob
import neworder
# define some global variables describing where the starting population and the parameters of the dynamics come from
initial_populations = glob.glob("examples/people_multi/data/ssm_*_MSOA11_ppp_2011.csv")
asfr = "examples/shared/NewETHPOP_fertility.csv"
asmr = "examples/shared/NewETHPOP_mortality.csv"
# internal in-migration
asir = "examples/shared/NewETHPOP_inmig.csv"
# internal out-migration
asor = "examples/shared/NewETHPOP_outmig.csv"
# immigration
ascr = "examples/shared/NewETHPOP_immig.csv"
# emigration
asxr = "examples/shared/NewETHPOP_emig.csv"
# MPI split initial population files over threads
def partition(arr, count):
    """Deal *arr* round-robin into *count* interleaved sub-lists."""
    buckets = []
    for offset in range(count):
        buckets.append(arr[offset::count])
    return buckets
# Split the initial population files across MPI processes (round-robin).
initial_populations = partition(initial_populations, neworder.mpi.size())
# running/debug options
neworder.log_level = 1
# initialisation: each entry names the module/class to instantiate and the
# constructor args; this rank only gets its own slice of the input files.
neworder.initialisations = {
  "people": { "module": "population", "class_": "Population", "args": (initial_populations[neworder.mpi.rank()], asfr, asmr, asir, asor, ascr, asxr) }
}
# define the evolution: 39 equal steps from mid-2011 to mid-2050.
neworder.timeline = neworder.Timeline(2011.25, 2050.25, [39])
# timestep must be defined in neworder
# transitions applied to the population at every timestep (evaluated by name).
neworder.dataframe.transitions = {
  "fertility": "people.births(timestep)",
  "mortality": "people.deaths(timestep)",
  "migration": "people.migrations(timestep)",
  "age": "people.age(timestep)"
}
# checks to perform after each timestep. Assumed to return a boolean
neworder.do_checks = True # Faith
# assumed to be methods of class_ returning True if checks pass
neworder.checks = {
  "check": "people.check()"
}
# Generate output at each checkpoint
neworder.checkpoints = {
  #"check_data" : "people.check()",
  "write_table" : "people.write_table()"
}
| 30.881356 | 150 | 0.751921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,150 | 0.631175 |
d7d5f7aa6c2993b4d6d4530b59b459170fa38a3a | 2,451 | py | Python | openarticlegauge/view/issue.py | CottageLabs/OpenArticleGauge | 58d29b4209a7b59041d61326ffe1cf03f98f3cff | [
"BSD-3-Clause"
] | 1 | 2016-04-07T18:29:27.000Z | 2016-04-07T18:29:27.000Z | openarticlegauge/view/issue.py | CottageLabs/OpenArticleGauge | 58d29b4209a7b59041d61326ffe1cf03f98f3cff | [
"BSD-3-Clause"
] | 11 | 2015-01-06T15:53:09.000Z | 2022-03-01T01:46:14.000Z | openarticlegauge/view/issue.py | CottageLabs/OpenArticleGauge | 58d29b4209a7b59041d61326ffe1cf03f98f3cff | [
"BSD-3-Clause"
] | null | null | null | # present and accept dispute processing
from flask import Blueprint, request, make_response, render_template, flash, redirect, abort

from openarticlegauge.core import app
import openarticlegauge.models as models
import openarticlegauge.util as util
blueprint = Blueprint('issue', __name__)
@blueprint.route("/", methods=['GET','POST'])
@blueprint.route(".json", methods=['GET','POST'])
@blueprint.route("/<path:path>", methods=['GET','POST','DELETE'])
@util.jsonp
def issue(path=''):
givejson = util.request_wants_json()
path = path.replace('.json','')
i = False
if path:
i = models.Issue.pull(path)
if request.method == 'GET':
if givejson:
resp = make_response( i.data )
resp.mimetype = "application/json"
return resp
else:
return render_template('issue.html', issue=i)
elif request.method == 'POST':
if not i:
i = models.Issue()
if request.json:
i.data = request.json
elif request.values:
i.data['about'] = request.values['about']
i.data['issue'] = request.values['issue']
i.data['email'] = request.values['email']
else:
abort(404)
# only save an issue about an ID we actually have a record for
if len(i.data['about']) < 9:
cid = 'pmid:'
else:
cid = 'doi:'
check = models.Record.pull(cid + i.data['about'].replace('/','_'))
if check is not None:
i.save()
elif givejson:
abort(404)
else:
flash("Sorry, your issue is about an identifier for which we do not hold a record.", 'error')
return render_template('issue.html', issue=i)
if app.config['CONTACT_EMAIL'] and not app.config['DEBUG']:
text = 'Hey, an issue has been raised for ' + i.data['about'] + '\n\nView it at http://oag.cottagelabs.com/issue/' + i.id
util.send_mail([app.config['CONTACT_EMAIL']], app.config['CONTACT_EMAIL'], "issue raised", text)
if givejson:
resp = make_response( i.data )
resp.mimetype = "application/json"
return resp
else:
flash("Thanks, your issue has been raised", 'success')
return redirect('/issue/' + i.id)
elif request.method == 'DELETE' and i:
i.delete()
return ""
else:
abort(404)
| 31.423077 | 133 | 0.572827 | 0 | 0 | 0 | 0 | 2,161 | 0.881681 | 0 | 0 | 637 | 0.259894 |
d7d6977e92b32a6c300621013c13d82efe31ea62 | 3,700 | py | Python | lucky_number.py | sankalpa-udawaththa/Python-game-lucky-number | 7c7b402f88bc5ce2acb8a77f7cd5450bc05f2590 | [
"MIT"
] | null | null | null | lucky_number.py | sankalpa-udawaththa/Python-game-lucky-number | 7c7b402f88bc5ce2acb8a77f7cd5450bc05f2590 | [
"MIT"
] | null | null | null | lucky_number.py | sankalpa-udawaththa/Python-game-lucky-number | 7c7b402f88bc5ce2acb8a77f7cd5450bc05f2590 | [
"MIT"
] | null | null | null | import random
# Game state: the secret code, the number of guesses made so far and the
# maximum number of attempts allowed.
secret = random.randint(0,100000)
guess_count = 0
guess_limit = 3
name = input("Enter Your Name: ")
print(f"Hello, {name.upper()} welcome")
print(" | This is your MAGIC CODE ➡️ [NFHSFXYW1234" + str(secret) + "5678910WRYUCN] |")
print(" | Magic CODE Range Between 0 and 100000 |")
print(" | You can find your Lucky Number from above numbers |")
print(" | Your MAGIC Number is here! |")
print(" | Select you Lucky Number |")
# Main guess loop: up to guess_limit attempts.
while guess_count < guess_limit:
    guess = int(input("guess: "))
    # NOTE(review): only the upper bound is validated here — negative
    # guesses are accepted as "valid". Presumably the lower bound (0)
    # should also be checked; confirm intended behaviour.
    if guess <= 100000:
        print("")
    else:
        print("invalid Code")
print('''
,adPPYba, 8b,dPPYba, 8b,dPPYba, ,adPPYba, 8b,dPPYba,
a8P_____88 88P' "Y8 88P' "Y8 a8" "8a 88P' "Y8
8PP""""""" 88 88 8b d8 88
"8b, ,aa 88 88 "8a, ,a8" 88
`"Ybbd8"' 88 88 `"YbbdP"' 88
''')
print(f"Sorry {name.upper()} try another attempt")
break
guess_count += 1
if guess == secret:
print(f"Congrats {name.upper()} you won! 😁😁😁😁😁😁😁😁😁😁😁😁😁😁😁😁😁")
print('''
┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌█████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌███████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌███┌┌┌██┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌██┌┌┌┌██┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌███┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌███┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌██┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌███┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌██┌┌┌┌┌┌████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌┌┌██┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌███┌███┌┌┌┌┌┌┌┌┌┌██┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌████████████┌┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌
┌┌████████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌
┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌█████████┌┌
███┌┌┌┌█████████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌█████┌
██┌┌┌███████┌████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌███┌
██┌┌┌┌███┌┌┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
███┌┌┌┌┌┌┌┌┌┌┌█████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
┌███┌┌┌┌┌┌┌████████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
┌┌████████████┌┌┌████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
┌███┌██████┌┌┌┌┌┌┌████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
┌███┌┌┌┌┌┌┌┌┌┌┌██████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
┌┌████┌████┌██████████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌██┌
┌┌┌████████████┌┌┌┌┌███┌┌┌┌┌┌┌┌┌┌┌┌┌███┌
┌┌┌┌██┌┌┌┌┌┌┌┌┌┌┌███████┌┌┌┌┌┌┌███████┌┌
┌┌┌┌████┌┌┌┌┌┌████████┌┌┌┌┌┌┌┌████████┌┌
┌┌┌┌┌████████████┌┌┌███┌┌┌┌┌███┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌███┌█┌█┌┌┌┌┌┌███┌┌┌███┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌███┌┌┌┌┌┌█████┌┌█████┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌██████████████████┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌██████████████┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌
┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌┌''')
break
else:
print("Sorry " + name + " You failed 😕😕😕😕😕😕😕😕")
print('''
░░░░░░░░░░░█████████████
░░░░░░░░░███░███░░░░░░██
███░░░░░██░░░░██░██████████
████████░░░░░░████░░░░░░░██
████░░░░░░░░░░██░░██████████
████░░░░░░░░░░░███░░░░░░░░░██
████░░░░░░░░░░░██░░██████████
████░░░░░░░░░░░░████░░░░░░░░█
████░░░░░░░░░░░░░███░░████░░█
█████████░░░░░░░░░░████░░░░░█
███░░░░░██░░░░░░░░░░░░░█████
░░░░░░░░░███░░░░░░░██████
░░░░░░░░░░░██░░░░░░██
░░░░░░░░░░░░███░░░░░██
░░░░░░░░░░░░░░██░░░░██
░░░░░░░░░░░░░░░███░░░██
░░░░░░░░░░░░░░░░░██░░░█
░░░░░░░░░░░░░░░░░░█░░░█
░░░░░░░░░░░░░░░░░░██░██
░░░░░░░░░░░░░░░░░░░███
Please , Try Again!''') | 37.755102 | 119 | 0.176216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,250 | 0.935122 |
d7d6a3b1326a40ebd9018af61c19a1860d81c976 | 9,195 | py | Python | tutorials/auction/smartpy/smartpy_generated/nft_wallet/Dutch_pure.py | tqtezos/ticket-tutorials | 37ea1c489173941b6605c2a850f2d33a59c53256 | [
"MIT"
] | 7 | 2020-12-01T20:52:12.000Z | 2021-08-09T18:26:12.000Z | tutorials/auction/smartpy/smartpy_generated/nft_wallet/Dutch_pure.py | tqtezos/ticket-tutorials | 37ea1c489173941b6605c2a850f2d33a59c53256 | [
"MIT"
] | 1 | 2021-04-20T12:05:12.000Z | 2021-04-20T12:05:12.000Z | tutorials/auction/smartpy/smartpy_generated/nft_wallet/Dutch_pure.py | tqtezos/ticket-tutorials | 37ea1c489173941b6605c2a850f2d33a59c53256 | [
"MIT"
] | 1 | 2021-02-27T16:33:27.000Z | 2021-02-27T16:33:27.000Z | import smartpy as sp
AUCTION_PARAMS_TYPE = sp.TRecord(opening_price = sp.TNat,
reserve_price = sp.TNat,
start_time = sp.TTimestamp,
round_time = sp.TInt,
ticket = sp.TTicket(sp.TNat))
METADATA_TYPE = sp.TMap(sp.TString, sp.TBytes)
TOKEN_METADATA_TYPE = sp.TBigMap(sp.TNat,
sp.TPair(sp.TNat, METADATA_TYPE))
class NFTWallet(sp.Contract):
    """SmartPy NFT wallet: holds ticket-based NFTs keyed by a running id.

    Storage: admin address, a big-map of tickets, the next free id, and
    token metadata keyed by id.
    """
    def __init__(self, owner):
        self.add_flag("edo")  # ticket support requires the Edo protocol
        self.init_type(sp.TRecord(admin = sp.TAddress,
                                  tickets = sp.TBigMap(sp.TNat,
                                                       sp.TTicket(sp.TNat)),
                                  current_id = sp.TNat,
                                  token_metadata = TOKEN_METADATA_TYPE))
        self.init(admin = owner,
                  tickets = sp.big_map({}),
                  current_id = 0,
                  token_metadata = sp.big_map({}))

    @sp.entry_point
    def createNft(self, metadata):
        # Mint a new quantity-1 ticket NFT under the next free id; admin only.
        sp.set_type(metadata, METADATA_TYPE)
        sp.verify(sp.sender == self.data.admin)
        my_ticket = sp.ticket(self.data.current_id, 1)
        current_id = self.data.current_id
        new_map = sp.update_map(self.data.tickets, current_id, sp.some(my_ticket))
        self.data.tickets = new_map
        self.data.token_metadata[current_id] = sp.pair(current_id, metadata)
        self.data.current_id = current_id + 1

    @sp.entry_point
    def receiveNft(self, nft):
        # Accept a single incoming NFT ticket (sent on behalf of the wallet
        # admin) and store it under a fresh local id.
        sp.set_type(nft, sp.TTicket(sp.TNat))
        ticket_data, ticket_next = sp.read_ticket(nft)
        qty = sp.compute(sp.snd(sp.snd(ticket_data)))
        originator = sp.compute(sp.fst(ticket_data))
        id = sp.compute(sp.fst(sp.snd(ticket_data)))
        sp.verify(qty == 1, "Only send 1 Nft to this entrypoint")
        sp.verify(sp.source == self.data.admin, "Ticket needs to be sent by wallet admin")
        current_id = self.data.current_id
        new_map = sp.update_map(self.data.tickets, current_id, sp.some(ticket_next))
        self.data.tickets = new_map
        self.data.current_id = current_id + 1

    @sp.entry_point
    def sendNft(self, params):
        # Remove the ticket from storage and transfer it to the given
        # contract; admin only.
        sp.set_type(params, sp.TRecord(ticket_id = sp.TNat, send_to = sp.TContract(sp.TTicket(sp.TNat))))
        sp.verify(sp.sender == self.data.admin)
        my_ticket, new_map = sp.get_and_update(self.data.tickets, params.ticket_id, sp.none)
        sp.verify(my_ticket.is_some(), "Ticket does not exist")
        self.data.tickets = new_map
        sp.transfer(my_ticket.open_some(), sp.mutez(0), params.send_to)

    @sp.entry_point
    def configNftAuction(self, params):
        # Move one of this wallet's tickets into a Dutch auction contract by
        # calling its "configureAuction" entrypoint; admin only.
        sp.verify(sp.sender == self.data.admin)
        sp.set_type(params, sp.TRecord(auction_address = sp.TAddress,
                                       opening_price = sp.TNat,
                                       reserve_price = sp.TNat,
                                       start_time = sp.TTimestamp,
                                       round_time = sp.TInt,
                                       ticket_id = sp.TNat))
        my_ticket, new_map = sp.get_and_update(self.data.tickets, params.ticket_id, sp.none)
        sp.verify(my_ticket.is_some(), "Ticket does not exist")
        self.data.tickets = new_map
        auction_params = sp.record(opening_price = params.opening_price,
                                   reserve_price = params.reserve_price,
                                   start_time = params.start_time,
                                   round_time = params.round_time,
                                   ticket = my_ticket.open_some())
        auction_contract = sp.contract(AUCTION_PARAMS_TYPE, params.auction_address, entry_point = "configureAuction").open_some()
        sp.transfer(auction_params, sp.mutez(0), auction_contract)
class DutchAuction(sp.Contract):
    """Descending-price (Dutch) auction for a single ticket-based NFT.

    The owner configures and starts the auction, then lowers the price
    round by round; the first buyer to pay the current price wins the
    ticket.
    """
    def __init__(self, admin):
        self.add_flag("edo")  # ticket support requires the Edo protocol
        self.init(owner = admin,
                  current_price = 0,
                  reserve_price = 0,
                  in_progress = sp.bool(False),
                  start_time = sp.timestamp(0),
                  round_time = 0,
                  ticket = sp.none)
        self.init_type(t = sp.TRecord(owner = sp.TAddress,
                                      current_price = sp.TNat,
                                      reserve_price = sp.TNat,
                                      in_progress = sp.TBool,
                                      start_time = sp.TTimestamp,
                                      round_time = sp.TInt,
                                      ticket = sp.TOption(sp.TTicket(sp.TNat))))

    @sp.entry_point
    def configureAuction(self, params):
        # Owner loads the ticket plus price/timing parameters; only allowed
        # while no auction is running.
        sp.set_type(params, AUCTION_PARAMS_TYPE)
        sp.verify(sp.source == self.data.owner, "User Not Authorized")
        sp.verify(~self.data.in_progress, "Auction in progress")
        self.data.current_price = params.opening_price
        self.data.reserve_price = params.reserve_price
        self.data.start_time = params.start_time
        self.data.round_time = params.round_time
        self.data.ticket = sp.some(params.ticket)

    @sp.entry_point
    def startAuction(self):
        sp.verify(sp.sender == self.data.owner, "User not Authorized")
        sp.verify(~self.data.in_progress, "Auction in progress")
        # Verify ticket/asset sent
        sp.verify(self.data.ticket.is_some(), "No ticket to auction")
        # verify now is at least start time of auction
        sp.verify(sp.now >= self.data.start_time, "Too early to start auction")
        self.data.in_progress = sp.bool(True)
        self.data.start_time = sp.now

    @sp.entry_point
    def dropPrice(self, new_price):
        # Owner lowers the asking price once the current round has elapsed;
        # the price must strictly drop and may not fall below the reserve.
        sp.set_type(new_price, sp.TNat)
        sp.verify(sp.sender == self.data.owner, "User not Authorized")
        sp.verify(self.data.in_progress, "No Auction in progress")
        sp.verify(new_price < self.data.current_price, "Price not dropped")
        sp.verify(new_price >= self.data.reserve_price, "Price below reserve_price")
        # verify now more than round_end_time = start_time + round_time
        sp.verify(sp.now > self.data.start_time.add_seconds(self.data.round_time), "Previous round has not ended")
        self.data.current_price = new_price
        self.data.start_time = sp.now

    @sp.entry_point
    def buy(self, wallet_address):
        # Anyone but the owner may buy at exactly the current price while the
        # round is still open; payment goes to the owner, the ticket to the
        # buyer's wallet.
        sp.set_type(wallet_address, sp.TAddress)
        sp.verify(self.data.in_progress)
        sp.verify(~(sp.sender == self.data.owner))
        sp.verify(sp.amount == sp.mutez(self.data.current_price))
        # verify now less than round_end_time = start_time + round_time
        sp.verify(sp.now < self.data.start_time.add_seconds(self.data.round_time))
        sp.send(self.data.owner, sp.amount)
        # Send ticket/asset to winner. They must have a "receiveNft"
        # entrypoint that accepts a ticket of the correct type.
        c = sp.contract(sp.TTicket(sp.TNat), wallet_address, entry_point = "receiveNft").open_some()
        sp.transfer(self.data.ticket.open_some(), sp.mutez(0), c)
        # endAuction
        self.data.ticket = sp.none
        self.data.in_progress = sp.bool(False)

    @sp.entry_point
    def cancelAuction(self):
        # Owner aborts a running auction and takes the ticket back.
        sp.verify(self.data.in_progress, "No Auction in progress")
        sp.verify(sp.sender == self.data.owner, "User not Authorized")
        self.data.current_price = 0
        # Send back ticket to owner
        c = sp.contract(sp.TTicket(sp.TNat), self.data.owner, entry_point = "receiveNft").open_some()
        sp.transfer(self.data.ticket.open_some(), sp.mutez(0), c)
        # endAuction
        self.data.ticket = sp.none
        self.data.in_progress = sp.bool(False)
class Viewer(sp.Contract):
    """Test helper contract that records the last value sent to it."""
    def __init__(self, t):
        self.init(last = sp.none)
        self.init_type(sp.TRecord(last = sp.TOption(t)))

    @sp.entry_point
    def target(self, params):
        self.data.last = sp.some(params)
@sp.add_test(name = "Test Auction")
def test():
    """End-to-end scenario: mint an NFT into Alice's wallet, auction it,
    and have Bob buy it for 90 mutez after one price drop."""
    time = sp.timestamp(1571761674)
    # Create test scenario
    scenario = sp.test_scenario()
    scenario.table_of_contents()
    # sp.test_account generates ED25519 key-pairs deterministically:
    alice = sp.test_account("Alice")
    bob = sp.test_account("Robert")
    # Create HTML output for debugging
    scenario.h1("Dutch Auction")
    # Instantiate Auction contract
    auction = DutchAuction(alice.address)
    scenario += auction
    alice_wallet = NFTWallet(alice.address)
    bob_wallet = NFTWallet(bob.address)
    scenario += alice_wallet
    scenario += bob_wallet
    scenario.h2("Create NFT")
    token_metadata = sp.map({"name" : sp.bytes_of_string("Nft1")})
    scenario += alice_wallet.createNft(token_metadata).run(sender = alice)
    scenario.h2("Configure and start auction")
    # Configuring moves ticket 0 out of Alice's wallet into the auction.
    scenario += alice_wallet.configNftAuction(auction_address = auction.address,
                                              opening_price = 100,
                                              reserve_price = 10,
                                              start_time = time,
                                              round_time = 1000,
                                              ticket_id = 0).run(source = alice, sender = alice, now = time)
    scenario.verify(~ alice_wallet.data.tickets.contains(0))
    time = time.add_seconds(1)
    scenario += auction.startAuction().run(sender = alice, now = time)
    time = time.add_seconds(6001)
    scenario += auction.dropPrice(90).run(sender = alice, now = time)
    scenario.h2("Bob buys")
    time = time.add_seconds(1)
    scenario += auction.buy(bob_wallet.address).run(sender = bob, source = bob, now = time, amount = sp.mutez(90))
    # The ticket must now sit in Bob's wallet and be gone from the auction.
    scenario.verify(bob_wallet.data.tickets.contains(0))
    scenario.verify(~ auction.data.ticket.is_some())
| 40.328947 | 129 | 0.641979 | 7,139 | 0.7764 | 0 | 0 | 7,475 | 0.812942 | 0 | 0 | 1,089 | 0.118434 |
d7d6f3292bc828516848b10b41f3ffe87678ff9d | 4,839 | py | Python | src/input_data_generation/Data_Generation.py | jwkim98/SocioCraft | 7e6aa9ff38cca7694fe6a5e408825636616ae1b3 | [
"MIT"
] | null | null | null | src/input_data_generation/Data_Generation.py | jwkim98/SocioCraft | 7e6aa9ff38cca7694fe6a5e408825636616ae1b3 | [
"MIT"
] | 6 | 2019-11-20T13:29:29.000Z | 2019-12-19T17:57:21.000Z | src/input_data_generation/Data_Generation.py | jwkim98/SocialSim | 7e6aa9ff38cca7694fe6a5e408825636616ae1b3 | [
"MIT"
] | null | null | null | # 일반적으로 Data Generation file은 Group 별로 data를 생성 가능
# To avoid the notion of groups entirely, give every group identical
# settings: that is equivalent to generating a single group.
import csv
import numpy as np
looptime=3 # number of groups
number=[100,100,100] # people per group
number2=[100,100,100] # people per group
strengthlimit=100 # maximum strength
mean=[0,0,0] # Gaussian distribution mean used for each group
money=5000 # money per person (independent of group)
selfishvar=[1,1,1] # variance of selfishness for each group
selflessvar=[1,1,1] # variance of selflessness for each group
strvar=[1,1,1] # variance of strength for each group
f=open('Personal data.csv','w')
write=csv.writer(f)
# --- Personal data ---
for k in range(looptime):
    # --- Selfishness ---
    selfishness=np.random.normal(mean[k],selfishvar[k],number[k]) # draw selfishness samples
    minselfishness=min(selfishness)
    if (minselfishness<0): # shift samples so all are non-negative (selfishness must be positive)
        selfishness=selfishness+abs(minselfishness)
    maxselfishness=max(selfishness)
    selfishness=selfishness/maxselfishness # normalize to the range 0..1
    selfishness=selfishness
    # --- Selflessness ---
    selflessness=np.random.normal(mean[k],selflessvar[k],number[k]) # draw selflessness samples
    minselflessness=min(selflessness)
    if (minselflessness<0): # shift samples so all are non-negative (selflessness must be positive)
        selflessness=selflessness+abs(minselflessness)
    maxselflessness=max(selflessness)
    selflessness=selflessness/maxselflessness # normalize to the range 0..1
    selflessness=selflessness
    # --- Strength ---
    strength=np.random.normal(mean[k],strvar[k],number[k]) # draw strength samples
    minstrength=min(strength)
    if (minstrength<0): # shift samples so all are non-negative (strength must be positive)
        strength=strength+abs(minstrength)
    maxstrength=max(strength)
    strength=strength/maxstrength # normalize to the range 0..1
    strength=strength*strengthlimit
    # Row layout: Group ID, Personal ID, selflessness, selfishness, money, strength
    for i in range(number[k]):
        if (k==0):
            personid=i
        else:
            personid=i
            for j in range(k):
                personid=personid+number[j]
        write.writerow(["%d" %0, "%d" %personid,"%.3f" %selflessness[i],"%.3f" %selfishness[i],"%d" %money,"%.3f" %strength[i]]) # if there is only one group, every person's Group ID is 0
f.close()
# --- Relational data ---
g=open('Relationship data.csv','w')
write=csv.writer(g)
meanforrelationship=0
relationvar=1 # variance for relationship
totalnumber=0
plus=relationvar # bonus added to relationships within the same group
for i in range(looptime):
    totalnumber=totalnumber+number[i]
numberforindex=number
for a in range(looptime):
    if (a!=0):
        numberforindex[a]=numberforindex[a]+number[a-1]
for i in range(totalnumber):
    # Draw this person's relationship values towards everyone else.
    PersonalRelationship=np.random.normal(meanforrelationship,relationvar,totalnumber-1)
    c=0
    groupnumber=0
    while(i>=numberforindex[c]):
        groupnumber=groupnumber+1
        c=c+1
    numberforplus=number2[groupnumber]
    for d in range(numberforplus-1):
        if (groupnumber==0):
            indexforplus=d
        else:
            indexforplus=numberforindex[groupnumber-1]+d
        PersonalRelationship[indexforplus]=PersonalRelationship[indexforplus] # better same-group relationship (no-op: the bonus is not applied when only one group is used)
    maxRelation=abs(PersonalRelationship[0])
    for j in range(totalnumber-1):
        if(abs(PersonalRelationship[j])>maxRelation):
            maxRelation=abs(PersonalRelationship[j])
    PersonalRelationship=PersonalRelationship/maxRelation
    index=0
    # Opportunity to meet person
    for k in range(totalnumber):
        if (k!=i):
            if(groupnumber==0):
                if (k<number2[0]):
                    prob=0.05 # 0.7 for same-group meetings when groups are used; 0.05 with a single group
                    write.writerow(["%d" %i, "%d" %k, PersonalRelationship[index],"%.5f" %prob])
                    index=index+1
                else:
                    prob=0.05
                    write.writerow(["%d" %i, "%d" %k, PersonalRelationship[index],"%.5f" %prob])
                    index=index+1
            else:
                if (k>=number[groupnumber-1] and k<number[groupnumber]):
                    prob=0.05 # 0.7 for same-group meetings when groups are used; 0.05 with a single group
                    write.writerow(["%d" %i, "%d" %k, PersonalRelationship[index],"%.5f" %prob])
                    index=index+1
                else:
                    prob=0.05
                    write.writerow(["%d" %i, "%d" %k, PersonalRelationship[index],"%.5f" %prob])
                    index=index+1
g.close()
#확률 : Group 외는 0.05, Group 내는 0.7 | 38.102362 | 168 | 0.613763 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,788 | 0.330682 |
d7d7eb91cc6291ffc2e3931f28156e9ccd13a035 | 1,826 | py | Python | inaworld/tokens.py | epfahl/inaworld | 886bd1c0ce143eb09d5fe3c479e0ed0e796bf836 | [
"MIT"
] | null | null | null | inaworld/tokens.py | epfahl/inaworld | 886bd1c0ce143eb09d5fe3c479e0ed0e796bf836 | [
"MIT"
] | null | null | null | inaworld/tokens.py | epfahl/inaworld | 886bd1c0ce143eb09d5fe3c479e0ed0e796bf836 | [
"MIT"
] | null | null | null | """Tokenize a document.
*** NLTK tokenization and this module have been deprecated in favor of a
sklearn-based solution. However, NLTK may offer more options for tokenization,
stemming, etc., this module is retained for future reference.
"""
import re
import nltk
import toolz as tz
re_not_alpha = re.compile('[^a-zA-Z]')
STOPWORDS = set(nltk.corpus.stopwords.words('english'))
def is_alpha(tt):
    """Return True when the tagged token (<token>, <pos>) consists solely of
    alphabetic characters (no digits or punctuation).
    """
    token, _ = tt
    return re_not_alpha.search(token) is None
def not_proper(tt):
    """Return True when the tagged token (<token>, <pos>) is not tagged as a
    proper noun ('NNP').
    """
    _, pos = tt
    return pos != 'NNP'
def not_stopword(tt):
    """Return True when the tagged token (<token>, <pos>) is not an English
    stopword.
    """
    token = tt[0]
    return token not in STOPWORDS
def lower(tt):
    """Return the tagged token (<token>, <pos>) with its token lower-cased."""
    token, pos = tt
    return (token.lower(), pos)
def stem(tt):
    """Return the tagged token (<token>, <pos>) with the token replaced by its
    Lancaster stem.
    """
    token, pos = tt
    stemmer = nltk.stem.lancaster.LancasterStemmer()
    return (stemmer.stem(token), pos)
def remove_pos(tt):
    """Strip the POS tag from a tagged token, returning only the token."""
    token, _ = tt
    return token
def tokenize(doc, with_stem=False):
    """Tokenize a document string into a list of lower-cased, non-proper,
    non-stopword alphabetic tokens, optionally Lancaster-stemmed.
    """
    tagged = nltk.tag.pos_tag(nltk.tokenize.word_tokenize(doc))
    stages = [
        (filter, is_alpha),
        (filter, not_proper),
        (map, lower),
        (filter, not_stopword)]
    if with_stem:
        stages.append((map, stem))
    stages.append((map, remove_pos))
    return list(tz.thread_last(tagged, *stages))
| 25.71831 | 79 | 0.624863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 955 | 0.523001 |
d7d889701d322c85cba5e27beaf93698e762c786 | 2,483 | py | Python | tests/server_state_item_test.py | whitphx/streamlit-server-state | 12e0d07e4863ea33529c1685c764c9a4214df8e2 | [
"MIT"
] | 20 | 2021-07-11T14:47:50.000Z | 2022-02-08T07:44:03.000Z | tests/server_state_item_test.py | whitphx/streamlit-server-state | 12e0d07e4863ea33529c1685c764c9a4214df8e2 | [
"MIT"
] | 31 | 2021-07-23T15:21:27.000Z | 2022-03-30T15:19:17.000Z | tests/server_state_item_test.py | whitphx/streamlit-server-state | 12e0d07e4863ea33529c1685c764c9a4214df8e2 | [
"MIT"
] | 1 | 2022-01-03T02:01:24.000Z | 2022-01-03T02:01:24.000Z | from unittest.mock import ANY, Mock, patch
import pytest
from streamlit_server_state.server_state_item import ServerStateItem
@pytest.fixture
def patch_is_rerunnable():
    # Force is_rerunnable() to report True so that ServerStateItem always
    # asks its bound sessions to rerun when a value changes.
    with patch(
        "streamlit_server_state.server_state_item.is_rerunnable"
    ) as mock_is_rerunnable:
        mock_is_rerunnable.return_value = True
        yield
def test_bound_sessions_are_requested_to_rerun_when_value_is_set_or_update(
    patch_is_rerunnable,
):
    # Every set_value() that changes the value triggers one rerun request
    # on the bound session.
    session = Mock()
    item = ServerStateItem()
    item.bind_session(session)
    session.request_rerun.assert_not_called()
    item.set_value(42)
    session.request_rerun.assert_has_calls([ANY])
    item.set_value(100)
    session.request_rerun.assert_has_calls([ANY, ANY])
def test_all_bound_sessions_are_requested_to_rerun(patch_is_rerunnable):
    # When several sessions are bound, each one is asked to rerun on every
    # value change.
    session1 = Mock()
    session2 = Mock()
    item = ServerStateItem()
    item.bind_session(session1)
    item.bind_session(session2)
    session1.request_rerun.assert_not_called()
    session2.request_rerun.assert_not_called()
    item.set_value(42)
    session1.request_rerun.assert_has_calls([ANY])
    session2.request_rerun.assert_has_calls([ANY])
    item.set_value(100)
    session1.request_rerun.assert_has_calls([ANY, ANY])
    session2.request_rerun.assert_has_calls([ANY, ANY])
def test_bound_sessions_are_not_duplicate(patch_is_rerunnable):
    # Binding the same session twice must not produce duplicate rerun
    # requests for a single value change.
    session = Mock()
    item = ServerStateItem()
    item.bind_session(session)
    item.bind_session(session)  # Bind the session twice
    session.request_rerun.assert_not_called()
    item.set_value(42)
    session.request_rerun.assert_called_once()
def test_bound_sessions_are_not_requested_to_rerun_when_the_set_value_is_not_changed(
    patch_is_rerunnable,
):
    # Setting the same value again is a no-op: no additional rerun request.
    session = Mock()
    item = ServerStateItem()
    item.bind_session(session)
    session.request_rerun.assert_not_called()
    item.set_value(42)
    session.request_rerun.assert_called_once()
    item.set_value(42)
    session.request_rerun.assert_called_once()  # No new calls
def test_bound_sessions_are_requested_to_rerun_when_a_same_but_mutated_object_is_set(
    patch_is_rerunnable,
):
    # Re-setting the same object after mutating it in place must still be
    # detected as a change and trigger a rerun request.
    session = Mock()
    item = ServerStateItem()
    item.bind_session(session)
    session.request_rerun.assert_not_called()
    item.set_value({})
    session.request_rerun.assert_has_calls([ANY])
    value = item.get_value()
    value["foo"] = 42
    item.set_value(value)
    session.request_rerun.assert_has_calls([ANY, ANY])
| 24.343137 | 85 | 0.755135 | 0 | 0 | 197 | 0.07934 | 213 | 0.085783 | 0 | 0 | 99 | 0.039871 |
d7d93021ccae0a77677be8252a5390c6ed0ed049 | 686 | py | Python | sympycore/heads/arithmetic.py | radovankavicky/pymaclab | 21da758f64ed0b62969c9289576f677e977cfd98 | [
"Apache-2.0"
] | 96 | 2015-01-25T05:59:56.000Z | 2021-12-29T14:05:22.000Z | sympycore/heads/arithmetic.py | 1zinnur9/pymaclab | 21da758f64ed0b62969c9289576f677e977cfd98 | [
"Apache-2.0"
] | 3 | 2015-12-17T19:25:46.000Z | 2018-06-19T07:05:20.000Z | sympycore/heads/arithmetic.py | 1zinnur9/pymaclab | 21da758f64ed0b62969c9289576f677e977cfd98 | [
"Apache-2.0"
] | 36 | 2016-01-31T15:22:01.000Z | 2021-03-29T07:03:07.000Z |
#obsolete, each head should be defined in a separate file
__all__ = ['FLOORDIV', 'MOD']
from .base import NaryHead
class ModHead(NaryHead):
    """
    ModHead represents the n-ary modulo operation; data is an n-tuple of
    expression operands.
    """
    op_mth = '__mod__'
    op_rmth = '__rmod__'
    op_symbol = '%'

    def __repr__(self):
        return 'MOD'
class FloordivHead(NaryHead):
    """
    FloordivHead represents the n-ary floor-division operation; data is an
    n-tuple of expression operands.
    """
    op_mth = '__floordiv__'
    op_rmth = '__rfloordiv__'
    op_symbol = '//'

    def __repr__(self):
        return 'FLOORDIV'
MOD = ModHead()
FLOORDIV = FloordivHead()
| 18.540541 | 59 | 0.653061 | 516 | 0.752187 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.529155 |
d7d94e78e81c338291612ee9c829d5e728b8d936 | 68 | py | Python | boxuegu/apps/courses/views.py | libin-c/bxg | c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23 | [
"MIT"
] | 1 | 2019-06-13T10:08:25.000Z | 2019-06-13T10:08:25.000Z | boxuegu/apps/courses/views.py | libin-c/bxg | c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23 | [
"MIT"
] | null | null | null | boxuegu/apps/courses/views.py | libin-c/bxg | c509a5b39bc3f3f34ad9d7fbfb61a63d2f67bc23 | [
"MIT"
] | null | null | null | from django.views import View
# Course listing view; no request handlers are implemented yet.
class CourseListView(View):
pass | 13.6 | 29 | 0.764706 | 36 | 0.529412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7da6c76e88c3141152b6b83c5b2260da1ff5ebc | 168 | py | Python | classification/admin/__init__.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | classification/admin/__init__.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | classification/admin/__init__.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | from classification.admin.classification_admin import *
from classification.admin.clinvar_export_admin import *
from classification.admin.condition_text_admin import *
| 42 | 55 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7dcce491e877298e10f20725e26c61897450ea2 | 17,656 | py | Python | scripts/patch-uld-elf.py | cutty/uld-fdpic | f035bf5f24068eeaeac9dea497cbaef536849637 | [
"MIT"
] | 8 | 2019-03-14T17:33:05.000Z | 2022-01-11T09:52:18.000Z | scripts/patch-uld-elf.py | Cutty/uld-fdpic | f035bf5f24068eeaeac9dea497cbaef536849637 | [
"MIT"
] | null | null | null | scripts/patch-uld-elf.py | Cutty/uld-fdpic | f035bf5f24068eeaeac9dea497cbaef536849637 | [
"MIT"
] | 3 | 2019-05-18T08:36:38.000Z | 2021-02-22T10:12:38.000Z | #!/usr/bin/env python
# Copyright (c) 2016, 2017 Joe Vernaci
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import commands
import operator
import os
import re
import struct
import sys
import zlib
# Default ELF section names / offsets used when the matching command-line
# option is not supplied.
DEFAULT_FILES_SEC = '.files'
DEFAULT_FS_TABLE_SEC = '.fs_table'
DEFAULT_PSTORE_SEC = '.uld_pdata'
DEFAULT_PSTORE_OFF = 0
# One PLT entry is 0x14 bytes; the GOTOFFFUNCDESC value lives at offset
# 0x10 inside each entry (see patch_plt_gotofffuncdesc below).
PLT_ENTRY_SIZE = 0x14
PLT_ENTRY_GOTOFFFUNCDESC_OFFSET = 0x10
# Fixed-size head of an fs_table entry: five little-endian u32 fields
# (file_base, next_e, size, crc, flags) followed by a variable-length name
# (see FSEntry below).
FS_ENTRY_FMT = '<IIIII'
FS_ENTRY_SIZE = struct.calcsize(FS_ENTRY_FMT)
FS_ENTRY_CRC_OFFSET = 0xc
PSTORE_FS_TABLE_CRC_OFFSET = 0x18
# Sections whose fixups are applied at runtime when loaded into memory, so
# the offline patcher must skip them (see apply_rofixups).
ROFIXUP_MEM_SEC_LIST = [
    '.got',
    '.got.plt',
    '.data',
    '.bss'
]
# Module-level debug flag; set to 1 by the --verbose option in main().
_debug = 0
def dprint(s):
    """Print debug message *s* only when the module-level _debug flag is set.

    The function only reads ``_debug``, so the previous ``global _debug``
    declaration was redundant and has been removed.
    """
    if _debug > 0:
        print(s)
class CmdError(Exception):
    """Raised by qc() when an external shell command exits non-zero."""
    pass
def qc(cmd):
    """Run *cmd* through the shell and return its combined output.

    On a non-zero exit status the captured output is printed and a
    CmdError is raised.  (Uses the Python 2 ``commands`` module.)
    """
    status, output = commands.getstatusoutput(cmd)
    if status != 0:
        print(output)
        raise CmdError('cmd: \'{}\' exited with code: {}'.format(cmd, status))
    return output
def wsclean(s):
    """Trim *s* and collapse every run of whitespace into a single space."""
    trimmed = s.strip()
    return re.sub(r'\s+', ' ', trimmed)
class ElfSection(object):
    """One ELF section header row parsed from objdump/readelf output.

    ``file_size`` (bytes occupied in the file) and ``type`` (readelf
    section type, e.g. NOBITS) start as None and are filled in later by
    get_elf_sections().
    """

    def __init__(self, idx, name, size, vma, lma, file_off, algn, flags):
        (self.idx, self.name, self.size, self.vma, self.lma,
         self.file_off, self.algn, self.flags) = (
            idx, name, size, vma, lma, file_off, algn, flags)
        self.file_size = None
        self.type = None

    def __str__(self):
        pieces = [
            'ElfSection(idx={}, name={}, size=0x{:08x}, vma=0x{:08x}, ',
            'lma=0x{:08x}, file_off=0x{:08x}, algn={}, flags={}, ',
            # file_size may still be None; only hex-format it once known.
            'file_size={}, ' if self.file_size is None else 'file_size=0x{:08x}, ',
            'type={})',
        ]
        template = ''.join(pieces)
        return template.format(self.idx, self.name, self.size, self.vma,
                               self.lma, self.file_off, self.algn, self.flags,
                               self.file_size, self.type)
def get_elf_sections(path):
    """Parse ``objdump -h`` and ``readelf -S`` output for *path*.

    Returns a list of ElfSection objects for every ALLOC section, with the
    readelf section type attached and an estimated on-file size
    (``file_size``) derived from the gap to the next section's file offset.
    """
    ret = []
    # Tool names may be overridden via the environment (cross toolchains).
    objdump = os.environ.get('OBJDUMP', 'objdump')
    readelf = os.environ.get('READELF', 'readelf')
    # Get section data from objdump/readelf and trim headers and footers off.
    oc_out = qc('{} -h {}'.format(objdump, path)).split('\n')[5:]
    re_out = qc('{} -S {}'.format(readelf, path)).split('\n')[5:-4]
    # Data extraction was tested using binutils 2.22 and 2.26.1.
    # objdump prints each section as two lines: the fields row, then the
    # flags row.  NOTE: ``.next()`` is the Python 2 iterator protocol.
    oc_iter = iter(oc_out)
    for line in oc_iter:
        flags = oc_iter.next().strip()
        if 'ALLOC' not in flags:
            continue
        line = wsclean(line).split(' ')
        idx, name, size, vma, lma, file_off, algn = line
        elf_sec = ElfSection(idx, name, int(size, 16), int(vma, 16),
                             int(lma, 16), int(file_off, 16), algn, flags)
        ret.append(elf_sec)
    # Map section name -> readelf type (second readelf column).
    re_dict = {}
    for line in re_out:
        line = line[line.find('.'):]
        line = wsclean(line).split(' ')
        # Skip entries that do not have the ALLOC flag.
        if 'A' not in line[-4]:
            continue
        name = line[0]
        sec_type = line[1]
        re_dict[name] = sec_type
    for elf_sec in ret:
        elf_sec.type = re_dict.get(elf_sec.name, None)
    # NOBITS sections (e.g. .bss) occupy no space in the file.
    sec_list = ret[:]
    sec_list = [x for x in sec_list if x.type == 'NOBITS']
    for elf_sec in sec_list:
        elf_sec.file_size = 0
    sec_list = ret[:]
    sec_list = [x for x in sec_list if x.type != 'NOBITS']
    sec_list.sort(key=operator.attrgetter('file_off'))
    # elf_sec.size (i.e. size during execution) may not always be the same
    # size as section size in the file.
    for index in range(len(sec_list)):
        elf_sec = sec_list[index]
        if index == len(sec_list) - 1:
            # Best guess.
            elf_sec.file_size = elf_sec.size
            break
        next_elf_sec = sec_list[index + 1]
        file_size = next_elf_sec.file_off - elf_sec.file_off
        # Cover case where there may be orphaned data in between sections in
        # the file.
        if file_size > elf_sec.size:
            file_size = elf_sec.size
        elf_sec.file_size = file_size
    return ret
def sec_list_to_dict(sec_list):
    """Index a list of ElfSection objects by their section name."""
    return dict((sec.name, sec) for sec in sec_list)
def sec_name_in_sec_list(sec_list, name):
    """Return True if a section called *name* exists in *sec_list*.

    Replaces the deprecated ``dict.has_key()`` (removed in Python 3) and
    avoids building an intermediate dict just for one membership test.
    """
    return any(sec.name == name for sec in sec_list)
def name_to_sec(sec_list, name):
    """Return the section called *name*, or None when it is absent."""
    by_name = sec_list_to_dict(sec_list)
    return by_name.get(name)
def file_off_to_sec(sec_list, file_off):
    """Return the section whose on-file range contains *file_off*.

    Raises ValueError when no section covers the offset.
    """
    for sec in sec_list:
        start = sec.file_off
        if start <= file_off < start + sec.file_size:
            return sec
    raise ValueError('Could not find section for file_off: 0x{:08x}'.format(
        file_off))
def lma_to_sec(sec_list, lma):
    """Return the section whose load-address file range contains *lma*.

    Raises ValueError when no section covers the address.
    """
    for sec in sec_list:
        if sec.lma <= lma < sec.lma + sec.file_size:
            return sec
    raise ValueError('Could not find section for lma: 0x{:08x}'.format(
        lma))
def vma_to_sec(sec_list, vma):
    """Return the section whose virtual-address range contains *vma*.

    Uses the in-memory size (``size``) rather than the on-file size.
    Raises ValueError when no section covers the address.
    """
    for sec in sec_list:
        if sec.vma <= vma < sec.vma + sec.size:
            return sec
    raise ValueError('Could not find section for vma: 0x{:08x}'.format(
        vma))
def lma_to_file_off(sec_list, lma):
    """Translate load address *lma* into its absolute offset in the file."""
    sec = lma_to_sec(sec_list, lma)
    return (lma - sec.lma) + sec.file_off
def extract_sec(fd, sec_list, name):
    """Read and return the raw on-file contents of section *name*.

    The current file position of *fd* is restored before returning.
    """
    saved_pos = fd.tell()
    sec = sec_list_to_dict(sec_list)[name]
    fd.seek(sec.file_off)
    buf = fd.read(sec.file_size)
    fd.seek(saved_pos)
    return buf
class FSEntry(object):
    """One fs_table entry unpacked from its raw bytes.

    *e_file_off* is the entry's own offset in the image file; *buf* holds
    the fixed header plus *name_len* bytes of name.
    """

    def __init__(self, e_file_off, buf, name_len):
        self.e_file_off = e_file_off
        fmt = FS_ENTRY_FMT + '{}s'.format(name_len)
        fields = struct.unpack(fmt, buf)
        (self.file_base, self.next_e, self.size,
         self.crc, self.flags, self.name) = fields

    def __str__(self):
        template = ('FSEntry(file_base=0x{:08x}, next_e=0x{:08x}, '
                    'size=0x{:08x}, crc=0x{:08x}, flags=0x{:08x}, name={})')
        return template.format(self.file_base, self.next_e, self.size,
                               self.crc, self.flags, self.name)
def parse_fs_table(buf, buf_file_off, sec_list):
    """Walk the linked list of FSEntry records contained in *buf*.

    *buf* is the raw contents of the fs_table section and *buf_file_off*
    its offset within the image file.  Returns the FSEntry objects in
    chain order.  Raises ValueError when a next-entry pointer falls
    outside *buf*.
    """
    ret = []
    e_base = 0
    while True:
        # This will put offset at the first char of fs_entry.name
        offset = e_base + FS_ENTRY_SIZE
        # Find the variable length of fs_entry.name.
        while buf[offset] != '\0':
            offset += 1
        fse = FSEntry(e_base + buf_file_off, buf[e_base:offset],
                      offset - FS_ENTRY_SIZE - e_base)
        ret.append(fse)
        # A zero next_e pointer terminates the chain.  (Was ``is 0``, which
        # only works because of CPython's small-int caching; use a value
        # comparison instead of identity.)
        if fse.next_e == 0:
            break
        e_base = lma_to_file_off(sec_list, fse.next_e)
        e_base -= buf_file_off
        if e_base < 0 or e_base > len(buf):
            err = 'Next FS entry at lma: 0x{:08x} file_off: 0x{:08x} not ' \
                  'in range of buf'
            raise ValueError(err.format(fse.next_e, lma_to_file_off(sec_list,
                                        fse.next_e)))
    return ret
def apply_rofixups(uld_sec_list, uld_fd, elf_sec_list, elf_fd, elf_file_lma):
    """Apply the .rofixup relocations of one embedded ELF inside *uld_fd*.

    Each fixup address names a 4-byte slot whose value (an lma in the
    embedded ELF) is rewritten as image-file offset + *elf_file_lma*.
    Slots pointing at sections fixed up at runtime (ROFIXUP_MEM_SEC_LIST)
    are skipped.  File positions of both fds are restored on return.
    """
    if not sec_name_in_sec_list(elf_sec_list, '.rofixup'):
        return
    uld_opos = uld_fd.tell()
    elf_opos = elf_fd.tell()
    elf_file_off = lma_to_file_off(uld_sec_list, elf_file_lma)
    # .rofixup is a flat array of little-endian u32 fixup addresses.
    fixups = extract_sec(elf_fd, elf_sec_list, '.rofixup')
    fixups = [struct.unpack('<I', fixups[x:x + 4])[0] for x in
              range(0, len(fixups), 4)]
    global _debug
    for addr in fixups:
        sec = lma_to_sec(elf_sec_list, addr)
        file_off = lma_to_file_off(elf_sec_list, addr)
        elf_fd.seek(file_off)
        value = elf_fd.read(4)
        value = struct.unpack('<I', value)[0]
        # If a fixup value does not have a valid lma it is most likely pointing
        # to a NOBITS sections (i.e. .bss) and should already be set as skip.
        # NOTE(review): ``skip`` here is the value left over from the
        # *previous* loop iteration; if lma_to_sec raises on the very first
        # fixup this is a NameError (unbound local) -- confirm and fix.
        try:
            value_sec = lma_to_sec(elf_sec_list, value)
        except ValueError:
            if skip is False:
                raise
            value_sec = None
        # Sections that will be loaded into memory will have fixups applied
        # at runtime.
        if value_sec is None or value_sec.name in ROFIXUP_MEM_SEC_LIST:
            skip = True
        else:
            skip = False
        if _debug > 0:
            # Debug-only: resolve a display name for the target section.
            if value_sec is None:
                try:
                    value_sec = vma_to_sec(elf_sec_list, value)
                    value_sec_name = value_sec.name + ' (vma)'
                except:
                    value_sec_name = 'UNKNOWN'
            else:
                value_sec_name = value_sec.name
            if skip is True:
                value_sec_name += ' (skip)'
            fmt = ' addr: 0x{:08x} sec: {:<12s} file_off: 0x{:08x} ' \
                  'value: 0x{:08x} value_sec: {}'
            dprint(fmt.format(addr, sec.name, file_off, value, value_sec_name))
        if skip is True:
            continue
        # Sanity check that everything is in the right place.
        uld_fd.seek(elf_file_off + file_off)
        uld_value = uld_fd.read(4)
        uld_value = struct.unpack('<I', uld_value)[0]
        if value != uld_value:
            fmt = 'Incorrect value 0x{:08x} at 0x{:08x} in uld_fd ' \
                  'expected 0x{:08x}'
            raise ValueError(fmt.format(uld_value, elf_file_off + file_off,
                                        value))
        # Rewrite the slot to point at the value's location inside the image.
        fixup_value = lma_to_file_off(elf_sec_list, value) + elf_file_lma
        msg = ' Applying fixup for 0x{:08x} value 0x{:08x}->0x{:08x}'
        dprint(msg.format(addr, value, fixup_value))
        fixup_value = struct.pack('<I', fixup_value)
        uld_fd.seek(-4, 1)
        uld_fd.write(fixup_value)
    uld_fd.seek(uld_opos)
    elf_fd.seek(elf_opos)
def patch_plt_gotofffuncdesc(uld_sec_list, uld_fd, elf_sec_list, elf_fd,
        elf_file_lma):
    """Adjust the GOTOFFFUNCDESC value of every PLT entry of one embedded ELF.

    ld generates GOTOFFFUNCDESC values as offsets from the base of
    .got.plt, but at runtime they are referenced through the pic base
    register, which points at the base of .got when that section exists.
    This adds the .got -> .got.plt distance to every entry's value,
    writing the result into the embedded copy inside *uld_fd*.  The file
    position of *uld_fd* is restored on return.
    """
    plt_sec = name_to_sec(elf_sec_list, '.plt')
    count = 0
    picreg_offset = 0
    uld_opos = uld_fd.tell()
    if plt_sec is not None:
        # A .plt section should not exist without a .got.plt
        got_plt_sec = name_to_sec(elf_sec_list, '.got.plt')
        if got_plt_sec is None:
            raise ValueError('.got.plt section not found when .plt section '
                             'is present')
        # With no .got the pic register already points at .got.plt and the
        # offset stays zero.
        got_sec = name_to_sec(elf_sec_list, '.got')
        if got_sec is not None:
            picreg_offset = got_plt_sec.lma - got_sec.lma
    if picreg_offset != 0:
        # Consistency fix: use PLT_ENTRY_SIZE (0x14 == 20) instead of the
        # magic number so the check matches the entry stride used below.
        if plt_sec.size % PLT_ENTRY_SIZE != 0:
            raise ValueError('.plt size {} is not multiple of 20'.format(
                plt_sec.size))
        elf_file_off = lma_to_file_off(uld_sec_list, elf_file_lma)
        plt_file_off = lma_to_file_off(elf_sec_list, plt_sec.lma)
        for val_off in range(PLT_ENTRY_GOTOFFFUNCDESC_OFFSET, plt_sec.size,
                PLT_ENTRY_SIZE):
            uld_fd.seek(elf_file_off + plt_file_off + val_off)
            val = uld_fd.read(4)
            val = struct.unpack('<I', val)[0]
            val = struct.pack('<I', val + picreg_offset)
            uld_fd.seek(-4, 1)
            uld_fd.write(val)
            count += 1
    dprint('Updated GOTOFFFUNCDESC values for {} plt entries'.format(count))
    uld_fd.seek(uld_opos)
def find_elf_file(elf_search_path, elf_filename):
    """Return the first existing path to *elf_filename* in the search dirs.

    Raises ValueError when the file exists in none of them.
    """
    candidates = (os.path.join(d, elf_filename) for d in elf_search_path)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise ValueError('{} not found.'.format(elf_filename))
def write_elf_file_crc(uld_sec_list, uld_fd, fse):
    """Recompute an embedded file's crc32 and patch its fs_table entry.

    The file position of *uld_fd* is restored on return.
    """
    saved_pos = uld_fd.tell()
    uld_fd.seek(lma_to_file_off(uld_sec_list, fse.file_base))
    data = uld_fd.read(fse.size)
    # Mask to 32 bits so the result is identical on Python 2 and 3.
    crc = zlib.crc32(data, 0) & 0xffffffff
    dprint('Patching new crc 0x{:08x} for file {}'.format(crc, fse.name))
    uld_fd.seek(fse.e_file_off + FS_ENTRY_CRC_OFFSET)
    uld_fd.write(struct.pack('<I', crc))
    uld_fd.seek(saved_pos)
def write_fs_table_crc(uld_fd, fs_table_off, fs_table_size, pstore_off):
    """Recompute the fs_table crc32 and store it in the pstore area.

    The file position of *uld_fd* is restored on return.
    """
    saved_pos = uld_fd.tell()
    uld_fd.seek(fs_table_off)
    table_bytes = uld_fd.read(fs_table_size)
    # Mask to 32 bits so the result is identical on Python 2 and 3.
    crc = zlib.crc32(table_bytes, 0) & 0xffffffff
    dprint('Patching new crc 0x{:08x} for fs_table'.format(crc))
    uld_fd.seek(pstore_off + PSTORE_FS_TABLE_CRC_OFFSET)
    uld_fd.write(struct.pack('<I', crc))
    uld_fd.seek(saved_pos)
def patch_uld_elf(args):
    """Patch an uld firmware image in place.

    For every file listed in the image's fs_table: apply its rofixups,
    adjust the PLT GOTOFFFUNCDESC values and rewrite its crc32.  Finally
    the crc32 of the fs_table itself is written into the pstore area.
    """
    global _debug
    dprint('args: {}'.format(args))
    uld_sec_list = get_elf_sections(args.uld_path)
    uld_fd = open(args.uld_path, 'r+')
    uld_sec_dict = sec_list_to_dict(uld_sec_list)
    fs_table_sec = uld_sec_dict[args.fs_table_section]
    uld_fd.seek(fs_table_sec.file_off)
    # Detect dev case where no files are embedded.  ('in' replaces the
    # deprecated dict.has_key(), which no longer exists on Python 3.)
    if args.file_section in uld_sec_dict:
        fs_table = uld_fd.read(fs_table_sec.file_size)
        fs_table = parse_fs_table(fs_table, fs_table_sec.file_off,
                                  uld_sec_list)
    else:
        fs_table = []
    if _debug > 0:
        dprint('Read {} FSEntries from {}'.format(len(fs_table),
                                                  args.uld_path))
        for x in fs_table:
            dprint(' {}'.format(x))
    # Files are searched in the user-supplied paths first, then next to the
    # uld image itself.
    uld_dir = os.path.split(args.uld_path)[0]
    if args.elf_search_path is not None:
        elf_search_path = args.elf_search_path + [uld_dir,]
    else:
        elf_search_path = [uld_dir,]
    pstore_off = uld_sec_dict[args.pstore_section].file_off
    pstore_off += args.pstore_offset
    for fse in fs_table:
        elf_path = find_elf_file(elf_search_path, fse.name)
        dprint('Processing file {}'.format(elf_path))
        elf_fd = open(elf_path, 'r')
        elf_sec_list = get_elf_sections(elf_path)
        apply_rofixups(uld_sec_list, uld_fd, elf_sec_list, elf_fd,
                       fse.file_base)
        patch_plt_gotofffuncdesc(uld_sec_list, uld_fd, elf_sec_list, elf_fd,
                                 fse.file_base)
        write_elf_file_crc(uld_sec_list, uld_fd, fse)
        elf_fd.close()
    write_fs_table_crc(uld_fd, fs_table_sec.file_off, fs_table_sec.size,
                       pstore_off)
    uld_fd.close()
def main(argv=None):
    """Parse command-line options and run patch_uld_elf().

    *argv* is only used to derive the program name for help text; the
    options themselves are taken from sys.argv by argparse.
    """
    if argv is not None:
        prog = os.path.basename(argv[0])
    else:
        prog = 'patch-uld-elf.py'
    epilog='\nIf OBJCOPY/READELF is not present in environment ' \
        '\'objcopy\' and \'readelf\' will be used.\n' \
        'Usage for --elf-search-path:\n' \
        '  {} /path/to/uld-elf --elf-search-path /path/to/search-1 ' \
        '/path/to/search-2'
    epilog = epilog.format(prog)
    parser = argparse.ArgumentParser(prog=prog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Apply rofixups for files contained within uld '
        'firmware and recalculate crc32 checksums for files and fs_table',
        epilog=epilog)
    parser.add_argument('--file-section', type=str, default=DEFAULT_FILES_SEC,
        help='Section prefix to for files (default: {})'.format(
        DEFAULT_FILES_SEC))
    parser.add_argument('--fs-table-section', type=str,
        default=DEFAULT_FS_TABLE_SEC,
        help='Section prefix to for fs_table (default: {})'.format(
        DEFAULT_FS_TABLE_SEC))
    parser.add_argument('--pstore-section', type=str,
        default=DEFAULT_PSTORE_SEC,
        help='Section prefix to for pstore (default: {})'.format(
        DEFAULT_PSTORE_SEC))
    parser.add_argument('--pstore-offset', type=int,
        default=DEFAULT_PSTORE_OFF,
        help='Offset from section base for pstore (default: {})'.format(
        DEFAULT_PSTORE_OFF))
    parser.add_argument('--elf-search-path', type=str, nargs='+',
        help='Search path for elf files, each path is processed in order '
        'from the command line followed by the base directory of '
        'uld-path')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('uld_path', type=str, metavar='uld-path',
        help='Path to input file path or name=path to change default '
        'name (derived from path and file-path-strip)')
    args = parser.parse_args()
    # --verbose turns on the module-level debug printing used by dprint().
    global _debug
    if args.verbose is True:
        _debug = 1
    patch_uld_elf(args)
# Script entry point.
if __name__ == '__main__':
    main()
| 31.755396 | 79 | 0.621715 | 1,486 | 0.084164 | 0 | 0 | 0 | 0 | 0 | 0 | 4,638 | 0.262687 |
d7ddf28312cf505e16bdb958ec521e74d90b4d5b | 898 | py | Python | glycowork/tests/test_hierarchyfilter.py | Old-Shatterhand/glycowork | 544fde03dd38cf95fb97792e050d7ff68f5637b1 | [
"MIT"
] | 22 | 2021-04-22T23:53:26.000Z | 2022-03-21T00:36:32.000Z | glycowork/tests/test_hierarchyfilter.py | Old-Shatterhand/glycowork | 544fde03dd38cf95fb97792e050d7ff68f5637b1 | [
"MIT"
] | 3 | 2021-04-23T13:01:07.000Z | 2022-03-16T19:13:12.000Z | glycowork/tests/test_hierarchyfilter.py | Old-Shatterhand/glycowork | 544fde03dd38cf95fb97792e050d7ff68f5637b1 | [
"MIT"
] | 2 | 2021-07-06T14:13:40.000Z | 2021-12-15T15:12:37.000Z | import glycowork
from glycowork.glycan_data.loader import df_species
from glycowork.ml.train_test_split import *
# Split df_species into train/validation data at rank 'Kingdom'.
train_x, val_x, train_y, val_y, id_val, class_list, class_converter = hierarchy_filter(df_species,
                                           rank = 'Kingdom')
print(train_x[:10])
# Same split, additionally seeding wildcards from the linkage list under the
# name 'bond'.
train_x, val_x, train_y, val_y, id_val, class_list, class_converter = hierarchy_filter(df_species,
                                           rank = 'Kingdom',
                                           wildcard_seed = True,
                                           wildcard_list = linkages,
                                           wildcard_name = 'bond')
print(train_x[-10:])
| 56.125 | 112 | 0.406459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.026726 |
d7decdd0887c890cdfcc81772e768a8da0fcad8b | 1,374 | py | Python | scripts/greaseweazle/tools/seek.py | pkorpine/Greaseweazle | 0c016dbf4cd35dcbc12b9e97834c0286026f76b5 | [
"Unlicense"
] | null | null | null | scripts/greaseweazle/tools/seek.py | pkorpine/Greaseweazle | 0c016dbf4cd35dcbc12b9e97834c0286026f76b5 | [
"Unlicense"
] | null | null | null | scripts/greaseweazle/tools/seek.py | pkorpine/Greaseweazle | 0c016dbf4cd35dcbc12b9e97834c0286026f76b5 | [
"Unlicense"
] | null | null | null | # greaseweazle/tools/seek.py
#
# Greaseweazle control script: Seek to specified cylinder.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
# One-line summary used as the argparse description below.
description = "Seek to the specified cylinder."
import sys
from greaseweazle.tools import util
from greaseweazle import error
from greaseweazle import usb as USB
from greaseweazle.flux import Flux
def seek(usb, args, **_kwargs):
    """Seek the selected drive to the cylinder given in *args*.

    Head 0 is always selected; any extra keyword arguments are ignored.
    """
    target_cyl = args.cylinder
    usb.seek(target_cyl, 0)
def main(argv):
    """Parse command-line arguments and perform the seek.

    argv[1] is the sub-command name (appended to the program name for
    help output); the remaining arguments are parsed by argparse.
    CmdErrors from the device are reported rather than propagated.
    """
    parser = util.ArgumentParser(usage='%(prog)s [options] cylinder')
    parser.add_argument("--device", help="greaseweazle device name")
    parser.add_argument("--drive", type=util.drive_letter, default='A',
                        help="drive to read (A,B,0,1,2)")
    parser.add_argument("cylinder", type=int, help="cylinder to seek")
    parser.description = description
    parser.prog += ' ' + argv[1]
    args = parser.parse_args(argv[2:])
    try:
        usb = util.usb_open(args.device)
        # motor=False: no need to spin up the drive just to seek.
        util.with_drive_selected(seek, usb, args, motor=False)
    except USB.CmdError as error:
        print("Command Failed: %s" % error)
# Script entry point; the full argv is forwarded for sub-command handling.
if __name__ == "__main__":
    main(sys.argv)
# Local variables:
# python-indent: 4
# End:
| 27.48 | 73 | 0.687045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 582 | 0.423581 |
d7dfa172aeeb64d0717b70994ce79034e4b91fe6 | 424 | py | Python | find_duplicate_7kyu/find_the_duplicate.py | philipwerner/code-katas | 3bdce2b5d12df612e7c8f2e2b8b5ebe16a653712 | [
"MIT"
] | null | null | null | find_duplicate_7kyu/find_the_duplicate.py | philipwerner/code-katas | 3bdce2b5d12df612e7c8f2e2b8b5ebe16a653712 | [
"MIT"
] | null | null | null | find_duplicate_7kyu/find_the_duplicate.py | philipwerner/code-katas | 3bdce2b5d12df612e7c8f2e2b8b5ebe16a653712 | [
"MIT"
] | null | null | null | """Kata: Find the Duplicated Number in a Consecutive Unsorted List - Finds and returns
the duplicated number from the list
#1 Best Practices Solution by SquishyStrawberry
def find_dup(arr):
return (i for i in arr if arr.count(i) > 1).next()
"""
def find_dup(arr):
    """Return the duplicated number in *arr*.

    The kata guarantees exactly one value appears more than once.  This
    scans once with a set of seen values (O(n)) instead of calling
    ``arr.count`` per element (O(n**2)), and returns the duplicate as
    soon as it is encountered.  Returns None if no duplicate exists
    (not expected under the kata's contract).
    """
    seen = set()
    for value in arr:
        if value in seen:
            return value
        seen.add(value)
    return None
| 28.266667 | 87 | 0.683962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.709906 |
d7dfcf7248542aa01e4dc5de7a80c637f4e680b5 | 2,998 | py | Python | text_generator.py | agvergara/RNN_Text_Generator | c1bcae06085b2ce8ec6f94ba9378c74a9c834c93 | [
"Apache-2.0"
] | null | null | null | text_generator.py | agvergara/RNN_Text_Generator | c1bcae06085b2ce8ec6f94ba9378c74a9c834c93 | [
"Apache-2.0"
] | null | null | null | text_generator.py | agvergara/RNN_Text_Generator | c1bcae06085b2ce8ec6f94ba9378c74a9c834c93 | [
"Apache-2.0"
] | null | null | null | #coding:cp1252
"""
@author: Antonio Gomez Vergara
Generates text using word embeddings (still in process of study how word embeddings works)
"""
import sys
import text_utils
from rnn_class import TextGeneratorModel
# Hyper-parameters for the word-embedding text generator.
MIN_WORD_FREQ = 10
SEED = "Nenita hello"
# NOTE(review): len(SEED) counts characters, yet the sequences built below
# are word-based -- confirm this is the intended sequence length.
SEQUENCE_LEN = len(SEED)
STRIDE = 1
BATCH_SIZE = 2048
TEST_PERCENTAGE = 2
CHECKPOINT_FILE = "./checkpoints/LSTM_GEN_word_embeddings_epoch_{epoch:03d}"
EPOCHS = 100
corpus_path = ".\\corpus.txt"
# Load the corpus and report its size in characters and words.
corpus_length_chars, full_text, corpus_length_words, words_in_corpus = text_utils.get_corpus_words(corpus_path)
print("Corpus number of chars -> {}".format(corpus_length_chars))
print("Corpus number of words -> {}".format(corpus_length_words))
# Build word<->index maps, dropping words rarer than MIN_WORD_FREQ.
num_ignored_words, ignored_words, word_to_index, index_to_word, words_not_ignored, total_words = text_utils.calc_word_frequency(words_in_corpus, full_text, SEED.lower(), MIN_WORD_FREQ)
print("Calculating word frequency. . .")
print("Ignoring words with less than {} frequency".format(MIN_WORD_FREQ))
print("Number of words ignored -> {}".format(num_ignored_words))
print("Number of words after ignoring -> {}".format(len(words_not_ignored)))
# Build (sequence, next-word) pairs and drop sequences hitting ignored words.
sequences, next_words, sequences_ignored = text_utils.check_redundancy(words_in_corpus, ignored_words, SEQUENCE_LEN, STRIDE)
print("Deleting redundant sequences. . .")
print("Sequences ignored -> {} sequences".format(sequences_ignored))
x_train, x_test, y_train, y_test = text_utils.shuffle_split_train_test(sequences, next_words, TEST_PERCENTAGE)
print("Shuffling the sequences and split it into Test({}%)/Train({}%)".format((100-TEST_PERCENTAGE), TEST_PERCENTAGE))
print("Size of Test set -> {}".format(len(x_test)))
print("Size of Train set -> {}".format(len(x_train)))
#Model configuration
print("Configuring model. . .")
diversity = [1.4]
model = TextGeneratorModel(CHECKPOINT_FILE, x_test, x_train, SEQUENCE_LEN, word_to_index, index_to_word, diversity, EPOCHS, total_words, SEED.lower())
input_dim = len(words_not_ignored)
model.build_model(input_dim, lstm_units=128, keep_prob=0.8, output_dim=1024)
optimizer = model.config_rmsprop_optimizer(learning_rate=0.001)
model.model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])
print("Compiling model. . .")
print('\n')
model.config_callbacks(use_checkpoint=True, use_lambda_callback=True, use_early_stop=False)
# One extra step covers the final partial batch.
steps_per_epoch = int(len(x_train) / BATCH_SIZE) + 1
validate_steps = int(len(x_test) / BATCH_SIZE) + 1
model.model.fit_generator(generator=text_utils.vectorization(x_train, y_train, BATCH_SIZE, word_to_index, SEQUENCE_LEN),
                          steps_per_epoch=steps_per_epoch,
                          epochs=EPOCHS,
                          callbacks=model.callback_list,
                          validation_data=text_utils.vectorization(x_test, y_test, BATCH_SIZE, word_to_index, SEQUENCE_LEN),
                          validation_steps=validate_steps)
| 42.828571 | 185 | 0.740494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.245831 |
d7e19de3f7cb4cc70ee65050649024f1a0772784 | 429 | py | Python | PivotExamples.py | KSim818/sandpit | 30a13e75db7c3c8d0ac84593db251e19144abac2 | [
"Apache-2.0"
] | null | null | null | PivotExamples.py | KSim818/sandpit | 30a13e75db7c3c8d0ac84593db251e19144abac2 | [
"Apache-2.0"
] | null | null | null | PivotExamples.py | KSim818/sandpit | 30a13e75db7c3c8d0ac84593db251e19144abac2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 16:14:01 2019
@author: KatieSi
"""
# https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html
import numpy as np
import pandas as pd
import seaborn as sns
# Load the example Titanic dataset bundled with seaborn.
titanic = sns.load_dataset('titanic')
titanic.head()
# Mean survival rate per (sex, class) group, reshaped into a sex-by-class
# table; then the same for passenger counts.
titanic.groupby(['sex', 'class'])['survived'].aggregate('mean').unstack()
titanic.groupby(['sex', 'class'])['survived'].aggregate('count').unstack()
d7e340552052f5945e9ff96d2780fc1f63de1b7c | 23,777 | py | Python | pydex/utils/trellis_plotter.py | megansimwei/pydex | a10f3078e2e9396e1e035b7d08d22f10e540cfd4 | [
"MIT"
] | null | null | null | pydex/utils/trellis_plotter.py | megansimwei/pydex | a10f3078e2e9396e1e035b7d08d22f10e540cfd4 | [
"MIT"
] | null | null | null | pydex/utils/trellis_plotter.py | megansimwei/pydex | a10f3078e2e9396e1e035b7d08d22f10e540cfd4 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
import numpy as np
class TrellisPlotter:
    def __init__(self):
        """Initialize all configuration and state attributes to defaults.

        Attributes are assigned after construction; initialize() validates
        them.  NOTE(review): 'colobar_tick_fontsize' looks like a typo for
        'colorbar_...', but renaming would break existing users.
        """
        # inputs (set by the caller before plotting)
        self.cmap = None
        self.colorbar_label_rotation = None
        self.colobar_tick_fontsize = None
        self.grouped_fun = None
        self.fun = None
        self.data = None
        self.data_sets = None
        self.intervals = None
        # options
        self.figsize = None
        self.constrained_layout = False
        self.marker = "o"
        self.markersize = None
        self.markeralpha = None
        self.n_xticks = 3
        self.n_yticks = 3
        self.xspace = 0.3
        self.yspace = 0.3
        self.xlabel = ""
        self.ylabel = ""
        self.xticks = None
        self.yticks = None
        self.xticklabels = None
        self.yticklabels = None
        self.oaxis_size = 0.20
        self.oaxis_n_xticks = 3
        self.oaxis_n_yticks = 3
        self.oaxis_xticks = None
        self.oaxis_yticks = None
        self.oaxis_xticklabels = None
        self.oaxis_yticklabels = None
        self.oaxis_bar_transparency = 0.6
        self.oaxis_xlabel = ""
        self.oaxis_ylabel = ""
        self.n_colorbar_ticks = 3
        # computed
        self.bounds = None
        self.bins = None
        self.group_bins = None
        self.n_groups = None
        self.grouped_data = None
        # private
        self._multiple_data_sets = None
    def initialize(self):
        """Validate self.data, self.intervals and self.fun; derive state.

        Sets self._multiple_data_sets, self.n_groups and (for list input)
        self.data_sets.  NOTE(review): SyntaxError is an unusual choice for
        input validation (ValueError/TypeError would be conventional), but
        changing it would alter what callers catch.
        """
        # A list of arrays means multiple data sets; a single ndarray means one.
        if isinstance(self.data, list):
            self._multiple_data_sets = True
        else:
            self._multiple_data_sets = False
        # check data's validity
        if self._multiple_data_sets:
            if not np.all([isinstance(datum, np.ndarray) for datum in self.data]):
                raise SyntaxError("All data sets must be a numpy array.")
            if not np.all([datum.ndim == 2 for datum in self.data]):
                raise SyntaxError("All data sets must be a 2D-array.")
            if not np.all([
                datum.shape[1] == self.data[0].shape[1]
                for datum in self.data
            ]):
                raise SyntaxError(
                    "Dimensions of points in the different data sets are inconsistent"
                )
        else:
            if not isinstance(self.data, np.ndarray):
                raise SyntaxError("Data must be a numpy array.")
            if self.data.ndim != 2:
                raise SyntaxError("Data must be a 2D-array.")
        # check if all data sets have the same dimension
        # check interval's validity
        if not isinstance(self.intervals, np.ndarray):
            raise SyntaxError("Intervals must be a numpy array.")
        if self.intervals.ndim != 1:
            raise SyntaxError("Intervals must be a 1D-array.")
        # check if interval agrees with given data
        # (two of the data columns are the plotted x/y; the rest are grouped)
        if self._multiple_data_sets:
            if self.intervals.shape[0] != (self.data[0].shape[1] - 2):
                raise SyntaxError("Dimensions in given interval and data does not agree.")
        else:
            if self.intervals.shape[0] != (self.data.shape[1] - 2):
                raise SyntaxError("Dimensions in given interval and data does not agree.")
        self.n_groups = np.prod(self.intervals)
        if self._multiple_data_sets:
            self.data_sets = self.data
        if self.fun is not None:
            if not isinstance(self.fun, np.ndarray):
                raise SyntaxError("Function values must be a numpy array.")
            if self.fun.ndim != 1:
                raise SyntaxError(f"Function values must be 1D array")
            if self.fun.size != self.data.shape[0]:
                raise SyntaxError(f"Length of function values and given data points "
                                  f"are inconsistent.")
        return None
    def scatter(self):
        """Render a trellis of scatter plots of the classified data.

        For a single data set, the data is first grouped via
        self.classify_data() (presumably defined later in this class --
        not visible here), then an (intervals[0]+1) x (intervals[1]+1)
        grid of axes is drawn: the top row and right column are thin
        "outer" axes showing each row/column's group interval as a gray
        bar, and the remaining cells scatter that group's points.  For a
        list of data sets the method recurses once per set.  Tick values
        and labels computed here are cached on self for reuse.
        """
        self.initialize()
        if not self._multiple_data_sets:
            self.classify_data()
            # Outer (annotation) axes get a thin slice of the figure.
            width_ratios = np.ones(self.intervals[1] + 1)
            width_ratios[-1] = self.oaxis_size
            height_ratios = np.ones(self.intervals[0] + 1)
            height_ratios[0] = self.oaxis_size
            fig, axes = plt.subplots(
                nrows=self.intervals[0]+1,
                ncols=self.intervals[1]+1,
                gridspec_kw={
                    "wspace": self.xspace,
                    "hspace": self.yspace,
                    "width_ratios": width_ratios,
                    "height_ratios": height_ratios,
                },
                figsize=self.figsize,
                constrained_layout=self.constrained_layout
            )
            for pos, axis in np.ndenumerate(axes):
                r, c = pos
                # Top-right corner cell is unused.
                if r == 0 and c == self.intervals[1]:
                    fig.delaxes(axis)
                # horizontal outer axis
                elif r == 0 and c != self.intervals[1]:
                    # handle limits
                    axis.set_xlim([self.bounds[3, 0], self.bounds[3, 1]])
                    axis.set_ylim([0, 1])
                    # handle ticks
                    axis.set_yticks([])
                    axis.xaxis.tick_top()
                    if c % 2 == 0:
                        self.oaxis_xticks = np.linspace(
                            self.bounds[3, 0],
                            self.bounds[3, 1],
                            self.oaxis_n_xticks
                        )
                        axis.set_xticks(self.oaxis_xticks)
                        if self.oaxis_xticklabels is None:
                            self.oaxis_xticklabels = [
                                f"{tick:.2f}" for tick in self.oaxis_xticks
                            ]
                        axis.xaxis.set_ticklabels(self.oaxis_xticklabels)
                    else:
                        axis.set_xticks([])
                    # draw bar
                    axis.fill_between(
                        x=[
                            self.group_bins[0, c, 1, 0],
                            self.group_bins[0, c, 1, 1],
                        ],
                        y1=[1, 1],
                        y2=[0, 0],
                        facecolor="gray",
                        alpha=1 - self.oaxis_bar_transparency
                    )
                    # add label
                    if c % 2 == 1:
                        axis.annotate(
                            s=self.oaxis_xlabel,
                            xy=(np.mean(self.bounds[3, :]), 0.5),
                            ha="center",
                            va="center",
                        )
                # vertical outer axis
                elif r != 0 and c == self.intervals[1]:
                    # draw vertical outer axes
                    axis.set_xlim([0, 1])
                    axis.set_ylim([self.bounds[2, 0], self.bounds[2, 1]])
                    # handle ticks
                    axis.set_xticks([])
                    axis.yaxis.tick_right()
                    if r % 2 == 0:
                        if self.oaxis_yticks is None:
                            self.oaxis_yticks = np.linspace(
                                self.bounds[2, 0], self.bounds[2, 1], self.oaxis_n_yticks
                            )
                        axis.set_yticks(self.oaxis_yticks)
                        if self.oaxis_yticklabels is None:
                            self.oaxis_yticklabels = [f"{tick:.2f}"
                                                      for tick in self.oaxis_yticks]
                        axis.yaxis.set_ticklabels(self.oaxis_yticklabels)
                    else:
                        axis.set_yticks([])
                    # draw bar
                    axis.fill_between(
                        x=[0, 1],
                        y1=[
                            self.group_bins[r-1, 0, 0, 1],
                            self.group_bins[r-1, 0, 0, 1],
                        ],
                        y2=[
                            self.group_bins[r-1, 0, 0, 0],
                            self.group_bins[r-1, 0, 0, 0],
                        ],
                        facecolor="gray",
                        alpha=1 - self.oaxis_bar_transparency
                    )
                    # add label
                    if r % 2 == 0:
                        axis.annotate(
                            s=self.oaxis_ylabel,
                            xy=(0.50, np.mean(self.bounds[2, :])),
                            verticalalignment="center",
                            horizontalalignment="center",
                            rotation=270
                        )
                # scatter
                elif r != 0 and c != self.intervals[1]:
                    axis.scatter(
                        self.grouped_data[r-1, c, :, 0],
                        self.grouped_data[r-1, c, :, 1],
                        marker=self.marker,
                        s=self.markersize,
                        alpha = self.markeralpha,
                    )
                    # Pad the data bounds by 10% on each side.
                    axis.set_xlim([
                        self.bounds[0, 0] - 0.10 * (self.bounds[0, 1] - self.bounds[0, 0]),
                        self.bounds[0, 1] + 0.10 * (self.bounds[0, 1] - self.bounds[0, 0]),
                    ])
                    axis.set_ylim([
                        self.bounds[1, 0] - 0.10 * (self.bounds[1, 1] - self.bounds[1, 0]),
                        self.bounds[1, 1] + 0.10 * (self.bounds[1, 1] - self.bounds[1, 0]),
                    ])
                    # Only bottom-row (x) / left-column (y) cells at even
                    # positions show tick labels, to reduce clutter.
                    if c % 2 == 0 and r == self.intervals[0]:
                        if self.xticks is None:
                            self.xticks = np.linspace(
                                self.bounds[0, 0], self.bounds[0, 1], self.n_xticks
                            )
                        axis.set_xticks(self.xticks)
                        if self.xticklabels is None:
                            self.xticklabels = [f"{ticks:.2f}" for ticks in self.xticks]
                        axis.xaxis.set_ticklabels(self.xticklabels)
                    else:
                        axis.set_xticks([])
                    if r % 2 == 0 and c == 0:
                        if self.yticks is None:
                            self.yticks = np.linspace(
                                self.bounds[1, 0], self.bounds[1, 1], self.n_yticks
                            )
                        axis.set_yticks(self.yticks)
                        if self.yticklabels is None:
                            self.yticklabels = [f"{ticks:.2f}" for ticks in self.yticks]
                        axis.yaxis.set_ticklabels(self.yticklabels)
                    else:
                        axis.set_yticks([])
                    if c % 2 == 1 and r == self.intervals[0]:
                        axis.set_xlabel(self.xlabel)
                    if r % 2 == 1 and c == 0:
                        axis.set_ylabel(self.ylabel)
            # NOTE(review): showMaximized() is Qt-backend-specific -- this
            # fails on non-Qt matplotlib backends; confirm intended.
            figManager = plt.get_current_fig_manager()
            figManager.window.showMaximized()
            plt.show()
        else:
            # One full trellis figure per data set.
            for d_set in self.data_sets:
                self.data = d_set
                self.scatter()
        return None
def contour(self, fun=None, levels=None, scatter_data=False):
if fun is not None:
self.fun = fun
self.initialize()
if not self._multiple_data_sets:
self.classify_data()
width_ratios = np.ones(self.intervals[1] + 1)
width_ratios[-1] = self.oaxis_size
height_ratios = np.ones(self.intervals[0] + 1)
height_ratios[0] = self.oaxis_size
fig, axes = plt.subplots(
nrows=self.intervals[0] + 1,
ncols=self.intervals[1] + 1,
gridspec_kw={
"wspace": self.xspace,
"hspace": self.yspace,
"width_ratios": width_ratios,
"height_ratios": height_ratios,
},
figsize=self.figsize,
constrained_layout=self.constrained_layout,
# sharex="col",
# sharey="row",
)
fig.subplots_adjust(
top=0.95,
bottom=0.05,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2
)
for pos, axis in np.ndenumerate(axes):
c_axes = []
r, c = pos
if r == 0 and c == self.intervals[1]:
fig.delaxes(axis)
# horizontal outer axis
elif r == 0 and c != self.intervals[1]:
# handle limits
axis.set_xlim([self.bounds[3, 0], self.bounds[3, 1]])
axis.set_ylim([0, 1])
# handle ticks
axis.set_yticks([])
axis.xaxis.tick_top()
if c % 2 == 0:
self.oaxis_xticks = np.linspace(
self.bounds[3, 0],
self.bounds[3, 1],
self.oaxis_n_xticks
)
axis.set_xticks(self.oaxis_xticks)
if self.oaxis_xticklabels is None:
self.oaxis_xticklabels = [
f"{tick:.2f}" for tick in self.oaxis_xticks
]
axis.xaxis.set_ticklabels(self.oaxis_xticklabels)
else:
axis.set_xticks([])
# draw bar
axis.fill_between(
x=[
self.group_bins[0, c, 1, 0],
self.group_bins[0, c, 1, 1],
],
y1=[1, 1],
y2=[0, 0],
facecolor="gray",
alpha=1 - self.oaxis_bar_transparency
)
# add label
if c % 2 == 1:
axis.annotate(
s=self.oaxis_xlabel,
xy=(np.mean(self.bounds[3, :]), 0.5),
ha="center",
va="center",
)
# vertical outer axis
elif r != 0 and c == self.intervals[1]:
# draw vertical outer axes
axis.set_xlim([0, 1])
axis.set_ylim([self.bounds[2, 0], self.bounds[2, 1]])
# handle ticks
axis.set_xticks([])
axis.yaxis.tick_right()
if r % 2 == 0:
if self.oaxis_yticks is None:
self.oaxis_yticks = np.linspace(
self.bounds[2, 0], self.bounds[2, 1], self.oaxis_n_yticks
)
axis.set_yticks(self.oaxis_yticks)
if self.oaxis_yticklabels is None:
self.oaxis_yticklabels = [f"{tick:.2f}"
for tick in self.oaxis_yticks]
axis.yaxis.set_ticklabels(self.oaxis_yticklabels)
else:
axis.set_yticks([])
# draw bar
axis.fill_between(
x=[0, 1],
y1=[
self.group_bins[r - 1, 0, 0, 1],
self.group_bins[r - 1, 0, 0, 1],
],
y2=[
self.group_bins[r - 1, 0, 0, 0],
self.group_bins[r - 1, 0, 0, 0],
],
facecolor="gray",
alpha=1 - self.oaxis_bar_transparency
)
# add label
if r % 2 == 0:
axis.annotate(
s=self.oaxis_ylabel,
xy=(0.50, np.mean(self.bounds[2, :])),
verticalalignment="center",
horizontalalignment="center",
rotation=270
)
# contour
elif r != 0 and c != self.intervals[1]:
c_axes.append(axis)
contourf = axis.tricontourf(
self.grouped_data[r - 1, c, :, 0][~np.isnan(self.grouped_data[r-1, c, :, 0])],
self.grouped_data[r - 1, c, :, 1][~np.isnan(self.grouped_data[r-1, c, :, 1])],
self.grouped_fun[r - 1, c, :][~np.isnan(self.grouped_fun[r - 1, c, :])],
levels=levels,
cmap=self.cmap,
)
if scatter_data:
axis.scatter(
self.grouped_data[r - 1, c, :, 0],
self.grouped_data[r - 1, c, :, 1],
alpha=self.markeralpha,
marker="o",
c="white",
s=self.markersize,
)
axis.set_xlim([
self.bounds[0, 0] - 0.10 * (
self.bounds[0, 1] - self.bounds[0, 0]),
self.bounds[0, 1] + 0.10 * (
self.bounds[0, 1] - self.bounds[0, 0]),
])
axis.set_ylim([
self.bounds[1, 0] - 0.10 * (
self.bounds[1, 1] - self.bounds[1, 0]),
self.bounds[1, 1] + 0.10 * (
self.bounds[1, 1] - self.bounds[1, 0]),
])
if c % 2 == 0 and r == self.intervals[0]:
if self.xticks is None:
self.xticks = np.linspace(
self.bounds[0, 0], self.bounds[0, 1], self.n_xticks
)
axis.set_xticks(self.xticks)
if self.xticklabels is None:
self.xticklabels = [f"{ticks:.2f}" for ticks in self.xticks]
axis.xaxis.set_ticklabels(self.xticklabels)
else:
axis.set_xticks([])
if r % 2 == 0 and c == 0:
if self.yticks is None:
self.yticks = np.linspace(
self.bounds[1, 0], self.bounds[1, 1], self.n_yticks
)
axis.set_yticks(self.yticks)
if self.yticklabels is None:
self.yticklabels = [f"{ticks:.2f}" for ticks in self.yticks]
axis.yaxis.set_ticklabels(self.yticklabels)
else:
axis.set_yticks([])
if c % 2 == 1 and r == self.intervals[0]:
axis.set_xlabel(self.xlabel)
if r % 2 == 1 and c == 0:
axis.set_ylabel(self.ylabel)
colorbar_ticks = np.linspace(
np.nanmin(self.grouped_fun[r-1, c, :]),
np.nanmax(self.grouped_fun[r-1, c, :]),
self.n_colorbar_ticks,
)
colorbar = fig.colorbar(
contourf,
ax=axis,
shrink=1.0,
orientation="vertical",
pad=0.05,
fraction=0.15,
ticks=colorbar_ticks,
)
colorbar.ax.tick_params(
labelsize=self.colobar_tick_fontsize,
labelrotation=self.colorbar_label_rotation,
)
figManager = plt.get_current_fig_manager()
figManager.window.showMaximized()
plt.show()
else:
for d_set in self.data_sets:
self.data = d_set
self.scatter()
return None
def get_bounds(self):
self.bounds = np.array(
[np.nanmin(self.data, axis=0), np.nanmax(self.data, axis=0)]
).T
return self.bounds
def get_bins(self):
self.bins = []
for d, bound in enumerate(self.bounds):
if d > 1:
self.bins.append(np.linspace(bound[0], bound[1], self.intervals[d-2]+1))
self.group_bins = np.empty(shape=(self.intervals[0], self.intervals[1], 2, 2))
for r in range(self.intervals[0]):
for c in range(self.intervals[1]):
self.group_bins[r, c, :, :] = np.array([
[self.bins[0][r], self.bins[0][r+1]],
[self.bins[1][c], self.bins[1][c+1]]
])
self.group_bins = np.flip(self.group_bins, axis=0)
return self.group_bins
def classify_data(self):
self.get_bounds()
self.get_bins()
self.grouped_data = np.full((
self.intervals[0],
self.intervals[1],
self.data.shape[0],
self.data.shape[1],
), fill_value=np.nan)
if self.fun is not None:
self.grouped_fun = np.full((
self.intervals[0],
self.intervals[1],
self.data.shape[0],
), fill_value=np.nan)
for r in range(self.intervals[0]):
for c in range(self.intervals[1]):
for p, datum in enumerate(self.data):
check1 = datum[2] >= self.group_bins[r, c, 0, 0]
check2 = datum[2] <= self.group_bins[r, c, 0, 1]
check3 = datum[3] >= self.group_bins[r, c, 1, 0]
check4 = datum[3] <= self.group_bins[r, c, 1, 1]
if np.all([check1, check2, check3, check4]):
self.grouped_data[r, c, p, :] = datum
if self.fun is not None:
self.grouped_fun[r, c, p] = self.fun[p]
return self.grouped_data
def add_data(self, data):
if self.data is None:
self.data = data
else:
self.data = [self.data, data]
if __name__ == "__main__":
    # Demo: draw a 5x5 trellis of contour panels for a function of four
    # variables sampled on a regular grid in [-1, 1]^4.
    def fun(x):
        # Quadratic bowl; x is the transposed data array, so x[i] is the
        # i-th coordinate of every sample and the result is one value per
        # sample.
        return x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2
    # Alternative test functions -- uncomment one to try it:
    # def fun(x):
    #     return x[0] + x[1] + x[2] + x[3]
    # def fun(x):
    #     return x[0] ** 3 + x[1] ** 3 + x[2] ** 3 + x[3] ** 3
    # def fun(x):
    #     return np.sin(x[0]) + np.sin(x[1]) + np.sin(x[2]) + np.sin(x[3])
    # def fun(x):
    #     return x[0] ** 4 + x[1] ** 4 + x[2] ** 4 + x[3] ** 4
    plotter1 = TrellisPlotter()
    # Imaginary step counts in np.mgrid give the number of grid points:
    # 10 points along the plotted axes (x1, x2), 5 along the conditioning
    # axes (x3, x4).
    reso = 5j
    multiplier = 2
    x1, x2, x3, x4 = np.mgrid[
        -1:1:reso*multiplier,
        -1:1:reso*multiplier,
        -1:1:reso,
        -1:1:reso
    ]
    # flatten the grid into an (n_samples, 4) data matrix
    plotter1.data = np.array([x1.flatten(), x2.flatten(), x3.flatten(), x4.flatten()]).T
    plotter1.fun = fun(plotter1.data.T)
    plotter1.intervals = np.array([5, 5])
    plotter1.markeralpha = 0.10
    plotter1.markersize = 5
    plotter1.n_colorbar_ticks = 4
    plotter1.cmap = plt.get_cmap("inferno")
    plotter1.contour(levels=10, scatter_data=True)
| 40.644444 | 102 | 0.415906 | 22,661 | 0.953064 | 0 | 0 | 0 | 0 | 0 | 0 | 1,653 | 0.069521 |
d7e3b405d8a70706809a3a43f3a1d5801c3fe2c8 | 1,866 | py | Python | ros_mqtt_bridge/ros_to_mqtt.py | CPFL/ros_mqtt_bridge | 21cd3fd718b11efe17a9bdabaac212a93048c82b | [
"Apache-2.0"
] | 5 | 2018-09-28T08:10:28.000Z | 2022-01-23T12:58:17.000Z | ros_mqtt_bridge/ros_to_mqtt.py | CPFL/ros_mqtt_bridge | 21cd3fd718b11efe17a9bdabaac212a93048c82b | [
"Apache-2.0"
] | 1 | 2018-04-10T03:00:47.000Z | 2018-04-10T03:00:47.000Z | ros_mqtt_bridge/ros_to_mqtt.py | CPFL/ros_mqtt_bridge | 21cd3fd718b11efe17a9bdabaac212a93048c82b | [
"Apache-2.0"
] | 3 | 2019-04-01T05:11:17.000Z | 2021-09-15T14:08:57.000Z | #!/usr/bin/env python
# coding: utf-8
import yaml
import json
import rospy
import paho.mqtt.client as mqtt
from ros_mqtt_bridge.args_setters import ArgsSetters
class ROSToMQTT(ArgsSetters):
def __init__(self, from_topic, to_topic, message_type):
super(ROSToMQTT, self).__init__(message_type)
self.__mqtt_client = None
self.args["mqtt"]["publish"]["topic"] = to_topic
self.args["ros"]["wait_for_message"]["topic"] = from_topic
self.args["ros"]["wait_for_message"]["topic_type"] = self.args["ros"]["data_class"]
def connect_ros(self):
if "name" not in self.args["ros"]["init_node"]:
self.args["ros"]["init_node"]["name"] = "ros_mqtt_bridge"
self.args["ros"]["init_node"]["anonymous"] = True
rospy.init_node(**self.args["ros"]["init_node"])
def connect_mqtt(self):
self.__mqtt_client = mqtt.Client(**self.args["mqtt"]["client"])
if self.args["mqtt"]["tls"] is not None:
self.set_mqtt_tls()
self.__mqtt_client.connect(**self.args["mqtt"]["connect"])
def set_mqtt_tls(self):
self.__mqtt_client.tls_set(**self.args["mqtt"]["tls"])
self.__mqtt_client.tls_insecure_set(True)
def start(self):
self.connect_mqtt()
self.connect_ros()
self.__rospy_rate = rospy.Rate(**self.args["ros"]["rate"])
while not rospy.is_shutdown():
try:
message_yaml = str(rospy.wait_for_message(**self.args["ros"]["wait_for_message"]))
self.args["mqtt"]["publish"]["payload"] = json.dumps(yaml.load(message_yaml))
self.__mqtt_client.publish(**self.args["mqtt"]["publish"])
self.__rospy_rate.sleep()
except rospy.ROSException:
pass
except rospy.ROSInterruptException:
break
| 34.555556 | 98 | 0.614148 | 1,700 | 0.91104 | 0 | 0 | 0 | 0 | 0 | 0 | 368 | 0.197213 |
d7e4e36801cb32ad61d2a64ea35c1d50ecc72249 | 13,999 | py | Python | mmderain/models/backbones/rlnet.py | biubiubiiu/derain-toolbox | 1669138d1aaa72c986d70d03f9cde7dbbbb70fa1 | [
"Apache-2.0"
] | 4 | 2022-02-22T13:20:15.000Z | 2022-03-23T03:30:15.000Z | mmderain/models/backbones/rlnet.py | biubiubiiu/derain-toolbox | 1669138d1aaa72c986d70d03f9cde7dbbbb70fa1 | [
"Apache-2.0"
] | null | null | null | mmderain/models/backbones/rlnet.py | biubiubiiu/derain-toolbox | 1669138d1aaa72c986d70d03f9cde7dbbbb70fa1 | [
"Apache-2.0"
] | 1 | 2022-03-22T14:10:37.000Z | 2022-03-22T14:10:37.000Z | from functools import partial
from typing import List, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from mmderain.models.common import sizeof
from mmderain.models.registry import BACKBONES
from mmderain.models.layers import SELayer_Modified
class ResidualBlock(nn.Module):
def __init__(self, planes: int) -> None:
super().__init__()
self.model = nn.Sequential(
nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1),
nn.GroupNorm(num_groups=8, num_channels=planes)
)
self.act = nn.LeakyReLU(0.2, inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.model(x)
out = self.act(out + x)
return out
class FFRB(nn.Module):
"""Feature fusion residual block"""
def __init__(
self,
in_planes: int,
mid_planes: int,
out_planes: int,
kernel_size: int
) -> None:
super().__init__()
inter_planes = mid_planes * 4
planes_per_group = 4
self.model0 = nn.Sequential(
nn.Conv2d(in_planes, inter_planes, kernel_size=3, stride=1, padding=1),
nn.GroupNorm(num_groups=inter_planes // planes_per_group, num_channels=inter_planes),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(inter_planes, mid_planes,
kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2),
nn.GroupNorm(num_groups=mid_planes//planes_per_group, num_channels=mid_planes),
nn.LeakyReLU(0.2, inplace=True)
)
self.model1 = nn.Sequential(
nn.Conv2d(in_planes+mid_planes, out_planes, kernel_size=3, stride=1, padding=1),
nn.GroupNorm(num_groups=4, num_channels=out_planes),
nn.LeakyReLU(0.2, inplace=True),
SELayer_Modified(out_planes, reduction=out_planes//6,
bias=True, act=nn.LeakyReLU(0.2, inplace=True))
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.model0(x)
out = torch.cat([x, out], dim=1)
out = self.model1(out)
return out
class Encoder(nn.Module):
def __init__(self, layers: List[nn.Module]) -> None:
super().__init__()
self.models = nn.ModuleList(layers)
self.downsample = nn.AvgPool2d(kernel_size=2, stride=2)
def forward(self, x: torch.Tensor) -> Tuple[List[torch.Tensor]]:
features = []
out = x
for model in self.models:
out = model(out)
features.append(out)
out = self.downsample(out)
return out, features
class Decoder(nn.Module):
def __init__(self, layers: List[nn.Module]) -> None:
super().__init__()
self.models = nn.ModuleList(layers)
self.upsample = partial(F.interpolate, mode='nearest', align_corners=None)
def forward(self, x: torch.Tensor, bridges: Sequence[torch.Tensor]) -> torch.Tensor:
features = []
out = x
for model in self.models:
out = model(out)
out = self.upsample(out, scale_factor=2)
out = torch.cat([out, bridges.pop()], dim=1)
features.append(out)
return out, features
class UFFRB(nn.Module):
"""U-Net structure constructed with FFRBs"""
def __init__(self, planes: int, depth: int) -> None:
super().__init__()
ffrb_builder = partial(FFRB, mid_planes=planes, out_planes=planes, kernel_size=3)
self.encoder = Encoder([ffrb_builder(in_planes=planes) for _ in range(depth // 2)])
self.decoder = Decoder([ffrb_builder(in_planes=planes if i == 0 else planes*2)
for i in range(depth//2)])
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = x
out, encoder_features = self.encoder(out)
out, _ = self.decoder(out, encoder_features)
return out
class Foundation(nn.Module):
"""Structure for feature compensator and error detector"""
def __init__(
self,
in_planes: int,
mid_planes: int,
out_planes: int,
uffrb_depth: int = -1,
n_ffrb: int = 3,
act: Optional[str] = None
) -> None:
super().__init__()
models = []
planes_per_group = 4
models.extend([
nn.Conv2d(in_planes, mid_planes, kernel_size=3, stride=1, padding=1),
nn.GroupNorm(num_groups=mid_planes//planes_per_group, num_channels=mid_planes),
nn.LeakyReLU(0.2, inplace=True)
])
use_uffrb = uffrb_depth > 0
if use_uffrb:
models.append(UFFRB(mid_planes, uffrb_depth))
for i in range(n_ffrb):
if use_uffrb and i == 0:
models.append(FFRB(mid_planes*2, mid_planes, mid_planes, kernel_size=3))
else:
models.append(FFRB(mid_planes, mid_planes, mid_planes, kernel_size=3))
models.append(nn.Conv2d(mid_planes, out_planes, kernel_size=3, stride=1, padding=1))
if act == 'leakyrelu':
models.append(nn.LeakyReLU(0.2, inplace=True))
elif act == 'sigmoid':
models.append(nn.Sigmoid())
self.model = nn.Sequential(*models)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
class FeatureCompensator(Foundation):
def __init__(
self,
in_planes: int,
mid_planes: int,
out_planes: int,
use_uffrb: bool = True,
n_ffrb: int = 3,
act: str = 'leakyrelu'
) -> None:
uffrb_depth = 6 if use_uffrb else -1
super().__init__(in_planes, mid_planes, out_planes, uffrb_depth, n_ffrb, act)
class ErrorDetector(Foundation):
def __init__(
self,
in_planes: int,
mid_planes: int,
out_planes: int,
use_uffrb: bool,
n_ffrb: int,
act: Optional[str] = None
) -> None:
uffrb_depth = 6 if use_uffrb else -1
super().__init__(in_planes, mid_planes, out_planes, uffrb_depth, n_ffrb, act)
class Refinement(nn.Module):
"""Refinement Module"""
def __init__(
self,
in_planes: int,
mid_planes: int,
out_planes: int,
n_scale: int,
n_residual: int
) -> None:
super().__init__()
self.multi_scale = nn.ModuleList([
nn.Sequential(
nn.Conv2d(in_planes, 1, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, inplace=True)
)
for _ in range(n_scale)
])
self.conv0 = nn.Sequential(
nn.Conv2d(in_planes+4, mid_planes, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True)
)
self.stacked_rb = nn.ModuleList([
ResidualBlock(mid_planes)
for _ in range(n_residual)
])
self.use_feature_idxs = [0, 3, 6]
self.last = nn.Sequential(
nn.Conv2d(mid_planes * len(self.use_feature_idxs), mid_planes,
kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(mid_planes, mid_planes // 2, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(mid_planes//2, out_planes, kernel_size=3, stride=1, padding=1),
nn.LeakyReLU(0.2, inplace=True)
)
self.upsample = partial(F.interpolate, mode='nearest', align_corners=None)
def forward(self, x: torch.Tensor) -> torch.Tensor:
# mutli-scale processing
out_shape = sizeof(x)
outputs = []
for i, model in enumerate(self.multi_scale):
tmp = F.avg_pool2d(x, kernel_size=2**(i+2))
tmp = model(tmp)
tmp = self.upsample(tmp, size=out_shape)
outputs.append(tmp)
multi_scale_out = torch.cat(outputs, dim=1)
# pass through stacked residual blocks
out = torch.cat([multi_scale_out, x], dim=1)
out = self.conv0(out)
features = []
for i, model in enumerate(self.stacked_rb):
out = model(out)
if i in self.use_feature_idxs:
features.append(out)
out = torch.cat(features, dim=1)
out = self.last(out)
return out
@BACKBONES.register_module()
class RLNet(nn.Module):
"""DerainRLNet Network Structure
Paper: Robust Representation Learning with Feedback for Single Image Deraining
Official Code: https://github.com/LI-Hao-SJTU/DerainRLNet
Args:
in_channels (int): Channel number of inputs.
out_channels (int): Channel number of outputs.
mid_channels (list[int]): Channel number of intermediate features. Default: [24, 32, 18]
theta (list[float]): Values of theta1 and theta2. Default: [0.15, 0.05]
n_scale (int): Number of scales in refinement module. Default: 4
n_residual (int): Number of residual blocks in refinement module. Default: 7
"""
def __init__(
self,
in_channels: int,
out_channels: int,
mid_channels: List[int] = [24, 32, 18],
theta: List[float] = [0.15, 0.05],
n_scale: int = 4,
n_residual: int = 7
) -> None:
super().__init__()
theta1, theta2 = theta
self.theta1 = theta1
self.theta2 = theta2
mid0, mid1, mid2 = mid_channels
self.conv0 = nn.Sequential(
nn.Conv2d(in_channels, mid0, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, inplace=True)
)
# separate branches
encoder_builder = partial(FFRB, mid0, mid0, mid0)
self.encoder1 = Encoder([encoder_builder(kernel_size=3) for _ in range(3)])
self.encoder2 = Encoder([encoder_builder(kernel_size=5) for _ in range(3)])
self.encoder3 = Encoder([encoder_builder(kernel_size=7) for _ in range(3)])
decoder_builders = [
partial(FFRB, mid0, mid0, mid0),
partial(FFRB, mid0*2+3*out_channels, mid0, mid0),
partial(FFRB, mid0*2+3*out_channels, mid0, mid0)
]
self.decoder1 = Decoder([f(kernel_size=3) for f in decoder_builders])
self.decoder2 = Decoder([f(kernel_size=5) for f in decoder_builders])
self.decoder3 = Decoder([f(kernel_size=7) for f in decoder_builders])
# feature compensators
self.fc1_internal = FeatureCompensator(3*mid0, mid1, out_channels)
self.fc2_internal = FeatureCompensator(3*mid0, mid1, out_channels)
self.fc1_externel = FeatureCompensator(in_channels, mid1, out_channels,
use_uffrb=False, n_ffrb=1, act='sigmoid')
self.fc2_externel = FeatureCompensator(in_channels, mid1, out_channels,
use_uffrb=False, n_ffrb=1, act='sigmoid')
# error detectors
self.ed1 = ErrorDetector(3*(mid0*2+3*out_channels), mid1, out_channels,
use_uffrb=False, n_ffrb=5, act='leakyrelu')
self.ed2 = ErrorDetector(in_channels+out_channels, mid1, out_channels,
use_uffrb=True, n_ffrb=4, act='sigmoid')
# post processor
self.fusion = nn.Sequential(
nn.Conv2d(6*mid0, mid2, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, inplace=True)
)
self.refinement = Refinement(mid2 + 6*out_channels, mid1, out_channels, n_scale, n_residual)
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
self.upsample = partial(F.interpolate, mode='nearest', align_corners=None)
def forward(self, x: torch.Tensor, y: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor]:
out = self.conv0(x)
out1, features1 = self.encoder1(out)
out2, features2 = self.encoder2(out)
out3, features3 = self.encoder3(out)
# feature compensation
FA1, FA2, FA3 = features1[1], features2[1], features3[1]
FB1, FB2, FB3 = features1[2], features2[2], features3[2]
F1 = self.fc1_internal(torch.cat([FA1, FA2, FA3], dim=1))
F2 = self.fc2_internal(torch.cat([FB1, FB2, FB3], dim=1))
refined1 = [features1[0]] + \
[torch.cat([FA1, F1, F1, F1], dim=1), torch.cat([FB1, F2, F2, F2], dim=1)]
refined2 = [features2[0]] + \
[torch.cat([FA2, F1, F1, F1], dim=1), torch.cat([FB2, F2, F2, F2], dim=1)]
refined3 = [features3[0]] + \
[torch.cat([FA3, F1, F1, F1], dim=1), torch.cat([FB3, F2, F2, F2], dim=1)]
out1, dec_feat1 = self.decoder1(out1, refined1)
out2, dec_feat2 = self.decoder2(out2, refined2)
out3, dec_feat3 = self.decoder3(out3, refined3)
# error detection
FE1, FE2, FE3 = dec_feat1[1], dec_feat2[1], dec_feat3[1]
phi1 = self.ed1(torch.cat([FE1, FE2, FE3], dim=1))
phi = self.ed2(torch.cat([self.pool(x), phi1], dim=1))
err = torch.div(self.theta1, phi) - self.theta1
phi1_prime = F.relu(phi1-err*(1-2*phi1), inplace=True)
phi1_prime = self.upsample(phi1_prime, scale_factor=2)
# post processing
out = torch.cat([out1, out2, out3], dim=1)
out = self.fusion(out)
# inject error information
out = torch.cat([out, phi1_prime, phi1_prime, phi1_prime,
phi1_prime, phi1_prime, phi1_prime], dim=1)
# refine
out = self.refinement(out)
if y is None:
return out, F1, F2, phi1, phi
else:
y2 = F.avg_pool2d(y, kernel_size=2, stride=2)
y4 = F.avg_pool2d(y, kernel_size=4, stride=4)
k2 = self.fc1_externel(y2)
k4 = self.fc2_externel(y4)
y2 = y2 + self.theta2 * self.theta2 * k2 * y2
y4 = y4 + self.theta2 * self.theta2 * k4 * y4
return out, F1, F2, phi1, phi, y2, y4, k2, k4
| 35.085213 | 100 | 0.591114 | 13,650 | 0.97507 | 0 | 0 | 5,622 | 0.4016 | 0 | 0 | 1,122 | 0.080149 |
d7e53f75f37c8e40de012dfc26ba2d6bccb8b7ab | 200 | py | Python | practice/calculator.py | kristenpicard/python-practice | 71e6b7e0af68b1eba5f57ad8c836fe250ab7d6db | [
"MIT"
] | null | null | null | practice/calculator.py | kristenpicard/python-practice | 71e6b7e0af68b1eba5f57ad8c836fe250ab7d6db | [
"MIT"
] | null | null | null | practice/calculator.py | kristenpicard/python-practice | 71e6b7e0af68b1eba5f57ad8c836fe250ab7d6db | [
"MIT"
] | null | null | null | class Calculator:
def add(self,a,b):
return a+b
def subtract(self,a,b):
return a-b
def multiply(self,a,b):
return a*b
def divide(self,a,b):
return a/b
| 18.181818 | 27 | 0.54 | 198 | 0.99 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7e5a49c03c6113e3e09dbacc624a1673c95d301 | 1,336 | py | Python | 10/lottery.py | sukio-1024/codeitPy | 43f4c6d5205eab9cb4780ceb8799b04ce7b10acb | [
"MIT"
] | null | null | null | 10/lottery.py | sukio-1024/codeitPy | 43f4c6d5205eab9cb4780ceb8799b04ce7b10acb | [
"MIT"
] | null | null | null | 10/lottery.py | sukio-1024/codeitPy | 43f4c6d5205eab9cb4780ceb8799b04ce7b10acb | [
"MIT"
] | null | null | null | from random import randint
# 무작위로 정렬된 1 - 45 사이의 숫자 여섯개 뽑기
def generate_numbers():
# 코드를 입력하세요
numbers = []
while (len(numbers) < 6):
number = randint(1, 45)
if number not in numbers:
numbers.append(number)
numbers.sort()
return numbers
# 보너스까지 포함해 7개 숫자 뽑기
# 정렬된 6개의 당첨 번호와 1개의 보너스 번호 리스트를 리턴
# 예: [1, 7, 13, 23, 31, 41, 15]
def draw_winning_numbers():
# 코드를 입력하세요
win_num_list = generate_numbers()
Bnumber = randint(1,45)
while(Bnumber in win_num_list):
Bnumber = randint(1,45)
win_num_list.append(Bnumber)
return win_num_list
# 두 리스트에서 중복되는 숫자가 몇개인지 구하기
def count_matching_numbers(list1, list2):
# 코드를 입력하세요
count = 0
for i in range(0,6):
if(list1[i] in list2):
count += 1
return count
# 로또 등수 확인하기
def check(numbers, winning_numbers):
# 코드를 입력하세요
countNum = count_matching_numbers(numbers, winning_numbers[:6])
if(winning_numbers[len(winning_numbers)-1] not in numbers):
if(countNum == 6):
return 1000000000
elif(countNum == 5):
return 1000000
elif(countNum == 4):
return 50000
elif (countNum == 3):
return 5000
else :
if(countNum == 6):
return 50000000
else :
return 0
| 22.644068 | 67 | 0.589072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 428 | 0.273657 |
d7e7049f9cceb5d987f7f1050440f62419a1e6b8 | 1,614 | py | Python | pyreach/tools/reach.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 13 | 2021-09-01T01:10:22.000Z | 2022-03-05T10:01:52.000Z | pyreach/tools/reach.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | null | null | null | pyreach/tools/reach.py | google-research/pyreach | f91753ce7a26e77e122eb02a9fdd5a1ce3ce0159 | [
"Apache-2.0"
] | 6 | 2021-09-20T21:17:53.000Z | 2022-03-14T18:42:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper script to download and invoke reach command automatically."""
import os
import subprocess
import sys
from typing import Sequence
from absl import app # type: ignore
from absl import flags # type: ignore
from pyreach.impl import reach_tools_impl
flags.DEFINE_string("o", None, "Reach working directory")
def main(unused_argv: Sequence[str]) -> None:
workspace = flags.FLAGS.o
if workspace is None:
workspace = reach_tools_impl.create_reach_workspace()
reach_file, _ = reach_tools_impl.download_reach_tool(workspace)
args = sys.argv[1:]
if (args and "connect" == args[0] and "--webrtc_headless" not in args and
"-webrtc_headless" not in args and "-connect_host" not in args and
"--connect_host" not in args):
webrtc_headless_file = reach_tools_impl.download_webrtc_headless(workspace)
args = ["connect", "--webrtc_headless",
str(webrtc_headless_file)] + args[1:]
os.chdir(workspace)
subprocess.call([str(reach_file)] + args)
if __name__ == "__main__":
app.run(main)
| 32.938776 | 79 | 0.739777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 805 | 0.498761 |
d7e72c05c821b5b2aa9289c98ad93b19e168816c | 1,487 | py | Python | issue_order/migrations/0005_auto_20170211_0033.py | jiejiang/courier | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | [
"Apache-2.0"
] | null | null | null | issue_order/migrations/0005_auto_20170211_0033.py | jiejiang/courier | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | [
"Apache-2.0"
] | 13 | 2020-02-12T02:56:24.000Z | 2022-01-13T01:23:08.000Z | issue_order/migrations/0005_auto_20170211_0033.py | jiejiang/courier | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-11 00:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('issue_order', '0004_auto_20170210_2358'),
]
operations = [
migrations.AlterField(
model_name='courierbatch',
name='credit',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Credit'),
),
migrations.AlterField(
model_name='courierbatch',
name='rate',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Rate per Package'),
),
migrations.AlterField(
model_name='courierbatch',
name='state',
field=models.IntegerField(db_index=True, default=2, verbose_name='State'),
),
migrations.AlterField(
model_name='courierbatch',
name='system',
field=models.CharField(blank=True, choices=[('yunda', '\u97f5\u8fbe\u7ebf'), ('postal', '\u90ae\u653f\u7ebf')], db_index=True, max_length=32, null=True, verbose_name='System Name'),
),
migrations.AlterField(
model_name='courierbatch',
name='uuid',
field=models.CharField(blank=True, db_index=True, max_length=64, null=True, unique=True, verbose_name='UUID'),
),
]
| 36.268293 | 193 | 0.615333 | 1,329 | 0.893746 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.215871 |
d7e74a16bd1d49e08dbc231924a1101d9a8cb72e | 1,181 | py | Python | backend-project/small_eod/letters/migrations/0011_auto_20200618_1921.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 64 | 2019-12-30T11:24:03.000Z | 2021-06-24T01:04:56.000Z | backend-project/small_eod/letters/migrations/0011_auto_20200618_1921.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 465 | 2018-06-13T21:43:43.000Z | 2022-01-04T23:33:56.000Z | backend-project/small_eod/letters/migrations/0011_auto_20200618_1921.py | WlodzimierzKorza/small_eod | 027022bd71122a949a2787d0fb86518df80e48cd | [
"MIT"
] | 72 | 2018-12-02T19:47:03.000Z | 2022-01-04T22:54:49.000Z | # Generated by Django 3.0.7 on 2020-06-18 19:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('letters', '0010_remove_letter_address'),
]
operations = [
migrations.CreateModel(
name='DocumentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Type of letter', max_length=256, verbose_name='Document type')),
],
),
migrations.RemoveField(
model_name='letter',
name='description',
),
migrations.RemoveField(
model_name='letter',
name='name',
),
migrations.DeleteModel(
name='Description',
),
migrations.AddField(
model_name='letter',
name='document_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='letters.DocumentType', verbose_name='Document type of letter.'),
),
]
| 31.078947 | 175 | 0.589331 | 1,055 | 0.893311 | 0 | 0 | 0 | 0 | 0 | 0 | 262 | 0.221846 |
d7e8137c5791af611e110bf8ad7f195a10e8a8ad | 776 | py | Python | classes/dns.py | double-beep/SmokeDetector | 2d90dd23bbe15536868b699e79d4f0551a60f092 | [
"Apache-2.0",
"MIT"
] | 464 | 2015-01-03T05:57:08.000Z | 2022-03-23T05:42:39.000Z | classes/dns.py | double-beep/SmokeDetector | 2d90dd23bbe15536868b699e79d4f0551a60f092 | [
"Apache-2.0",
"MIT"
] | 6,210 | 2015-01-03T05:37:36.000Z | 2022-03-31T09:31:45.000Z | classes/dns.py | double-beep/SmokeDetector | 2d90dd23bbe15536868b699e79d4f0551a60f092 | [
"Apache-2.0",
"MIT"
] | 322 | 2015-01-14T05:13:06.000Z | 2022-03-28T01:18:31.000Z | import dns
import dns.resolver
import dns.rdatatype
def dns_resolve(domain: str) -> list:
addrs = []
resolver = dns.resolver.Resolver(configure=False)
# Default to Google DNS
resolver.nameservers = ['8.8.8.8', '8.8.4.4']
try:
for answer in resolver.resolve(domain, 'A').response.answer:
for item in answer:
if item.rdtype == dns.rdatatype.A:
addrs.append(item.address)
except dns.resolver.NoAnswer:
pass
try:
for answer in resolver.resolve(domain, 'AAAA').response.answer:
for item in answer:
if item.rdtype == dns.rdatatype.AAAA:
addrs.append(item.address)
except dns.resolver.NoAnswer:
pass
return addrs
| 25.866667 | 71 | 0.594072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.064433 |
d7e86d3e4f86411bb690784d2bf229730c979562 | 8,039 | py | Python | messaging/kombu_brighter/kombu_messaging.py | iancooper/RpcIsEvil | aca8d506691791d7f24fcb561fcca92a54dc3058 | [
"MIT"
] | null | null | null | messaging/kombu_brighter/kombu_messaging.py | iancooper/RpcIsEvil | aca8d506691791d7f24fcb561fcca92a54dc3058 | [
"MIT"
] | null | null | null | messaging/kombu_brighter/kombu_messaging.py | iancooper/RpcIsEvil | aca8d506691791d7f24fcb561fcca92a54dc3058 | [
"MIT"
] | 2 | 2020-10-08T21:08:14.000Z | 2021-03-24T11:00:05.000Z | """"
File : kombu_messaging.py
Author : ian
Created : 09-28-2016
Last Modified By : ian
Last Modified On : 09-28-2016
***********************************************************************
The MIT License (MIT)
Copyright © 2016 Ian Cooper <ian_hammond_cooper@yahoo.co.uk>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
**********************************************************************i*
"""
from kombu.message import Message as Message
from core.messaging import BrightsideMessage, BrightsideMessageHeader, BrightsideMessageBody, BrightsideMessageType
from core.exceptions import MessagingException
from uuid import UUID, uuid4
from typing import Dict
import re
import codecs
# AMQP/Kombu header keys used to carry Brighter message metadata on the wire.
message_type_header = "MessageType"
message_id_header = "MessageId"
message_correlation_id_header = "CorrelationId"
message_topic_name_header = "Topic"
message_handled_count_header = "HandledCount"
# NOTE(review): the next two constants share the same wire key "x-delay" —
# presumably one marks the requested delay and one the applied delay; confirm.
message_delay_milliseconds_header = "x-delay"
message_delayed_milliseconds_header = "x-delay"
message_original_message_id_header = "x-original-message-id"
message_delivery_tag_header = "DeliveryTag"
# See http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U........ # 8-digit hex escapes
| \\u.... # 4-digit hex escapes
| \\x.. # 2-digit hex escapes
| \\[0-7]{1,3} # Octal escapes
| \\N\{[^}]+\} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)''', re.UNICODE | re.VERBOSE)
def decode_escapes(s):
    """Return `s` with every literal backslash escape sequence (\\n, \\u0041,
    \\x41, octal, named characters, ...) replaced by the character it denotes."""
    return ESCAPE_SEQUENCE_RE.sub(
        lambda match: codecs.decode(match.group(0), 'unicode-escape'), s)
class ReadError:
    """Value object describing why reading part of a wire message failed."""
    def __init__(self, error_message: str) -> None:
        # Human-readable description of the read failure.
        self.error_message = error_message
    def __str__(self) -> str:
        # Printing the error yields its message directly.
        return self.error_message
class BrightsideMessageFactory:
    """
    The message factory turns an 'on-the-wire' Kombu message into our internal
    BrightsideMessage representation. We try to be as tolerant as possible
    (following Postel's Law: https://en.wikipedia.org/wiki/Robustness_principle):
    be conservative in what you do, be liberal in what you accept. Any field
    that cannot be read is replaced by a default and flagged via
    `_has_read_errors` rather than raising.
    """
    def __init__(self) -> None:
        # Set to True whenever any header/body read fails during create_message.
        self._has_read_errors = False
    def create_message(self, message: Message) -> BrightsideMessage:
        """Translate a Kombu `Message` into a BrightsideMessage, substituting
        defaults (and recording the failure) for any unreadable field."""
        self._has_read_errors = False
        def _get_correlation_id() -> UUID:
            header, err = self._read_header(message_correlation_id_header, message)
            if err is None:
                return UUID(header)
            else:
                self._has_read_errors = True
                # NOTE(review): returns "" rather than a UUID on failure —
                # inconsistent with the annotation; confirm downstream tolerates it.
                return ""
        def _get_message_id() -> UUID:
            header, err = self._read_header(message_id_header, message)
            if err is None:
                return UUID(header)
            else:
                self._has_read_errors = True
                # Fall back to a fresh random id so the message is still usable.
                return uuid4()
        def _get_message_type() -> BrightsideMessageType:
            header, err = self._read_header(message_type_header, message)
            if err is None:
                return BrightsideMessageType(header)
            else:
                self._has_read_errors = True
                return BrightsideMessageType.unacceptable
        def _get_payload() -> str:
            body, err = self._read_payload(message)
            if err is None:
                return body
            else:
                self._has_read_errors = True
                return ""
        def _get_payload_type() -> str:
            payload_type, err = self._read_payload_type(message)
            if err is None:
                return payload_type
            else:
                self._has_read_errors = True
                return ""
        def _get_topic() -> str:
            header, err = self._read_header(message_topic_name_header, message)
            if err is None:
                return header
            else:
                self._has_read_errors = True
                return ""
        message_id = _get_message_id()
        topic = _get_topic()
        # NOTE(review): `not message.errors or self._has_read_errors` marks the
        # message unacceptable only when there are wire errors AND no read
        # errors so far — possibly intended to be `and not`; confirm.
        message_type = _get_message_type() if not message.errors or self._has_read_errors else BrightsideMessageType.unacceptable
        correlation_id = _get_correlation_id()
        payload = _get_payload()
        payload_type = _get_payload_type()
        message_header = BrightsideMessageHeader(identity=message_id, topic=topic, message_type=message_type,
                                                 correlation_id=correlation_id, content_type="json")
        message_body = BrightsideMessageBody(body=payload, body_type=payload_type)
        return BrightsideMessage(message_header, message_body)
    # All of these methods are warned as static, implies they should be helper classes that take state in constructor
    def _read_header(self, header_key: str, message: Message) -> (str, ReadError):
        # Returns (value, None) on success, ("", ReadError) when the key is absent.
        if header_key not in message.headers.keys():
            return "", ReadError("Could not read header with key: {}".format(header_key))
        else:
            return message.headers.get(header_key), None
    def _read_payload(self, message: Message) -> (str, ReadError):
        # Returns the body with escape sequences decoded; [1:-1] strips the
        # surrounding quotes of the serialized payload.
        if not message.errors:
            body_text = decode_escapes(message.body)
            return body_text[1:-1], None
        else:
            errors = ", ".join(message.errors)
            return "", ReadError("Could not parse message. Errors: {}".format(errors))
    def _read_payload_type(self, message: Message) -> (str, ReadError):
        # The payload type is taken straight from the transport content type.
        if not message.errors:
            return message.content_type, None
        else:
            errors = ", ".join(message.errors)
            return "", ReadError("Could not read payload type. Errors: {}".format(errors))
class KombuMessageFactory():
    """Builds Kombu-ready header dictionaries from a BrightsideMessage."""
    def __init__(self, message: BrightsideMessage) -> None:
        # The Brightside message whose header fields we translate.
        self._message = message
    def create_message_header(self) -> Dict:
        """Return the transport header dict for the wrapped message.

        Raises:
            MessagingException: if the message id or message type is missing.
        """
        identity = str(self._message.header.id)
        if identity is None:
            raise MessagingException("Missing id on message, this is a required field")
        msg_type = self._message.header.message_type.value
        if msg_type is None:
            raise MessagingException("Missing type on message, this is a required field")
        header = {
            message_id_header: identity,
            message_type_header: msg_type,
        }
        correlation = str(self._message.header.correlation_id)
        if correlation is not None:
            header[message_correlation_id_header] = correlation
        return header
| 40.396985 | 129 | 0.65941 | 5,333 | 0.662649 | 0 | 0 | 0 | 0 | 0 | 0 | 2,560 | 0.318091 |
d7e9f1211de6068417b958ad35db8e04d66e800a | 2,185 | py | Python | evaluation/lcnn/postprocess.py | mlpc-ucsd/LETR | 6022fbd9df65569f4a82b1ac065bee8f26fc4ca6 | [
"Apache-2.0"
] | 90 | 2021-04-16T05:06:05.000Z | 2022-03-30T03:03:47.000Z | evaluation/lcnn/postprocess.py | mlpc-ucsd/LETR | 6022fbd9df65569f4a82b1ac065bee8f26fc4ca6 | [
"Apache-2.0"
] | 11 | 2021-05-06T21:02:33.000Z | 2022-03-22T09:28:39.000Z | evaluation/lcnn/postprocess.py | mlpc-ucsd/LETR | 6022fbd9df65569f4a82b1ac065bee8f26fc4ca6 | [
"Apache-2.0"
] | 14 | 2021-05-07T14:31:43.000Z | 2022-03-18T01:32:27.000Z | import numpy as np
def pline(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the infinite line through
    (x1, y1) and (x2, y2); the projection parameter is not clamped."""
    dir_x, dir_y = x2 - x1, y2 - y1
    norm_sq = dir_x * dir_x + dir_y * dir_y
    # Projection parameter of (x, y) onto the line; 1e-9 guards a zero-length segment.
    t = ((x - x1) * dir_x + (y - y1) * dir_y) / max(1e-9, float(norm_sq))
    off_x = x1 + t * dir_x - x
    off_y = y1 + t * dir_y - y
    return off_x * off_x + off_y * off_y
def psegment(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the line *segment* from
    (x1, y1) to (x2, y2).

    The projection parameter is clamped to [0, 1] so the nearest point
    always lies on the segment.
    """
    px = x2 - x1
    py = y2 - y1
    dd = px * px + py * py
    # Guard against zero-length segments (dd == 0), matching pline/plambda;
    # previously this divided by float(dd) and could raise ZeroDivisionError.
    u = max(min(((x - x1) * px + (y - y1) * py) / max(1e-9, float(dd)), 1), 0)
    dx = x1 + u * px - x
    dy = y1 + u * py - y
    return dx * dx + dy * dy
def plambda(x1, y1, x2, y2, x, y):
    """Projection parameter of (x, y) onto the line through (x1, y1)-(x2, y2):
    0 at the first endpoint, 1 at the second; unclamped outside that range."""
    dir_x, dir_y = x2 - x1, y2 - y1
    norm_sq = dir_x * dir_x + dir_y * dir_y
    # 1e-9 floor guards against a degenerate zero-length segment.
    return ((x - x1) * dir_x + (y - y1) * dir_y) / max(1e-9, float(norm_sq))
def postprocess(lines, scores, threshold=0.01, tol=1e9, do_clip=False):
    """Deduplicate / merge detected line segments.

    Each candidate segment (p, q) is compared against the already-selected
    segments; if a selected segment lies close enough (squared point-to-line
    distances within threshold**2), the candidate's parameter interval
    [start, end] (in plambda coordinates along p->q) is shrunk or discarded.

    Args:
        lines: iterable of (p, q) endpoint pairs (array-like points).
        scores: per-line scores, same order as `lines`.
        threshold: distance threshold for treating two lines as overlapping.
        tol: slack added around the overlap interval; the default 1e9
            effectively makes any near-collinear pair a full overlap.
        do_clip: NOTE(review): not referenced in the body — presumably it
            once toggled the "case 1" skip below; confirm before relying on it.

    Returns:
        (nlines, nscores): np.arrays of the surviving segments and scores.
    """
    nlines, nscores = [], []
    for (p, q), score in zip(lines, scores):
        # [start, end] is the surviving portion of the candidate, in plambda units.
        start, end = 0, 1
        for a, b in nlines: # nlines: Selected lines.
            # Skip selected lines that are not close to the candidate's line
            # (checked symmetrically in both directions).
            if (
                min(
                    max(pline(*p, *q, *a), pline(*p, *q, *b)),
                    max(pline(*a, *b, *p), pline(*a, *b, *q)),
                )
                > threshold ** 2
            ):
                continue
            # Project the selected segment's endpoints onto the candidate.
            lambda_a = plambda(*p, *q, *a)
            lambda_b = plambda(*p, *q, *b)
            if lambda_a > lambda_b:
                lambda_a, lambda_b = lambda_b, lambda_a
            lambda_a -= tol
            lambda_b += tol
            # case 1: skip (if not do_clip)
            if start < lambda_a and lambda_b < end:
                continue
            # not intersect
            if lambda_b < start or lambda_a > end:
                continue
            # cover
            if lambda_a <= start and end <= lambda_b:
                # Candidate fully covered by a selected segment: force start>end
                # so it is dropped below.
                start = 10
                break
            # case 2 & 3:
            if lambda_a <= start and start <= lambda_b:
                start = lambda_b
            if lambda_a <= end and end <= lambda_b:
                end = lambda_a
            if start >= end:
                break
        if start >= end:
            # Nothing left of the candidate after clipping — discard it.
            continue
        nlines.append(np.array([p + (q - p) * start, p + (q - p) * end]))
        nscores.append(score)
    return np.array(nlines), np.array(nscores)
| 28.012821 | 73 | 0.430664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.041648 |
d7ea13a7ea555e525798ba44cfdbaf22effff83f | 505 | py | Python | core-python/Core_Python/com/LambdaWithFilterMapReduce.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | [
"MIT"
] | 9 | 2020-04-23T05:24:19.000Z | 2022-02-17T16:37:51.000Z | core-python/Core_Python/com/LambdaWithFilterMapReduce.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | [
"MIT"
] | 5 | 2020-10-01T05:08:37.000Z | 2020-10-12T03:18:10.000Z | core-python/Core_Python/com/LambdaWithFilterMapReduce.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | [
"MIT"
] | 9 | 2020-04-28T14:06:41.000Z | 2021-10-19T18:32:28.000Z | from functools import reduce
from math import fsum
# Interactive demo of filter/map/reduce with lambdas:
# read `n` integers, keep the even ones, double them, and sum the doubles.
n = int(input("How many number you want to enter for even and odd numbers ? : "))
lst = []
for i in range(n):
    j = int(input("Enter any positive number : "))
    lst.append(j)
# filter: keep only the even values.
evens = list(filter(lambda i: i % 2 == 0, lst))
print("Number of even numbers from list is : ", evens)
# map: double every even value.
doubles = list(map(lambda i: i+i,evens))
print("Double even values : ",doubles)
# reduce: fold the doubled values into their sum.
print("Sum of all even numbers after doubling them : ",reduce(lambda a,b : a+b,doubles))
d7eae4be235cae46ed85305fac6878bbc9f96d3d | 3,315 | py | Python | prov_interop/tests/test_comparator.py | softwaresaved/provtoolsuite-interop-test-harness | 1d795802b26999897d5b3136a8245022fa13cce3 | [
"MIT"
] | null | null | null | prov_interop/tests/test_comparator.py | softwaresaved/provtoolsuite-interop-test-harness | 1d795802b26999897d5b3136a8245022fa13cce3 | [
"MIT"
] | null | null | null | prov_interop/tests/test_comparator.py | softwaresaved/provtoolsuite-interop-test-harness | 1d795802b26999897d5b3136a8245022fa13cce3 | [
"MIT"
] | 1 | 2021-12-17T10:00:11.000Z | 2021-12-17T10:00:11.000Z | """Unit tests for :mod:`prov_interop.comparator`.
"""
# Copyright (c) 2015 University of Southampton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import tempfile
import unittest
from prov_interop import standards
from prov_interop.component import ConfigError
from prov_interop.comparator import Comparator
from prov_interop.comparator import ComparisonError
class ComparatorTestCase(unittest.TestCase):
  """Unit tests for :class:`prov_interop.comparator.Comparator`."""
  def setUp(self):
    # Fresh comparator plus a config mapping it to two canonical formats.
    super(ComparatorTestCase, self).setUp()
    self.comparator = Comparator()
    self.file1 = None
    self.file2 = None
    self.formats = [standards.PROVN, standards.JSON]
    self.config = {Comparator.FORMATS: self.formats}
  def tearDown(self):
    # Remove any temporary files created by the individual tests.
    super(ComparatorTestCase, self).tearDown()
    for tmp in [self.file1, self.file2]:
      if tmp != None and os.path.isfile(tmp):
        os.remove(tmp)
  def test_init(self):
    # A freshly constructed comparator advertises no supported formats.
    self.assertEqual([], self.comparator.formats)
  def test_configure(self):
    # configure() records the formats listed in the configuration dict.
    self.comparator.configure(self.config)
    self.assertEqual(self.formats, self.comparator.formats)
  def test_configure_non_dict_error(self):
    # Non-dict configuration is rejected with ConfigError.
    with self.assertRaises(ConfigError):
      self.comparator.configure(123)
  def test_configure_no_formats(self):
    # Configuration lacking the FORMATS key is rejected.
    with self.assertRaises(ConfigError):
      self.comparator.configure({})
  def test_configure_non_canonical_format(self):
    # Formats outside the canonical set are rejected.
    self.config[Comparator.FORMATS].append("invalidFormat")
    with self.assertRaises(ConfigError):
      self.comparator.configure(self.config)
  def test_compare_missing_file1(self):
    # compare() fails fast when the first file does not exist.
    self.file1 = "nosuchfile." + standards.JSON
    (_, self.file2) = tempfile.mkstemp(suffix="." + standards.JSON)
    with self.assertRaises(ComparisonError):
      self.comparator.compare(self.file1, self.file2)
  def test_compare_missing_file2(self):
    # compare() fails fast when the second file does not exist.
    (_, self.file1) = tempfile.mkstemp(suffix="." + standards.JSON)
    self.file2 = "nosuchfile." + standards.JSON
    with self.assertRaises(ComparisonError):
      self.comparator.compare(self.file1, self.file2)
  def test_check_format_invalid_format(self):
    # check_format() raises for a format not in the configured list.
    self.comparator.configure(self.config)
    with self.assertRaises(ComparisonError):
      self.comparator.check_format("nosuchformat")
| 37.247191 | 70 | 0.75083 | 1,812 | 0.546606 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.363499 |
d7eb837a01bcf4d82ba4037dbb7897de0570733c | 26 | py | Python | tests/auctionbets/test_hello_world.py | sam-bailey/auctionbets | 237f2c4d1cb2e3ba2e3194aab35ec85b7bd565f4 | [
"MIT"
] | null | null | null | tests/auctionbets/test_hello_world.py | sam-bailey/auctionbets | 237f2c4d1cb2e3ba2e3194aab35ec85b7bd565f4 | [
"MIT"
] | 4 | 2021-04-11T15:06:50.000Z | 2021-04-11T19:11:43.000Z | tests/melvin/test_hello_world.py | sam-bailey/melvin | 562bd17d84d78f54eb93b77d6aa8c72556a0a31f | [
"MIT"
] | null | null | null | print("Test hello world")
| 13 | 25 | 0.730769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.692308 |
d7ebca64e3e1b4cb30d3b721f70f2e23502dcb8a | 3,564 | py | Python | monai/_extensions/loader.py | tatuanb/monai_V1 | 41e492b61c78bb3c303f38b03fe9fdc74a3c2e96 | [
"Apache-2.0"
] | 2,971 | 2019-10-16T23:53:16.000Z | 2022-03-31T20:58:24.000Z | monai/_extensions/loader.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 2,851 | 2020-01-10T16:23:44.000Z | 2022-03-31T22:14:53.000Z | monai/_extensions/loader.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 614 | 2020-01-14T19:18:01.000Z | 2022-03-31T14:06:14.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
from _thread import interrupt_main
from contextlib import contextmanager
from glob import glob
from os import path
from threading import Timer
from typing import Optional
import torch
from monai.utils.module import get_torch_version_tuple, optional_import
dir_path = path.dirname(path.realpath(__file__))
@contextmanager
def timeout(time, message):
    """Context manager raising TimeoutError(message) if its body runs longer
    than `time` seconds.

    A daemon Timer fires interrupt_main() on expiry, which surfaces inside
    the body as a KeyboardInterrupt; if the timer is still alive when a
    KeyboardInterrupt arrives, it was a genuine user interrupt and is
    re-raised unchanged.
    """
    watchdog = None
    try:
        watchdog = Timer(time, interrupt_main)
        watchdog.daemon = True
        yield watchdog.start()
    except KeyboardInterrupt as exc:
        if watchdog is not None and watchdog.is_alive():
            # Timer has not fired, so this was a real user interrupt.
            raise exc
        raise TimeoutError(message) from exc
    finally:
        # Always cancel so the timer cannot fire after the body completes.
        if watchdog is not None:
            watchdog.cancel()
def load_module(
    module_name: str, defines: Optional[dict] = None, verbose_build: bool = False, build_timeout: int = 300
):
    """
    Handles the loading of c++ extension modules.
    Args:
        module_name: Name of the module to load.
            Must match the name of the relevant source directory in the `_extensions` directory.
        defines: Dictionary containing names and values of compilation defines.
        verbose_build: Set to true to enable build logging.
        build_timeout: Time in seconds before the build will throw an exception to prevent hanging.
    Returns:
        The loaded (possibly freshly JIT-compiled) extension module object.
    Raises:
        ValueError: if no source directory named `module_name` exists.
        TimeoutError: if the build exceeds `build_timeout` seconds.
    """
    # Ensuring named module exists in _extensions directory.
    module_dir = path.join(dir_path, module_name)
    if not path.exists(module_dir):
        raise ValueError(f"No extension module named {module_name}")
    # Encode OS, Python version and torch version into the cached module name
    # so builds for different environments do not collide.
    platform_str = f"_{platform.system()}_{platform.python_version()}_"
    platform_str += "".join(f"{v}" for v in get_torch_version_tuple()[:2])
    # Adding configuration to module name.
    if defines is not None:
        module_name = "_".join([module_name] + [f"{v}" for v in defines.values()])
    # Gathering source files.
    source = glob(path.join(module_dir, "**", "*.cpp"), recursive=True)
    if torch.cuda.is_available():
        # Include CUDA kernels and tag the name with the CUDA version.
        source += glob(path.join(module_dir, "**", "*.cu"), recursive=True)
        platform_str += f"_{torch.version.cuda}"
    # Constructing compilation argument list.
    define_args = [] if not defines else [f"-D {key}={defines[key]}" for key in defines]
    # Ninja may be blocked by something out of our control.
    # This will error if the build takes longer than expected.
    with timeout(build_timeout, "Build appears to be blocked. Is there a stopped process building the same extension?"):
        load, _ = optional_import("torch.utils.cpp_extension", name="load") # main trigger some JIT config in pytorch
        # This will either run the build or return the existing .so object.
        name = module_name + platform_str.replace(".", "_")
        module = load(
            name=name, sources=source, extra_cflags=define_args, extra_cuda_cflags=define_args, verbose=verbose_build
        )
    return module
| 39.164835 | 120 | 0.693603 | 0 | 0 | 459 | 0.128788 | 475 | 0.133277 | 0 | 0 | 1,739 | 0.487935 |
d7ec3af0754886496ab74d63932b32e5dda81ec2 | 547 | py | Python | resources/__init__.py | tiralinka/amazon_fires | bda8cb2a6910be17e9cbbfb4f214a2b019efd145 | [
"MIT"
] | 1 | 2021-03-08T02:40:00.000Z | 2021-03-08T02:40:00.000Z | resources/__init__.py | tiralinka/amazon_fires | bda8cb2a6910be17e9cbbfb4f214a2b019efd145 | [
"MIT"
] | null | null | null | resources/__init__.py | tiralinka/amazon_fires | bda8cb2a6910be17e9cbbfb4f214a2b019efd145 | [
"MIT"
] | 2 | 2021-01-17T13:51:31.000Z | 2021-05-27T22:22:49.000Z | """
This package contains modules built specifically for the project in question.
Below are decribed the modules and packages used in the notebooks of this project.
Modules
-----------
polynomials:
| This module groups functions and classes for generating polynomials
| whether fitting data or directly ortogonal polynomials
| (Legendre polynomials).
plotter:
| This module groups functions for visualizations presented throughout the notebooks.
functk:
    | This module exists outside this package and contains utility functions.
""" | 28.789474 | 87 | 0.778793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 547 | 1 |
d7f03c5a45f729dea40c721a8160dc62f97f8e12 | 25,671 | py | Python | src/squad/common.py | nptdat/qanet | d092b5484347f2f3167d162cb56eeada5c5a109e | [
"MIT"
] | 12 | 2018-12-13T16:06:26.000Z | 2020-12-07T11:39:10.000Z | src/squad/common.py | nptdat/qanet | d092b5484347f2f3167d162cb56eeada5c5a109e | [
"MIT"
] | null | null | null | src/squad/common.py | nptdat/qanet | d092b5484347f2f3167d162cb56eeada5c5a109e | [
"MIT"
] | 2 | 2019-01-10T08:42:16.000Z | 2020-08-12T07:32:20.000Z | import os
from typing import List, Optional
import pickle
import numpy as np
from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align
from config import Config as cf
# Reserved vocabulary/char indices for padding and unknown tokens; the DEFAULT
# dicts seed every word/char lookup table with these entries.
PAD = 0 # TODO: choose appropriate index for these special chars
UNK = 1
DEFAULT = {'PAD': PAD, 'UNK': UNK}
DEFAULT_C = {'': PAD, 'UNK': UNK}
def word_lookup(w: str, table: dict, default=None):
    """
    Translate a word into a value by looking it up in a dict.
    The exact spelling takes priority; the lower-cased spelling is tried next.
    If neither is present, `default` is returned when given, otherwise a
    KeyError is raised.

    Args:
        w: word to translate
        table: dict used for the lookup
        default: if not None, value returned when the word is absent

    Returns:
        Translated value for the word.
    """
    for candidate in (w, w.lower()):
        if candidate in table:
            return table[candidate]
    if default is not None:
        return default
    raise KeyError('Key `{}` not found'.format(w))
def char_lookup(c: str, table: dict, default=None):
    """
    Translate a char into a value by looking it up in a dict (case-sensitive).
    If the char is absent, `default` is returned when given, otherwise a
    KeyError is raised.

    Args:
        c: char to translate
        table: dict used for the lookup
        default: if not None, value returned when the char is absent

    Returns:
        Translated value for the char.
    """
    if c not in table:
        if default is None:
            raise KeyError('Key `{}` not found'.format(c))
        return default
    return table[c]
class Vocabulary(object):
    """Word/char vocabulary built from pretrained word vectors.

    Maintains bidirectional word<->index and char<->index maps (indices 0/1
    are reserved for PAD/UNK via DEFAULT/DEFAULT_C) and converts token/char
    sequences into fixed-length index arrays.
    """
    def __init__(self, wv: dict, char_vocab: set):
        # Shift all real words/chars past the reserved PAD/UNK slots.
        offset = len(DEFAULT)
        w2id = {w: idx+offset for idx, w in enumerate(wv.keys())}
        w2id.update(DEFAULT)
        id2w = {i:w for w, i in w2id.items()}
        c2id = {c: idx+offset for idx, c in enumerate(list(char_vocab))}
        c2id.update(DEFAULT_C)
        id2c = {i:c for c, i in c2id.items()}
        self.wv = wv
        self.emb_size = len(wv['the']) # most common word that absolutely appears in the dict
        self.w2id = w2id # mapping word to index
        self.id2w = id2w # mapping index to word
        self.c2id = c2id # mapping char to index
        self.id2c = id2c # mapping index to char
    def vectorize(self, tokens: List[str], length: int):
        """
        Convert a list of text tokens into a list of `length` word indices,
        truncating long inputs and right-padding short ones with PAD.
        """
        vect = [word_lookup(t, self.w2id, default=UNK) for t in tokens]
        vect = vect[:length]
        if len(vect) < length:
            vect.extend([PAD]*(length-len(vect)))
        return vect
    def vectorize_c(self, chars_list: List[List[str]], length: int, w_length: int):
        """
        Convert a list of char lists into a `length` x `w_length` index matrix,
        truncating long inputs and right-padding with all-PAD rows.
        """
        vects = []
        PAD_VECT = [PAD]*w_length
        for chars in chars_list:
            vects.append([char_lookup(c, self.c2id, default=UNK) for c in chars])
        vects = vects[:length]
        while len(vects) < length:
            vects.append(PAD_VECT)
        return vects
    def get_embed_weights(self):
        """
        Build weights for a word embedding layer.
        Note that pre-trained word embedding is used, so no need to parameterize embed_size.
        Words missing from the pretrained vectors get random uniform rows.
        Returns:
            [N, emb_size] matrix, where N is number of VOCAB + 1 (for pad)
        """
        emb_size = len(self.wv[list(self.wv.keys())[0]])
        weights = np.zeros((len(self.id2w), emb_size))
        for i, tok in self.id2w.items():
            if tok in self.wv:
                weights[i] = self.wv[tok]
            else:
                weights[i] = np.random.uniform(0.0, 1.0, [emb_size])
        return weights
    def get_char_embed_weights(self, emb_size=64):
        """
        Initialize random-uniform weights for a char embedding layer.
        Args:
            emb_size: Dim of the vectors
        Returns:
            [len(id2c), emb_size] matrix
        """
        # NOTE(review): the `emb` alias is unused — presumably leftover.
        weights = emb = np.random.uniform(0.0, 1.0, size=(len(self.id2c), emb_size))
        return weights
    @property
    def vocab_size(self):
        # Number of word entries, including the reserved PAD/UNK slots.
        return len(self.w2id)
    def __getitem__(self, idx):
        """
        Get the pretrained vector for a word; raises KeyError if absent.
        """
        if not isinstance(idx, str):
            raise ValueError('Index must be a string')
        return word_lookup(idx, self.wv, default=None)
    def __contains__(self, idx):
        # Membership is case-tolerant, matching word_lookup's fallback.
        if not isinstance(idx, str):
            raise ValueError('Index must be a string')
        return idx in self.wv or idx.lower() in self.wv
class Span(object):
    """Inclusive token-level [start, end] span of an answer inside its context."""
    def __init__(self, start_idx: int, end_idx: int):
        # Indices of the first and last answer tokens in the context.
        self.start = start_idx
        self.end = end_idx
    @classmethod
    def allocate(cls, anchors: List[int], start_char: int, end_char: int):
        """Map a character range onto token indices.

        `anchors[i]` is the character offset at which token i begins; the
        resulting span starts at the token containing `start_char` and ends
        at the last token starting at or before `end_char`.
        """
        first = 0
        while anchors[first] < start_char:
            first += 1
        if anchors[first] > start_char:
            # Overshot: step back to the token that contains start_char.
            first -= 1
        last = first
        while last < len(anchors) and anchors[last] <= end_char:
            last += 1
        last -= 1
        return Span(first, last)
    def __str__(self):
        return "({}, {})".format(self.start, self.end)
class Answer(object):
    """One answer to a SQuAD question: raw text, tokens, chars and its span."""
    def __init__(self, answer_text: str, answer_toks: List[str], span: Span, answer_start: int):
        self.answer_text = answer_text # original answer text in JSON
        self.answer_toks = answer_toks # tokens of the original answer text
        self.answer_chars = to_chars(answer_toks, cf.WORD_LEN, cf.PAD_CHAR) # list of chars of the answer text
        self.span = span # The span (token-based index) of the answer in context
        self.answer_start = answer_start # start character in original answer text
    def vectorize(self, vocab: Vocabulary):
        # Index-encode the answer tokens and chars to fixed lengths from config.
        self.answer: List[int] = vocab.vectorize(self.answer_toks, cf.ANSWER_LEN)
        self.answer_c: List[List[int]] = vocab.vectorize_c(self.answer_chars, cf.ANSWER_LEN, cf.WORD_LEN)
    @classmethod
    def parse_json(cls, answers_js: List[dict], context: str, context_toks: List[str], anchors: List[int]):
        """Build Answer objects from the raw SQuAD JSON `answers` list."""
        answers = []
        for ans in answers_js:
            ans_text = ans['text']
            ans_start = ans['answer_start']
            ans_toks = tokenize(ans_text)
            # Identify the span from context, ans_text & start index
            span = Span.allocate(anchors, ans_start, ans_start+len(ans_text)-1)
            answers.append(Answer(ans_text, ans_toks, span, ans_start))
        return answers
class Question(object):
    """A SQuAD question with its answers and (optional) plausible answers."""
    def __init__(self, question_text: str, ques_id: str, question: List[str], answers: List[Answer], plausible_answers: List[Answer]):
        self.question_text = question_text # original question text in JSON
        self.question_toks = question # tokens of the original question text
        self.question_chars = to_chars(question, cf.WORD_LEN, cf.PAD_CHAR) # list of chars of the question text
        self.answers = answers # list of Answer object of the question
        self.ques_id = ques_id # id of the question in JSON
        self.plausible_answers = plausible_answers # SQuAD v2 unanswerable candidates
        self.paragraph = None # handle to the parent paragraph
    def set_paragraph(self, paragraph):
        # Back-reference to the owning Paragraph (set after construction).
        self.paragraph = paragraph
    def vectorize(self, vocab: Vocabulary):
        # Index-encode the question tokens/chars, then each of its answers.
        self.question: List[int] = vocab.vectorize(self.question_toks, cf.QUERY_LEN)
        self.question_c: List[List[int]] = vocab.vectorize_c(self.question_chars, cf.QUERY_LEN, cf.WORD_LEN)
        for answer in self.answers:
            answer.vectorize(vocab)
class Paragraph(object):
    """A SQuAD paragraph: context text, its questions and local vocabularies."""
    def __init__(self, raw_context: str, context_text: str, context_toks: List[str], questions: List[Question], para_idx: int, anchors: List[int]):
        self.raw_context = raw_context # original context text in JSON
        self.context_text = context_text # augmented from original context text with SPACES to guide the tokenization
        self.context_toks = context_toks # tokens of the context text
        self.context_chars = to_chars(context_toks, cf.WORD_LEN, cf.PAD_CHAR) # chars of the context
        self.questions = questions # list of Question objects
        self.local_word_vocab = self._build_local_word_vocab()
        self.local_char_vocab = self._build_local_char_vocab()
        self.para_idx = para_idx # Just for management & debug. Not used in experiment.
        self.anchors = anchors # char offset where each context token starts
    def _build_local_word_vocab(self):
        # Union of all tokens in the context, its questions and their answers.
        local_vocab = set()
        local_vocab = local_vocab.union(set(self.context_toks))
        for question in self.questions:
            local_vocab = local_vocab.union(set(question.question_toks))
            for answer in question.answers + question.plausible_answers:
                local_vocab = local_vocab.union(set(answer.answer_toks))
        return local_vocab
    def _build_local_char_vocab(self):
        # Union of all characters in the context, questions and answers.
        def char_set(tokens):
            chars = set()
            for tok in tokens:
                chars = chars.union(set(tok))
            return chars
        char_vocab = set()
        char_vocab = char_vocab.union(char_set(self.context_chars))
        for question in self.questions:
            char_vocab = char_vocab.union(char_set(question.question_chars))
            for answer in question.answers + question.plausible_answers:
                char_vocab = char_vocab.union(char_set(answer.answer_chars))
        return char_vocab
    @classmethod
    def parse_json(cls, para_js: dict, para_idx: int):
        """Build a Paragraph (with its Questions and Answers) from SQuAD JSON."""
        # Accumulate all answers' tokens first
        all_para_answers = []
        for q in para_js['qas']:
            if 'answers' in q:
                all_para_answers.extend([ans for ans in q['answers']])
            if 'plausible_answers' in q:
                all_para_answers.extend([ans for ans in q['plausible_answers']])
        # Improve the context for better tokenization
        raw_context = para_js['context']
        # context = augment_long_text(para_js['context'], all_para_answers)
        context = raw_context
        context_toks = tokenize_long_text(context)
        context_toks = [t.strip(' ') for t in context_toks]
        # anchors: char offset of each token, used to map answer spans to tokens.
        anchors = align(raw_context, context_toks)
        questions = []
        for q in para_js['qas']:
            question_text = q['question']
            q_toks = tokenize(question_text)
            ques_id = q['id']
            answers = Answer.parse_json(q['answers'], raw_context, context_toks, anchors) if 'answers' in q else []
            plausible_answers = Answer.parse_json(q['plausible_answers'], raw_context, context_toks, anchors) if 'plausible_answers' in q else []
            questions.append(Question(question_text, ques_id, q_toks, answers, plausible_answers))
        para = Paragraph(raw_context, context, context_toks, questions, para_idx, anchors)
        for ques in questions:
            ques.set_paragraph(para)
        return para
    def vectorize(self, vocab):
        """
        Vectorize paragraph context, question text & answer text based on given vocab.
        """
        self.context: List[int] = vocab.vectorize(self.context_toks, cf.CONTEXT_LEN)
        self.context_c: List[List[int]] = vocab.vectorize_c(self.context_chars, cf.CONTEXT_LEN, cf.WORD_LEN)
        for question in self.questions:
            question.vectorize(vocab)
def exact_match(gt_s, gt_e, pr_s, pr_e):
    """
    Evaluate exact match of a predicted span over a ground truth span.
    Args:
        gt_s: index of the ground truth start position
        gt_e: index of the ground truth end position
        pr_s: index of the predicted start position
        pr_e: index of the predicted end position
    Returns:
        True iff both endpoints match exactly.
    """
    return (gt_s, gt_e) == (pr_s, pr_e)
def f1(gt_s, gt_e, pr_s, pr_e):
    """
    Evaluate F1 score of a predicted span over a ground truth span,
    treating each span as its set of covered token indices.
    Args:
        gt_s: index of the ground truth start position
        gt_e: index of the ground truth end position
        pr_s: index of the predicted start position
        pr_e: index of the predicted end position
    Returns:
        Token-overlap F1 (0.0 when the spans are disjoint).
    """
    gt_tokens = set(range(gt_s, gt_e + 1))
    pr_tokens = set(range(pr_s, pr_e + 1))
    overlap = len(gt_tokens & pr_tokens)
    precision = 1. * overlap / len(pr_tokens)
    recall = 1. * overlap / len(gt_tokens)
    if precision + recall == 0.:
        return 0.
    return (2. * precision * recall) / (precision + recall)
def get_score(metric, gt_starts, gt_ends, pred_start, pred_end):
    """
    Score a prediction against every available ground-truth span and keep
    the best value.
    Args:
        metric: a metric function to calculate the score (exact_match or f1)
        gt_starts: (list) an array of start indices of the available answers
        gt_ends: (list) an array of end indices of the available answers
        pred_start: (int) predicted start index returned by a model
        pred_end: (int) predicted end index returned by a model
    Returns:
        The best (maximum) metric value over all answer spans, as a float.
    """
    per_answer = [metric(s, e, pred_start, pred_end)
                  for s, e in zip(gt_starts, gt_ends)]
    return 1.0 * np.max(per_answer)
class SquadData(object):
    """
    Container for processed SQuAD data (tokenized paragraphs, vocabulary,
    word/char inventories) plus serialization helpers.
    To save the whole object to pickle file:
    ```python
    data.save('data/squad_processed.pkl')
    ```
    To load the whole object from pickle file, and extract train & validation data
    ```python
    data = SquadData.load('data/squad_processed.pkl')
    ques_ids_train, X_train, y_train = data.train_data()
    ques_ids_valid, X_valid, y_valid = data.validation_data()
    ```
    To save structured data to binary files for fast loading:
    ```python
    data.save(np_path='data/numpy')
    ```
    To load numpy data from binary files:
    ```python
    word_vectors, char_vectors, ques_ids_train, X_train, y_train, ques_ids_valid, X_valid, y_valid = SquadData.load(np_path='data/numpy')
    ```
    """
    def __init__(self, train_paragraphs: List[Paragraph], dev_paragraphs: List[Paragraph], vocab: Vocabulary, squad_words: set, squad_chars: set):
        """
        Initializer.
        Args:
            train_paragraphs: list of Paragraph objects from train data
            dev_paragraphs: list of Paragraph objects from dev data
            vocab: Vocabulary object which store vectors of words appearing in Squad data
            squad_words: set of all tokens appearing in Squad data (context, question text, answer text).
                Note that some tokens may not appear in vocab. They are treated as unknown words.
                Note that this is a set of words, so it must not be used to map words to indices. Use Vocabulary.w2id instead.
            squad_chars: set of all characters appearing in Squad data (context, question text, answer text).
        """
        self.train_paragraphs = train_paragraphs
        self.dev_paragraphs = dev_paragraphs
        self.vocab = vocab
        self.squad_words = squad_words
        self.squad_chars = squad_chars
    def summary(self):
        """Print dataset statistics: paragraph counts, vocabulary sizes and
        how many tokens fall outside the vocabulary (unknown words)."""
        print('Num of train paragraphs: {}'.format(len(self.train_paragraphs)))
        print('Num of dev paragraphs: {}'.format(len(self.dev_paragraphs)))
        print('Num words in vocab: {}'.format(self.vocab.vocab_size))
        print('Num unique words: {}'.format(len(self.squad_words)))
        print('Num unique chars: {}'.format(len(self.squad_chars)))
        unknown_words = [w for w in self.squad_words if w not in self.vocab]
        print('Num of unknown words: {}'.format(len(unknown_words)))
    def _generate_data(self, paragraphs, dataset: str = 'train'):
        """
        Flatten a list of Paragraph objects into model-ready arrays.
        Args:
            paragraphs: list of Paragraph objects to convert
            dataset: 'train' or 'dev'. For 'train', one row is emitted per
                answer (with one-hot p1/p2 vectors); for 'dev', one row per
                question (with lists of all acceptable start/end indices,
                p1/p2 left empty).
        Returns:
            (ques_ids, [contextw, queryw, contextc, queryc], [p1, p2, start, end])
            as numpy arrays. Answers whose span falls beyond cf.CONTEXT_LEN
            are skipped (train) or dropped from the acceptable list (dev).
        """
        ques_ids = []
        contextw_inp, queryw_inp, contextc_inp, queryc_inp = [], [], [], []
        p1, p2, start, end = [], [], [], []
        long_count = 0
        for para in paragraphs:
            for ques in para.questions:
                if dataset == 'train':
                    for ans in ques.answers:
                        if ans.span.start >= cf.CONTEXT_LEN or ans.span.end >= cf.CONTEXT_LEN:
                            # Answer lies outside the clipped context window.
                            long_count += 1
                            continue
                        ques_ids.append(ques.ques_id)
                        contextw_inp.append(para.context)
                        queryw_inp.append(ques.question)
                        contextc_inp.append(para.context_c)
                        queryc_inp.append(ques.question_c)
                        # One-hot encode the start/end positions over the context.
                        vect = np.zeros(cf.CONTEXT_LEN, dtype=np.float16)
                        vect[ans.span.start] = 1.
                        p1.append(vect)
                        vect = np.zeros(cf.CONTEXT_LEN, dtype=np.float16)
                        vect[ans.span.end] = 1.
                        p2.append(vect)
                        start.append(ans.span.start)
                        end.append(ans.span.end)
                else:  # dev dataset
                    ques_ids.append(ques.ques_id)
                    contextw_inp.append(para.context)
                    queryw_inp.append(ques.question)
                    contextc_inp.append(para.context_c)
                    queryc_inp.append(ques.question_c)
                    start_list = []
                    end_list = []
                    for ans in ques.answers:
                        if ans.span.start >= cf.CONTEXT_LEN or ans.span.end >= cf.CONTEXT_LEN:
                            long_count += 1
                            continue
                        start_list.append(ans.span.start)
                        end_list.append(ans.span.end)
                    # p1, p2 are ignored in dev set
                    start.append(start_list)
                    end.append(end_list)
        print('There are {} long answers'.format(long_count))
        ques_ids = np.array(ques_ids)
        contextw_inp, queryw_inp, contextc_inp, queryc_inp = np.array(contextw_inp), np.array(queryw_inp), np.array(contextc_inp), np.array(queryc_inp)
        p1, p2, start, end = np.array(p1), np.array(p2), np.array(start), np.array(end)
        return (ques_ids, [contextw_inp, queryw_inp, contextc_inp, queryc_inp], [p1, p2, start, end])
    def train_data(self):
        """Return (ques_ids, X, y) arrays built from the train paragraphs."""
        return self._generate_data(self.train_paragraphs)
    def validation_data(self):
        """Return (ques_ids, X, y) arrays built from the dev paragraphs."""
        return self._generate_data(self.dev_paragraphs, dataset='dev')
    def search_paragraph(self, para_idx: int, dataset: str = 'train'):
        """
        Search for paragraph by index. This function is used for debug only.
        """
        paragraphs = self.train_paragraphs if dataset == 'train' else self.dev_paragraphs
        for para in paragraphs:
            if para.para_idx == para_idx:
                return para
        return None
    def search_question(self, ques_id: str, dataset: str = 'train'):
        """
        Search for question by ques_id. This function is used for debug only.
        """
        paragraphs = self.train_paragraphs if dataset == 'train' else self.dev_paragraphs
        for para in paragraphs:
            for ques in para.questions:
                if ques.ques_id == ques_id:
                    return ques
        return None
    @classmethod
    def evaluate(cls, gt_start_list, gt_end_list, pred_starts, pred_ends):
        """
        Evaluate ExactMatch score & F1 score of predictions on a validation set.
        Args:
            gt_start_list: list of start indices of multiple ground-truth answer spans
            gt_end_list: list of end indices of multiple ground-truth answer spans
            pred_starts: list of predicted start indices
            pred_ends: list of predicted end indices
        Returns:
            A hash with 2 keys: 'exact_match' & 'f1' (both percentages).
        """
        em_score = 0
        f1_score = 0
        total = 0
        for gt_starts, gt_ends, pred_start, pred_end in zip(gt_start_list, gt_end_list, pred_starts, pred_ends):
            if len(gt_starts) > 0:
                em_score += get_score(exact_match, gt_starts, gt_ends, pred_start, pred_end)
                f1_score += get_score(f1, gt_starts, gt_ends, pred_start, pred_end)
            # If gt_starts is empty, the ground-truth answer is over the limit length of the input text.
            # We give penalty for that case, that means we give 0 to EM & F1 while we increase the total.
            total += 1
        em_score = 100. * em_score / total
        f1_score = 100. * f1_score / total
        # NOTE: a stray no-op expression statement (`em_score, f1_score`) was removed here.
        return {
            'exact_match': em_score,
            'f1': f1_score
        }
    def save(self, filepath=None, np_path=None):
        """Persist the data either as one pickle (filepath) or as a set of
        .npy binary files under np_path (faster to reload)."""
        def save_data(prefix, ques_ids,
                      contextw, queryw, contextc, queryc,
                      p1, p2, start, end):
            # Write one .npy file per array, namespaced by prefix ('train'/'val').
            np.save(np_path + '/%s_ques_ids.npy' % prefix, ques_ids)
            np.save(np_path + '/%s_contextw.npy' % prefix, contextw)
            np.save(np_path + '/%s_queryw.npy' % prefix, queryw)
            np.save(np_path + '/%s_contextc.npy' % prefix, contextc)
            np.save(np_path + '/%s_queryc.npy' % prefix, queryc)
            np.save(np_path + '/%s_p1.npy' % prefix, p1)
            np.save(np_path + '/%s_p2.npy' % prefix, p2)
            np.save(np_path + '/%s_start.npy' % prefix, start)
            np.save(np_path + '/%s_end.npy' % prefix, end)
        if filepath:  # Save the SquadData object to pickle file (slow)
            print('Saving squad data to {}...'.format(filepath))
            with open(filepath, 'wb') as f:
                pickle.dump(self, f)
        else:  # Save the binary data to *.npy files (fast)
            print('Accumulating train & validation arrays from the structure...')
            t_ques_ids, X_train, y_train = self.train_data()
            v_ques_ids, X_valid, y_valid = self.validation_data()
            t_contextw, t_queryw, t_contextc, t_queryc = X_train
            t_p1, t_p2, t_start, t_end = y_train
            v_contextw, v_queryw, v_contextc, v_queryc = X_valid
            v_p1, v_p2, v_start, v_end = y_valid
            if not os.path.exists(np_path):
                os.makedirs(np_path)
            print('Saving word vectors into numpy files...')
            word_vectors = self.vocab.get_embed_weights()
            char_vectors = self.vocab.get_char_embed_weights()
            np.save(np_path + '/word_vectors.npy', word_vectors)
            np.save(np_path + '/char_vectors.npy', char_vectors)
            print('Saving train arrays into numpy files...')
            save_data(
                'train', t_ques_ids,
                t_contextw, t_queryw, t_contextc, t_queryc,
                t_p1, t_p2, t_start, t_end)
            print('Saving validation arrays into numpy files...')
            save_data(
                'val', v_ques_ids,
                v_contextw, v_queryw, v_contextc, v_queryc,
                v_p1, v_p2, v_start, v_end)
    @classmethod
    def load(cls, filepath=None, np_path=None):
        """Inverse of save(): return a SquadData instance (from pickle) or a
        flat list of numpy arrays (from .npy files under np_path)."""
        def load_data(prefix):
            # Read the per-split arrays written by save_data above.
            ques_ids = np.load(np_path + '/%s_ques_ids.npy' % prefix)
            contextw = np.load(np_path + '/%s_contextw.npy' % prefix)
            queryw = np.load(np_path + '/%s_queryw.npy' % prefix)
            contextc = np.load(np_path + '/%s_contextc.npy' % prefix)
            queryc = np.load(np_path + '/%s_queryc.npy' % prefix)
            p1 = np.load(np_path + '/%s_p1.npy' % prefix)
            p2 = np.load(np_path + '/%s_p2.npy' % prefix)
            start = np.load(np_path + '/%s_start.npy' % prefix)
            end = np.load(np_path + '/%s_end.npy' % prefix)
            return ques_ids, contextw, queryw, contextc, queryc, p1, p2, start, end
        if filepath:  # Load SquadData object from pickle file (slow)
            print('Loading squad data from pickle file {}...'.format(filepath))
            with open(filepath, 'rb') as f:
                return pickle.load(f)
        else:  # Load binary data from *.npy files (fast)
            print('Loading word vectors from numpy files...')
            word_vectors = np.load(np_path + '/word_vectors.npy')
            char_vectors = np.load(np_path + '/char_vectors.npy')
            print('Loading train arrays from numpy files...')
            t_ques_ids, t_contextw, t_queryw, t_contextc, t_queryc, t_p1, t_p2, t_start, t_end = load_data('train')
            print('Loading validation arrays from numpy files...')
            v_ques_ids, v_contextw, v_queryw, v_contextc, v_queryc, v_p1, v_p2, v_start, v_end = load_data('val')
            return [
                word_vectors,
                char_vectors,
                t_ques_ids,
                [t_contextw, t_queryw, t_contextc, t_queryc],
                [t_p1, t_p2, t_start, t_end],
                v_ques_ids,
                [v_contextw, v_queryw, v_contextc, v_queryc],
                [v_p1, v_p2, v_start, v_end]
            ]
| 41.271704 | 151 | 0.603794 | 21,922 | 0.85396 | 0 | 0 | 5,887 | 0.229325 | 0 | 0 | 7,975 | 0.310662 |
d7f060c23ded2d452cc244af1abc7de16616e4f3 | 1,332 | py | Python | test/test_mean_functions.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | [
"MIT"
] | 28 | 2018-11-05T03:01:18.000Z | 2021-04-02T18:11:05.000Z | test/test_mean_functions.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | [
"MIT"
] | 7 | 2019-06-04T21:43:40.000Z | 2021-11-04T04:19:26.000Z | test/test_mean_functions.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | [
"MIT"
] | 8 | 2019-04-03T12:28:05.000Z | 2021-12-23T10:15:34.000Z | # File: test_mean_functions.py
# File Created: Saturday, 13th July 2019 3:51:40 pm
# Author: Steven Atkinson (steven@atkinson.mn)
import pytest
import torch
from gptorch import mean_functions
class TestConstant(object):
    """Tests for the Constant mean function."""

    def test_init(self):
        output_dim = 2
        # Both the default value and an explicit value are accepted.
        mean_functions.Constant(output_dim)
        mean_functions.Constant(output_dim, val=torch.randn(output_dim))
        # A value whose dimension disagrees with the output dim must fail.
        with pytest.raises(ValueError):
            mean_functions.Constant(output_dim, val=torch.Tensor([1.0]))

    def test_forward(self):
        n, dx, dy = 5, 3, 2
        # The default constant is all zeros.
        y = mean_functions.Constant(dy)(torch.rand(n, dx))
        assert isinstance(y, torch.Tensor)
        expected = torch.zeros(n, dy)
        assert all(e == a for e, a in zip(y.flatten(), expected.flatten()))
        # A custom value is broadcast across every row of the output.
        val = torch.randn(dy)
        y = mean_functions.Constant(dy, val=val)(torch.rand(n, dx))
        assert isinstance(y, torch.Tensor)
        expected = val + torch.zeros(n, dy)
        assert all(e == a for e, a in zip(y.flatten(), expected.flatten()))
class TestZero(object):
    """Tests for the Zero mean function."""

    def test_init(self):
        # Construction alone must not raise.
        mean_functions.Zero(2)

    def test_forward(self):
        n, dx, dy = 5, 3, 2
        y = mean_functions.Zero(dy)(torch.rand(n, dx))
        assert isinstance(y, torch.Tensor)
        expected = torch.zeros(n, dy)
        assert all(e == a for e, a in zip(y.flatten(), expected.flatten()))
| 29.6 | 87 | 0.608859 | 1,132 | 0.84985 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.112613 |
d7f2887e18f1782d6580198e20f7cacc72ac9027 | 292 | py | Python | elasticine/adapter.py | Drizzt1991/plasticine | be61baa88f53bdfa666d068a14f17ccc0cfe4d02 | [
"MIT"
] | null | null | null | elasticine/adapter.py | Drizzt1991/plasticine | be61baa88f53bdfa666d068a14f17ccc0cfe4d02 | [
"MIT"
] | null | null | null | elasticine/adapter.py | Drizzt1991/plasticine | be61baa88f53bdfa666d068a14f17ccc0cfe4d02 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch
class ElasticAdapter(object):
    """Thin wrapper around the Elasticsearch client.

    Keeps the rest of the code base decoupled from the concrete driver so
    it can be swapped out or extended later.
    """

    def __init__(self, hosts, **es_params):
        # Instantiate the underlying driver; extra keyword arguments are
        # forwarded to it untouched.
        self.es = Elasticsearch(hosts, **es_params)
| 22.461538 | 73 | 0.660959 | 224 | 0.767123 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.39726 |
d7f2e97f589c36d477cdcfb9e7bf210c38f12e78 | 2,146 | py | Python | cosmos/admin/createproject.py | kuasha/peregrine | b3dd92146d26fe9e4ea589868431b590324b47d1 | [
"MIT"
] | 1 | 2018-10-12T15:12:15.000Z | 2018-10-12T15:12:15.000Z | cosmos/admin/createproject.py | kuasha/peregrine | b3dd92146d26fe9e4ea589868431b590324b47d1 | [
"MIT"
] | null | null | null | cosmos/admin/createproject.py | kuasha/peregrine | b3dd92146d26fe9e4ea589868431b590324b47d1 | [
"MIT"
] | null | null | null | """
Copyright (C) 2014 Maruf Maniruzzaman
Website: http://cosmosframework.com
Author: Maruf Maniruzzaman
License :: OSI Approved :: MIT License
"""
import os
import subprocess
from cosmos.admin.samples import barebonedef, simpledef, angulardef, angularbasicdef, adminpaneldef
def new_project(path, type=None):
    """Write a new project skeleton under *path*.

    Args:
        path: destination directory for the generated files.
        type: template flavor: 'angular', 'angularbasic', 'adminpanel'
            (default) or 'simple'.
    """
    # Copy the template list: extending barebonedef.file_data_list in place
    # would mutate the shared module-level list and accumulate entries
    # across repeated calls.
    file_data_list = list(barebonedef.file_data_list)
    if type is None:
        type = "adminpanel"
    if type == "angular":
        file_data_list.extend(angulardef.file_data_list)
    elif type == "angularbasic":
        file_data_list.extend(angularbasicdef.file_data_list)
    elif type == "adminpanel":
        file_data_list.extend(adminpaneldef.file_data_list)
    elif type == "simple":
        file_data_list.extend(simpledef.file_data_list)
    for file_data in file_data_list:
        filename = file_data["name"]
        data = file_data["data"]
        # Strip a leading slash so os.path.join treats it as relative.
        if filename.startswith('/'):
            filename = filename[1:]
        file_path = os.path.join(path, filename)
        dir_name = os.path.dirname(file_path)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        with open(file_path, 'w') as content_file:
            content_file.write(data)
    if type == "angular":
        print("-----------Cloning angular seed project--------------\n")
        try:
            subprocess.check_call(['git', "clone", "https://github.com/angular/angular-seed.git"])
            print('----------- You should run "npm install" from angular-seed directory now -------------\n')
        except subprocess.CalledProcessError:
            print("Clone failed (is git installed?). You may try to clone manually using 'git clone https://github.com/angular/angular-seed.git'")
    elif type == "adminpanel":
        print("---------------------------------------------------------------------------------------\n")
        print('----------- You should run "bower install" from the project directory now -------------\n')
        print('Bower or any other npm package is NOT required on production. Only for development purpose.\n')
if __name__ == "__main__":
    # Scaffold a project in the current working directory.
    new_project(os.getcwd())
| 37.649123 | 147 | 0.613234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 863 | 0.402144 |
d7f300fca751082084b5f11135e6ffc4d5eded17 | 1,930 | py | Python | tests/commands/mc-1.14/test_loot.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | 2 | 2021-12-28T14:10:13.000Z | 2022-01-12T16:59:20.000Z | tests/commands/mc-1.14/test_loot.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | 11 | 2021-01-18T09:00:23.000Z | 2021-01-29T09:29:04.000Z | tests/commands/mc-1.14/test_loot.py | Le0Developer/mcast | bdabd897e399ff17c734b9e02d3e1e5099674a1c | [
"MIT"
] | null | null | null |
from mcfunction.versions.mc_1_14.loot import loot, ParsedLootCommand
from mcfunction.nodes import EntityNode, PositionNode
def test_loot_spawn():
    """'loot spawn <pos> kill <entity>' parses and round-trips through str()."""
    # Annotate at the assignment; the original separate bare annotation
    # statement was a no-op.
    parsed: ParsedLootCommand = loot.parse('loot spawn 0 0 0 kill @e')
    assert parsed.target_type.value == 'spawn'
    assert isinstance(parsed.target, PositionNode)
    assert parsed.source_type.value == 'kill'
    assert isinstance(parsed.source, EntityNode)
    assert str(parsed) == 'loot spawn 0 0 0 kill @e'
def test_loot_replace():
    """'loot replace entity <target> <slot> <count> ...' exposes slot and count."""
    parsed: ParsedLootCommand = loot.parse(
        'loot replace entity @s hotbar.slot_number.0 9 '
        'kill @e')
    assert parsed.target_type.value == 'replace'
    assert parsed.target_type2.value == 'entity'
    assert isinstance(parsed.target, EntityNode)
    assert parsed.slot.value == 'hotbar.slot_number.0'
    assert parsed.count.value == 9
    assert str(parsed) == 'loot replace entity @s hotbar.slot_number.0 9 ' \
                          'kill @e'
def test_loot_fish():
    """The 'fish' source carries a namespaced loot table and a position."""
    parsed: ParsedLootCommand = loot.parse('loot spawn 0 0 0 fish test:loot_table 0 0 0')
    assert parsed.source_type.value == 'fish'
    assert parsed.source.namespace == 'test'
    assert parsed.source.name == 'loot_table'
    assert isinstance(parsed.source_position, PositionNode)
    assert str(parsed) == 'loot spawn 0 0 0 fish test:loot_table 0 0 0'
def test_loot_fish_tool():
    """An optional tool argument on the 'fish' source is parsed and kept."""
    parsed: ParsedLootCommand = loot.parse('loot spawn 0 0 0 fish test:loot_table 0 0 0 mainhand')
    assert parsed.source_tool.value == 'mainhand'
    assert str(parsed) == 'loot spawn 0 0 0 fish test:loot_table 0 0 0 ' \
                          'mainhand'
def test_loot_mine():
    """The 'mine' source takes a block position plus a tool argument."""
    parsed: ParsedLootCommand = loot.parse('loot spawn 0 0 0 mine 0 0 0 mainhand')
    assert parsed.source_tool.value == 'mainhand'
    assert str(parsed) == 'loot spawn 0 0 0 mine 0 0 0 mainhand'
| 31.129032 | 79 | 0.681347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.278756 |
d7f3e41ebb74525f88d4a1361289b542d0b673ce | 2,475 | py | Python | torch_glow/tests/nodes/quantized_batchnorm3d_relu_test.py | andrewmillspaugh/glow | d407e6171d3cacaaf64b2aae459f1c43e8ca077f | [
"Apache-2.0"
] | 1 | 2021-06-24T14:50:19.000Z | 2021-06-24T14:50:19.000Z | torch_glow/tests/nodes/quantized_batchnorm3d_relu_test.py | andrewmillspaugh/glow | d407e6171d3cacaaf64b2aae459f1c43e8ca077f | [
"Apache-2.0"
] | null | null | null | torch_glow/tests/nodes/quantized_batchnorm3d_relu_test.py | andrewmillspaugh/glow | d407e6171d3cacaaf64b2aae459f1c43e8ca077f | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch
import torch.nn as nn
from tests.utils import jitVsGlow
from torch.quantization import (
DeQuantStub,
QConfig,
QuantStub,
convert,
fuse_modules,
observer,
prepare,
)
# QConfig pairing the default activation observer with a signed 8-bit
# histogram-based weight observer (reduce_range disabled).
my_qconfig = QConfig(
    activation=observer.default_observer,
    weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),
)
class TestQuantizedBatchNorm3DRelu(unittest.TestCase):
    """Checks that a quantized BatchNorm3d+ReLU pair is fused and lowered to
    Glow's quantized::batch_norm3d_relu op with acceptable numerics."""
    def test_batchnorm_relu_basic(self):
        """
        Basic test of the PyTorch 3D batchnorm RELU Node on Glow.
        """
        class SimpleQuantizedBatchNormRelu(nn.Module):
            # Minimal quantize -> batchnorm3d -> relu -> dequantize module.
            def __init__(self, w, b, m, v):
                super(SimpleQuantizedBatchNormRelu, self).__init__()
                self.bn = torch.nn.BatchNorm3d(4)
                self.relu = torch.nn.ReLU()
                # Inject fixed affine parameters and running statistics so the
                # test is reproducible.
                self.bn.weight = torch.nn.Parameter(w)
                self.bn.bias = torch.nn.Parameter(b)
                self.bn.running_mean = m
                self.bn.running_var = v
                self.q = QuantStub()
                self.dq = DeQuantStub()
            def forward(self, x):
                qx = self.q(x)
                qy = self.bn(qx)
                qy_relu = self.relu(qy)
                y = self.dq(qy_relu)
                return y
        # Near-identity batchnorm parameters: weight ~1, bias ~0, zero mean,
        # unit variance.
        C = 4
        weight = torch.ones(C) + torch.rand(C) * 0.001
        bias = torch.rand(C) * 0.0001
        running_mean = torch.zeros(C)
        running_var = torch.ones(C)
        inputs = torch.randn((10, C, 2, 3, 4), requires_grad=False)
        model = SimpleQuantizedBatchNormRelu(weight, bias, running_mean, running_var)
        model.eval()
        # Eager-mode quantization workflow: fuse bn+relu, insert observers,
        # calibrate with one forward pass, then convert to a quantized model.
        model.qconfig = my_qconfig
        modules_to_fuse = [["bn", "relu"]]
        fuse_modules(model, modules_to_fuse, inplace=True)
        prepare(model, inplace=True)
        model.forward(inputs)
        convert(model, inplace=True)
        # Because of the difference of quantization between PyTorch & Glow
        # We set eps big enough.
        # Batchnorm introduced great accuracy issues, which could create up to
        # ~1e-2 difference in some rare cases. In order to prevent this test
        # to be flaky, atol is set to be 0.1.
        jitVsGlow(
            model,
            inputs,
            expected_fused_ops={"quantized::batch_norm3d_relu"},
            atol=1e-1,
            use_fp16=True,
        )
| 31.730769 | 87 | 0.595556 | 2,008 | 0.811313 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.15596 |
d7f42c2ee4d4b43ecf45f119dc31feffe6b465d4 | 3,878 | py | Python | tests/ext/test_paginator.py | Descent098/hyde | 7f415402cc3e007a746eb2b5bc102281fdb415bd | [
"MIT"
] | 804 | 2015-01-03T22:52:38.000Z | 2022-02-19T08:47:54.000Z | tests/ext/test_paginator.py | Descent098/hyde | 7f415402cc3e007a746eb2b5bc102281fdb415bd | [
"MIT"
] | 86 | 2015-01-16T16:56:43.000Z | 2021-10-05T05:25:25.000Z | tests/ext/test_paginator.py | Descent098/hyde | 7f415402cc3e007a746eb2b5bc102281fdb415bd | [
"MIT"
] | 161 | 2015-01-06T18:52:17.000Z | 2022-02-04T21:21:54.000Z | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from textwrap import dedent
from hyde.generator import Generator
from hyde.site import Site
from fswrap import File
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestPaginator(object):
    """End-to-end tests for hyde's paginator: generates the fixture site in
    sites/test_paginator and inspects the deployed output files."""
    def setUp(self):
        # Build a scratch copy of the fixture site and generate it fully.
        TEST_SITE.make()
        TEST_SITE.parent.child_folder(
            'sites/test_paginator').copy_contents_to(TEST_SITE)
        self.s = Site(TEST_SITE)
        self.deploy = TEST_SITE.child_folder('deploy')
        self.gen = Generator(self.s)
        self.gen.load_site_if_needed()
        self.gen.load_template_if_needed()
        self.gen.generate_all()
    def tearDown(self):
        # Remove the scratch site entirely.
        TEST_SITE.delete()
    def test_page_no_paginator(self):
        """A page without a paginator is still generated once."""
        f = File(self.deploy.child('empty.txt'))
        assert f.exists
    def test_pages_of_one(self):
        """Four posts at one-per-page produce exactly four pages."""
        pages = ['pages_of_one.txt', 'page2/pages_of_one.txt',
                 'page3/pages_of_one.txt', 'page4/pages_of_one.txt']
        files = [File(self.deploy.child(p)) for p in pages]
        for f in files:
            assert f.exists
        page5 = File(self.deploy.child('page5/pages_of_one.txt'))
        assert not page5.exists
    def test_pages_of_one_content(self):
        """Each page lists its post plus previous/next page links."""
        expected_page1_content = dedent('''\
            Another Sad Post
            /page2/pages_of_one.txt''')
        expected_page2_content = dedent('''\
            A Happy Post
            /pages_of_one.txt
            /page3/pages_of_one.txt''')
        expected_page3_content = dedent('''\
            An Angry Post
            /page2/pages_of_one.txt
            /page4/pages_of_one.txt''')
        expected_page4_content = dedent('''\
            A Sad Post
            /page3/pages_of_one.txt
            ''')
        page1 = self.deploy.child('pages_of_one.txt')
        content = File(page1).read_all()
        assert expected_page1_content == content
        page2 = self.deploy.child('page2/pages_of_one.txt')
        content = File(page2).read_all()
        assert expected_page2_content == content
        page3 = self.deploy.child('page3/pages_of_one.txt')
        content = File(page3).read_all()
        assert expected_page3_content == content
        page4 = self.deploy.child('page4/pages_of_one.txt')
        content = File(page4).read_all()
        assert expected_page4_content == content
    def test_pages_of_ten(self):
        """Four posts at ten-per-page fit on a single page."""
        page1 = self.deploy.child('pages_of_ten.txt')
        page2 = self.deploy.child('page2/pages_of_ten.txt')
        assert File(page1).exists
        assert not File(page2).exists
    def test_pages_of_ten_depends(self):
        """The single page depends on all four posts."""
        depends = self.gen.deps['pages_of_ten.txt']
        assert depends
        assert len(depends) == 4
        assert 'blog/sad-post.html' in depends
        assert 'blog/another-sad-post.html' in depends
        assert 'blog/angry-post.html' in depends
        assert 'blog/happy-post.html' in depends
    def test_pages_of_ten_content(self):
        """All four posts are listed on the single page, newest first."""
        expected_content = dedent('''\
            Another Sad Post
            A Happy Post
            An Angry Post
            A Sad Post
            ''')
        page = self.deploy.child('pages_of_ten.txt')
        content = File(page).read_all()
        assert expected_content == content
    def test_pages_of_one_depends(self):
        """The first page's dependency set still covers all four posts."""
        depends = self.gen.deps['pages_of_one.txt']
        assert depends
        assert len(depends) == 4
        assert 'blog/sad-post.html' in depends
        assert 'blog/another-sad-post.html' in depends
        assert 'blog/angry-post.html' in depends
        assert 'blog/happy-post.html' in depends
    def test_custom_file_pattern(self):
        """A custom file pattern names subsequent pages name-N.ext."""
        page1 = self.deploy.child('custom_file_pattern.txt')
        page2 = self.deploy.child('custom_file_pattern-2.txt')
        assert File(page1).exists
        assert File(page2).exists
| 30.535433 | 68 | 0.623259 | 3,617 | 0.932697 | 0 | 0 | 0 | 0 | 0 | 0 | 1,136 | 0.292935 |
d7f93ee532a3eebe25752a79442fb37dbe81093d | 2,162 | py | Python | config.py | JackToaster/Reassuring-Parable-Generator | 50a86793dfe81337c457a2ee373cfeb71af98c4a | [
"MIT"
] | 47 | 2017-06-12T03:51:15.000Z | 2021-06-15T04:59:55.000Z | config.py | bigDonJuan/Reassuring-Parable-Generator | 50a86793dfe81337c457a2ee373cfeb71af98c4a | [
"MIT"
] | 2 | 2017-07-11T18:56:06.000Z | 2017-07-26T02:44:39.000Z | config.py | bigDonJuan/Reassuring-Parable-Generator | 50a86793dfe81337c457a2ee373cfeb71af98c4a | [
"MIT"
] | 5 | 2017-06-12T07:17:40.000Z | 2021-03-14T00:11:50.000Z | import configparser as parser
import random
class config:
    """Phrase/subject configuration loaded from an INI-style file.

    The 'phrases' section maps keys to candidate phrases (one per line).
    Optional 'defaults' and 'subjects' sections enable subject walks; each
    subject's value is a comma-separated list of adjacent subjects.
    """
    def __init__(self, config_filename):
        """Load the configuration file immediately."""
        self.load_config(config_filename)

    def load_config(self, config_filename):
        """Parse *config_filename* and populate phrases/defaults/subjects.

        Every option value is split into a list of its lines.
        """
        cfg = parser.ConfigParser()
        # Keep option names case-sensitive (ConfigParser lower-cases by default).
        cfg.optionxform = str
        cfg.read(config_filename)
        dictionary = {}
        for section in cfg.sections():
            print('Found section: ' + section)
            dictionary[section] = {}
            for option in cfg.options(section):
                dictionary[section][option] = cfg.get(section, option).splitlines()
        self.phrases = dictionary['phrases']
        if 'defaults' in dictionary and 'subjects' in dictionary:
            self.has_subjects = True
            self.defaults = dictionary['defaults']
            self.subjects = dictionary['subjects']
            # Each subject maps to a comma-separated adjacency list.
            for subject in self.subjects:
                self.subjects[subject] = self.subjects[subject][0].split(',')
            print('loaded defaults and subjects')
        else:
            self.has_subjects = False

    def create_subjects(self, number=0):
        """Build a random walk of *number* related subjects.

        A value of 0 (the default) uses the 'num_subjects' entry from the
        'defaults' section. Does nothing when no subjects were loaded.
        """
        if number == 0:
            number = int(self.defaults['num_subjects'][0])
        if self.has_subjects:
            # Start anywhere, then repeatedly step to an adjacent subject.
            walk = [random.choice(list(self.subjects))]
            for i in range(1, number):
                walk.append(self.get_adjacent_subject(walk[i - 1]))
            self.current_subjects = walk

    def get_adjacent_subject(self, subject):
        """Return a random subject adjacent to *subject* in the graph."""
        return random.choice(self.subjects[subject])

    def get_subject(self):
        """Return a random subject from the current walk."""
        return random.choice(self.current_subjects)

    def get_phrase(self, key):
        """Return a random phrase for *key*, or '' when none is available.

        The literal value 'none' in the config means "emit nothing".
        """
        try:
            phrase = random.choice(self.phrases[key])
        except (KeyError, IndexError):
            # Narrowed from a bare except: unknown key or empty phrase list.
            print('Could not find phrases with key ' + key)
            return ''
        return '' if phrase == 'none' else phrase
| 30.450704 | 86 | 0.587882 | 2,116 | 0.978723 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.111933 |
d7f94eb1f89f732db0b9df4a91ff2049a0a89f89 | 4,326 | py | Python | doc/source/EXAMPLES/mu_allsky_reproj.py | kapteyn-astro/kapteyn | f12332cfd567c7c0da40628dcfc7b297971ee636 | [
"BSD-3-Clause"
] | 3 | 2016-04-28T08:55:33.000Z | 2018-07-23T18:35:58.000Z | doc/source/EXAMPLES/mu_allsky_reproj.py | kapteyn-astro/kapteyn | f12332cfd567c7c0da40628dcfc7b297971ee636 | [
"BSD-3-Clause"
] | 2 | 2020-07-23T12:28:37.000Z | 2021-07-13T18:26:06.000Z | doc/source/EXAMPLES/mu_allsky_reproj.py | kapteyn-astro/kapteyn | f12332cfd567c7c0da40628dcfc7b297971ee636 | [
"BSD-3-Clause"
] | 3 | 2017-05-03T14:01:08.000Z | 2020-07-23T12:23:28.000Z | import numpy
from kapteyn import maputils
from matplotlib.pyplot import show, figure
import csv # Read some poitions from file in Comma Separated Values format
# Some initializations
blankcol = "#334455" # Represent undefined values by this color
epsilon = 0.0000000001
figsize = (9,7) # Figure size in inches
plotbox = (0.1,0.05,0.8,0.8)
fig = figure(figsize=figsize)
frame = fig.add_axes(plotbox)
Basefits = maputils.FITSimage("allsky_raw.fits") # Here is your downloaded FITS file in rectangular coordinates
Basefits.hdr['CTYPE1'] = 'GLON-CAR' # For transformations we need to give it a projection type
Basefits.hdr['CTYPE2'] = 'GLAT-CAR' # CAR is rectangular
# Use some header values to define reprojection parameters
cdelt1 = Basefits.hdr['CDELT1']
cdelt2 = Basefits.hdr['CDELT2']
naxis1 = Basefits.hdr['NAXIS1']
naxis2 = Basefits.hdr['NAXIS2']
# Header works only with a patched wcslib 4.3
# Note that changing CRVAL1 to 180 degerees, shifts the plot 180 deg.
header = {'NAXIS' : 2, 'NAXIS1': naxis1, 'NAXIS2': naxis2,
'CTYPE1' : 'GLON-AIT',
'CRVAL1' : 0, 'CRPIX1' : naxis1//2, 'CUNIT1' : 'deg', 'CDELT1' : cdelt1,
'CTYPE2' : 'GLAT-AIT',
'CRVAL2' : 30.0, 'CRPIX2' : naxis2//2, 'CUNIT2' : 'deg', 'CDELT2' : cdelt2,
'LONPOLE' :60.0,
'PV1_1' : 0.0, 'PV1_2' : 90.0, # IMPORTANT. This is a setting from Cal.section 7.1, p 1103
}
Reprojfits = Basefits.reproject_to(header)
annim_rep = Reprojfits.Annotatedimage(frame)
annim_rep.set_colormap("heat.lut") # Set color map before creating Image object
annim_rep.set_blankcolor(blankcol) # Background are NaN's (blanks). Set color here
annim_rep.Image(vmin=30000, vmax=150000) # Just a selection of two clip levels
annim_rep.plot()
# Draw the graticule, but do not cover near -90 to prevent ambiguity
X = numpy.arange(0,390.0,15.0);
Y = numpy.arange(-75,90,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, color='w', lw=2)
grat.setp_lineswcs1(0, color='w', lw=2)
# Draw border with standard graticule, just to make the borders look smooth
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
del header['PV1_1']
del header['PV1_2']
header['LONPOLE'] = 0.0
header['LATPOLE'] = 0.0
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), skipy=True)
border.setp_lineswcs0(color='w', lw=2) # Show borders in arbitrary color (e.g. background color)
border.setp_lineswcs1(color='w', lw=2)
# Plot the 'inside' graticules
lon_constval = 0.0
lat_constval = 0.0
lon_fmt = 'Dms'; lat_fmt = 'Dms' # Only Degrees must be plotted
addangle0 = addangle1=0.0
deltapx0 = deltapx1 = 1.0
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'r', 'va':'center', 'ha':'center'}
lon_world = list(range(0,360,30))
lat_world = [-60, -30, 30, 60]
ilabs1 = grat.Insidelabels(wcsaxis=0,
world=lon_world, constval=lat_constval,
deltapx=1.0, deltapy=1.0,
addangle=addangle0, fmt=lon_fmt, **labkwargs0)
ilabs2 = grat.Insidelabels(wcsaxis=1,
world=lat_world, constval=lon_constval,
deltapx=1.0, deltapy=1.0,
addangle=addangle1, fmt=lat_fmt, **labkwargs1)
# Read marker positions (in 0h0m0s 0d0m0s format) from file
reader = csv.reader(open("positions.txt"), delimiter=' ', skipinitialspace=True)
for line in reader:
if line:
hms, dms = line
postxt = "{eq fk4-no-e} "+hms+" {} "+dms # Define the sky system of the source
print(postxt)
annim.Marker(pos=postxt, marker='*', color='yellow', ms=20)
# Plot a title
titlepos = 1.02
title = r"""All sky map in Hammer Aitoff projection (AIT) oblique with:
$(\alpha_p,\delta_p) = (0^\circ,30^\circ)$, $\phi_p = 75^\circ$ also:
$(\phi_0,\theta_0) = (0^\circ,90^\circ)$."""
t = frame.set_title(title, color='g', fontsize=13, linespacing=1.5)
t.set_y(titlepos)
annim.plot()
annim.interact_toolbarinfo()
annim_rep.interact_imagecolors()
show() | 41.596154 | 112 | 0.652566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,649 | 0.381184 |
d7f94f629fff6c0ac118aadeea502e8373fe6d7b | 11,303 | py | Python | libs/Signals.py | lionheart/TimeTracker-Linux | 64405d53fd12d2593ef4879b867ff38a4d5b9ca9 | [
"MIT"
] | 12 | 2015-02-06T19:06:49.000Z | 2019-09-24T17:58:17.000Z | libs/Signals.py | lionheart/TimeTracker-Linux | 64405d53fd12d2593ef4879b867ff38a4d5b9ca9 | [
"MIT"
] | null | null | null | libs/Signals.py | lionheart/TimeTracker-Linux | 64405d53fd12d2593ef4879b867ff38a4d5b9ca9 | [
"MIT"
] | 6 | 2015-11-22T01:58:31.000Z | 2019-11-04T22:56:38.000Z | import sys
import gtk
from datetime import datetime
import gobject
from threading import Thread
class uiSignalHelpers(object):
    """Mixin of small GTK helpers: message dialogs, show/hide signal
    handlers and window-state bookkeeping used by the signal layer."""
    def __init__(self, *args, **kwargs):
        # Cooperative init: pass through to the next class in the MRO.
        super(uiSignalHelpers, self).__init__(*args, **kwargs)
        #print 'signal helpers __init__'
    def callback(self, *args, **kwargs):
        # Forward to the next callback() implementation in the MRO.
        super(uiSignalHelpers, self).callback(*args, **kwargs)
        #print 'signal helpers callback'
    def gtk_widget_show(self, w, e = None):
        # Signal handler: show the widget; True stops further signal handling.
        w.show()
        return True
    def gtk_widget_hide(self, w, e = None):
        # Signal handler: hide the widget; True stops further signal handling.
        w.hide()
        return True
    def information_message(self, widget, message, cb = None):
        """Show a non-blocking INFO dialog; `cb` (if given) is connected to
        its 'response' signal. Returns the dialog."""
        self.attention = "INFO: %s" % message
        messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, message)
        # Hide instead of destroying when closed, so it can be reused.
        messagedialog.connect("delete-event", lambda w, e: w.hide() or True)
        if cb:
            messagedialog.connect("response", cb)
        messagedialog.set_default_response(gtk.RESPONSE_OK)
        messagedialog.show()
        messagedialog.present()
        return messagedialog
    def error_message(self, widget, message):
        """Show a blocking ERROR dialog and destroy it when dismissed."""
        self.attention = "ERROR: %s" % message
        messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CANCEL, message)
        messagedialog.run()
        messagedialog.destroy()
    def warning_message(self, widget, message):
        """Show a blocking WARNING dialog and destroy it when dismissed."""
        self.attention = "WARNING: %s" % message
        messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL, message)
        messagedialog.show()
        messagedialog.present()
        messagedialog.run()
        messagedialog.destroy()
    def question_message(self, widget, message, cb = None):
        """Show a non-blocking YES/NO dialog; `cb` (if given) is connected to
        its 'response' signal. Returns the dialog."""
        self.attention = "QUESTION: %s" % message
        messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
        messagedialog.connect("delete-event", lambda w, e: w.hide() or True)
        if cb:
            messagedialog.connect("response", cb)
        messagedialog.set_default_response(gtk.RESPONSE_YES)
        messagedialog.show()
        messagedialog.present()
        return messagedialog
    def interval_dialog(self, message):
        """Raise the main window and ask the interval question once; returns
        the dialog, or None when one is already showing."""
        if not self.interval_dialog_showing:
            if not self.timetracker_window.is_active():
                self.timetracker_window.show()
                self.timetracker_window.present()
            self.interval_dialog_showing = True
            return self.question_message(self.timetracker_window, message, self.on_interval_dialog)
        return None
    def stop_interval_dialog(self, message):
        """Raise the main window and show the 'stopped' notice once; returns
        the dialog, or None when one is already showing."""
        if not self.stop_interval_dialog_showing:
            if not self.timetracker_window.is_active():
                self.timetracker_window.show()
                self.timetracker_window.present()
            self.stop_interval_dialog_showing = True
            return self.information_message(self.timetracker_window, message, self.on_stopped)
        return None
    def set_custom_label(self, widget, text):
        #set custom label on stock button
        # Dig through the stock button's internal container to reach the
        # gtk.Label child and replace its text.
        Label = widget.get_children()[0]
        Label = Label.get_children()[0].get_children()[1]
        Label = Label.set_label(text)
    def window_state(self, widget, state):
        # Remember the latest window state (minimized/maximized/etc.).
        self.timetracker_window_state = state.new_window_state
class uiSignals(uiSignalHelpers):
    def __init__(self, *args, **kwargs):
        """Wire signal handlers for widgets defined in the Glade/UI file;
        windows/dialogs hide on close instead of being destroyed."""
        super(uiSignals, self).__init__(*args, **kwargs)
        #these are components defined inside the ui file
        #print 'signals __init__'
        self.preferences_window.connect('delete-event', lambda w, e: w.hide() or True)
        self.timetracker_window.connect('delete-event', lambda w, e: w.hide() or True)
        self.timetracker_window.connect('destroy', lambda w, e: w.hide() or True)
        self.timetracker_window.connect("window-state-event", self.window_state)
        self.about_dialog.connect("delete-event", lambda w, e: w.hide() or True)
        self.about_dialog.connect("response", lambda w, e: w.hide() or True)
        # Ctrl+Enter in the notes field submits (see on_textview_ctrl_enter).
        self.notes_textview.connect('key_press_event', self.on_textview_ctrl_enter)
def callback(self, *args, **kwargs): #stub
super(uiSignals, self).callback(*args, **kwargs) #executed after init, hopefully this will let me inject interrupts
#print 'signals callback'
self.icon.connect('activate', self.left_click)
self.icon.connect("popup-menu", self.right_click)
if sys.platform == "win32":
from gtkwin32 import GTKWin32Ext
self.timetracker_window.realize()
self.win32ext = GTKWin32Ext(self.timetracker_window)
self.win32ext.add_notify_icon()
def before_init(self): #stub for later
#print 'signals before init'
pass
def after_init(self): #init any other callback we can't setup in the actual init phase
#print 'signals after init'
self.project_combobox_handler = self.project_combobox.connect('changed', self.on_project_combobox_changed)
self.task_combobox_handler = self.task_combobox.connect('changed', self.on_task_combobox_changed)
def on_show_about_dialog(self, widget):
self.about_dialog.show()
def on_interval_dialog(self, dialog, a): #interval_dialog callback
if a == gtk.RESPONSE_NO:
self.refresh_and_show()
else:
#keep the timer running
self.running = True
self.current_selected_project_id = self.last_project_id
self.current_selected_task_id = self.last_task_id
self.current_notes = self.get_notes(self.last_notes)
self.current_hours = "%0.02f" % round(float(self.last_hours) + float(self.interval), 2)
self.current_text = self.last_text
self.current_entry_id = self.last_entry_id
entry = self.harvest.update(self.current_entry_id, {#append to existing timer
'notes': self.current_notes,
'hours': self.current_hours,
'project_id': self.current_project_id,
'task_id': self.current_task_id
})
self.refresh_and_show()
self.timetracker_window.hide() #hide timetracker and continue task
dialog.destroy()
self.attention = None
self.interval_dialog_showing = False
def on_textview_ctrl_enter(self, widget, event):
'''
submit clicked event on ctrl+enter in notes textview
'''
if event.state == gtk.gdk.CONTROL_MASK and \
gtk.gdk.keyval_name(event.keyval) == "Return":
self.submit_button.emit('clicked')
def on_stopped(self, dialog):
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
dialog.destroy()
self.attention = None
self.stop_interval_dialog_showing = False
def on_save_preferences_button_clicked(self, widget):
if self.running: #if running it will turn off, lets empty the comboboxes
#stop the timer
#self.toggle_current_timer(self.current_entry_id) #maybe add pref option to kill timer on pref change?
if self.interval_dialog_instance:
self.interval_dialog_instance.hide() #hide the dialog
self.stop_and_refactor_time()
self.get_prefs()
if self.connect_to_harvest():
self.preferences_window.hide()
self.timetracker_window.show()
self.timetracker_window.present()
def on_task_combobox_changed(self, widget):
new_idx = widget.get_active()
if new_idx != -1:
if new_idx != self.current_selected_task_idx: #-1 is sent from pygtk loop or something
self.current_selected_task_id = self.get_combobox_selection(widget)
self.current_selected_task_idx = new_idx
self.refresh_comboboxes()
def on_project_combobox_changed(self, widget):
self.current_selected_project_id = self.get_combobox_selection(widget)
new_idx = widget.get_active()
if new_idx != -1:
#reset task when new project is selected
self.current_selected_project_idx = new_idx
self.current_selected_task_id = None
self.current_selected_task_idx = 0
self.refresh_comboboxes()
def on_show_preferences(self, widget):
self.preferences_window.show()
self.preferences_window.present()
def on_away_from_desk(self, widget):
#toggle away state
if self.running:
self.away_from_desk = True if not self.away_from_desk else False
def on_check_for_updates(self, widget):
pass
def on_top(self, widget):
self.always_on_top = False if self.always_on_top else True
self.timetracker_window.set_keep_above(self.always_on_top)
def on_submit_button_clicked(self, widget):
self.away_from_desk = False
self.attention = None
self.append_add_entry()
self.set_textview_text(self.notes_textview, "")
self.notes_textview.grab_focus()
def on_stop_timer(self, widget):
self.stop_and_refactor_time()
def on_quit(self, widget):
if self.running and self.harvest:
self.harvest.toggle_timer(self.current_entry_id)
gtk.main_quit()
def refresh_and_show(self):
self.set_entries()
self.timetracker_window.show()
self.timetracker_window.present()
self.notes_textview.grab_focus()
def on_refresh(self, widget):
self.refresh_and_show()
def left_click(self, widget):
self.refresh_and_show()
def right_click(self, widget, button, time):
#create popup menu
menu = gtk.Menu()
refresh = gtk.ImageMenuItem(gtk.STOCK_REFRESH)
refresh.connect("activate", self.on_refresh)
menu.append(refresh)
if self.running:
stop_timer = gtk.MenuItem("Stop Timer")
stop_timer.connect("activate", self.on_stop_timer)
menu.append(stop_timer)
if not self.away_from_desk:
away = gtk.ImageMenuItem(gtk.STOCK_NO)
away.set_label("Away from desk")
else:
away = gtk.ImageMenuItem(gtk.STOCK_YES)
away.set_label("Back at desk")
away.connect("activate", self.on_away_from_desk)
menu.append(away)
top = gtk.MenuItem("Always on top")
prefs = gtk.MenuItem("Preferences")
about = gtk.MenuItem("About")
quit = gtk.MenuItem("Quit")
top.connect("activate", self.on_top)
prefs.connect("activate", self.on_show_preferences)
about.connect("activate", self.on_show_about_dialog)
quit.connect("activate", self.on_quit)
menu.append(prefs)
menu.append(top)
menu.append(about)
menu.append(quit)
menu.show_all()
menu.popup(None, None, gtk.status_icon_position_menu, button, time, self.icon)
| 37.427152 | 153 | 0.654605 | 11,202 | 0.991064 | 0 | 0 | 0 | 0 | 0 | 0 | 1,344 | 0.118906 |
d7f9aad3d7bc607d82f1ebb3a981da2c6ca55952 | 21,622 | py | Python | tests/app/main/views/test_feedback.py | alphagov-mirror/notifications-admin | 04d051df6b85cf596a7d6d0f28474b04673e420a | [
"MIT"
] | null | null | null | tests/app/main/views/test_feedback.py | alphagov-mirror/notifications-admin | 04d051df6b85cf596a7d6d0f28474b04673e420a | [
"MIT"
] | null | null | null | tests/app/main/views/test_feedback.py | alphagov-mirror/notifications-admin | 04d051df6b85cf596a7d6d0f28474b04673e420a | [
"MIT"
] | null | null | null | from functools import partial
from unittest.mock import ANY, PropertyMock
import pytest
from bs4 import BeautifulSoup, element
from flask import url_for
from freezegun import freeze_time
from app.main.views.feedback import in_business_hours
from app.models.feedback import (
GENERAL_TICKET_TYPE,
PROBLEM_TICKET_TYPE,
QUESTION_TICKET_TYPE,
)
from tests.conftest import normalize_spaces
def no_redirect():
    """Return a stand-in for a ``url_for`` partial that yields no location.

    Used in parametrized cases where the response is expected to have no
    redirect (``location == None``); the callable accepts the same
    ``_external`` keyword the real partials take.
    """
    def _no_location(_external=True):
        return None
    return _no_location
# The signed-in support page offers the two ticket-type radios.
def test_get_support_index_page(
    client_request,
):
    page = client_request.get('.support')
    assert page.select_one('form')['method'] == 'post'
    assert 'action' not in page.select_one('form')
    assert normalize_spaces(page.select_one('h1').text) == 'Support'
    assert normalize_spaces(
        page.select_one('form label[for=support_type-0]').text
    ) == 'Report a problem'
    assert page.select_one('form input#support_type-0')['value'] == 'report-problem'
    assert normalize_spaces(
        page.select_one('form label[for=support_type-1]').text
    ) == 'Ask a question or give feedback'
    assert page.select_one('form input#support_type-1')['value'] == 'ask-question-give-feedback'
    assert normalize_spaces(
        page.select_one('form button[type=submit]').text
    ) == 'Continue'
# Signed-out visitors are first asked who they are (public sector vs public).
def test_get_support_index_page_when_signed_out(
    client_request,
):
    client_request.logout()
    page = client_request.get('.support')
    assert page.select_one('form')['method'] == 'post'
    assert 'action' not in page.select_one('form')
    assert normalize_spaces(
        page.select_one('form label[for=who-0]').text
    ) == (
        'I work in the public sector and need to send emails, text messages or letters'
    )
    assert page.select_one('form input#who-0')['value'] == 'public-sector'
    assert normalize_spaces(
        page.select_one('form label[for=who-1]').text
    ) == (
        'I’m a member of the public with a question for the government'
    )
    assert page.select_one('form input#who-1')['value'] == 'public'
    assert normalize_spaces(
        page.select_one('form button[type=submit]').text
    ) == 'Continue'
# Picking a support type shows the matching form, pre-filled with the user's email.
@freeze_time('2016-12-12 12:00:00.000000')
@pytest.mark.parametrize('support_type, expected_h1', [
    (PROBLEM_TICKET_TYPE, 'Report a problem'),
    (QUESTION_TICKET_TYPE, 'Ask a question or give feedback'),
])
def test_choose_support_type(
    client_request,
    mock_get_non_empty_organisations_and_services_for_user,
    support_type,
    expected_h1
):
    page = client_request.post(
        'main.support',
        _data={'support_type': support_type},
        _follow_redirects=True,
    )
    assert page.h1.string.strip() == expected_h1
    assert not page.select_one('input[name=name]')
    assert not page.select_one('input[name=email_address]')
    assert page.find('form').find('p').text.strip() == (
        'We’ll reply to test@user.gov.uk'
    )
# Signed-out public-sector users get the full contact form (name + email fields).
@freeze_time('2016-12-12 12:00:00.000000')
def test_get_support_as_someone_in_the_public_sector(
    client_request,
):
    client_request.logout()
    page = client_request.post(
        'main.support',
        _data={'who': 'public-sector'},
        _follow_redirects=True,
    )
    assert normalize_spaces(page.select('h1')) == (
        'Contact GOV.UK Notify support'
    )
    assert page.select_one('form textarea[name=feedback]')
    assert page.select_one('form input[name=name]')
    assert page.select_one('form input[name=email_address]')
    assert page.select_one('form button[type=submit]')
# Members of the public are pointed elsewhere and get no contact form at all.
def test_get_support_as_member_of_public(
    client_request,
):
    client_request.logout()
    page = client_request.post(
        'main.support',
        _data={'who': 'public'},
        _follow_redirects=True,
    )
    assert normalize_spaces(page.select('h1')) == (
        'The GOV.UK Notify service is for people who work in the government'
    )
    assert len(page.select('h2 a')) == 3
    assert not page.select('form')
    assert not page.select('input')
    assert not page.select('form [type=submit]')
# Only the known ticket types resolve; anything else 404s.
@freeze_time('2016-12-12 12:00:00.000000')
@pytest.mark.parametrize('ticket_type, expected_status_code', [
    (PROBLEM_TICKET_TYPE, 200),
    (QUESTION_TICKET_TYPE, 200),
    ('gripe', 404)
])
def test_get_feedback_page(client, ticket_type, expected_status_code):
    response = client.get(url_for('main.feedback', ticket_type=ticket_type))
    assert response.status_code == expected_status_code
# Anonymous submissions pass the typed-in name/email straight to Zendesk.
@freeze_time('2016-12-12 12:00:00.000000')
@pytest.mark.parametrize('ticket_type', [PROBLEM_TICKET_TYPE, QUESTION_TICKET_TYPE])
def test_passed_non_logged_in_user_details_through_flow(client, mocker, ticket_type):
    mock_post = mocker.patch('app.main.views.feedback.zendesk_client.create_ticket')
    data = {'feedback': 'blah', 'name': 'Steve Irwin', 'email_address': 'rip@gmail.com'}
    resp = client.post(
        url_for('main.feedback', ticket_type=ticket_type),
        data=data
    )
    assert resp.status_code == 302
    assert resp.location == url_for(
        'main.thanks',
        out_of_hours_emergency=False,
        email_address_provided=True,
        _external=True,
    )
    mock_post.assert_called_with(
        subject='Notify feedback',
        message='blah\n',
        user_email='rip@gmail.com',
        user_name='Steve Irwin',
        ticket_type=ticket_type,
        p1=ANY
    )
# Signed-in submissions use the session user's details (form name/email ignored)
# and append the current service name and dashboard link to the message.
@freeze_time("2016-12-12 12:00:00.000000")
@pytest.mark.parametrize('data', [
    {'feedback': 'blah'},
    {'feedback': 'blah', 'name': 'Ignored', 'email_address': 'ignored@email.com'}
])
@pytest.mark.parametrize('ticket_type', [PROBLEM_TICKET_TYPE, QUESTION_TICKET_TYPE])
def test_passes_user_details_through_flow(
    client_request,
    mock_get_non_empty_organisations_and_services_for_user,
    mocker,
    ticket_type,
    data
):
    mock_post = mocker.patch('app.main.views.feedback.zendesk_client.create_ticket')
    client_request.post(
        'main.feedback',
        ticket_type=ticket_type,
        _data=data,
        _expected_status=302,
        _expected_redirect=url_for(
            'main.thanks',
            email_address_provided=True,
            out_of_hours_emergency=False,
            _external=True,
        ),
    )
    mock_post.assert_called_with(
        subject='Notify feedback',
        message=ANY,
        user_email='test@user.gov.uk',
        user_name='Test User',
        ticket_type=ticket_type,
        p1=ANY
    )
    assert mock_post.call_args[1]['message'] == '\n'.join([
        'blah',
        'Service: "service one"',
        url_for(
            'main.service_dashboard',
            service_id='596364a0-858e-42c8-9062-a8fe822260eb',
            _external=True
        ),
        ''
    ])
# Signed-out users must supply an email address for problems and questions.
@freeze_time('2016-12-12 12:00:00.000000')
@pytest.mark.parametrize('data', [
    {'feedback': 'blah', 'name': 'Fred'},
    {'feedback': 'blah'},
])
@pytest.mark.parametrize('ticket_type', [
    PROBLEM_TICKET_TYPE,
    QUESTION_TICKET_TYPE,
])
def test_email_address_required_for_problems_and_questions(
    client_request,
    mocker,
    data,
    ticket_type,
):
    mocker.patch('app.main.views.feedback.zendesk_client')
    client_request.logout()
    page = client_request.post(
        'main.feedback',
        ticket_type=ticket_type,
        _data=data,
        _expected_status=200
    )
    assert isinstance(page.find('span', {'class': 'govuk-error-message'}), element.Tag)
# An invalid email address is rejected with a validation error message.
@freeze_time('2016-12-12 12:00:00.000000')
@pytest.mark.parametrize('ticket_type', (
    PROBLEM_TICKET_TYPE, QUESTION_TICKET_TYPE
))
def test_email_address_must_be_valid_if_provided_to_support_form(
    client,
    mocker,
    ticket_type,
):
    response = client.post(
        url_for('main.feedback', ticket_type=ticket_type),
        data={
            'feedback': 'blah',
            'email_address': 'not valid',
        },
    )
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert normalize_spaces(page.select_one('span.govuk-error-message').text) == (
        'Error: Enter a valid email address'
    )
# Only out-of-hours problems marked severe become P1 (out-of-hours emergency).
@pytest.mark.parametrize('ticket_type, severe, is_in_business_hours, is_out_of_hours_emergency', [
    # business hours, never an emergency
    (PROBLEM_TICKET_TYPE, 'yes', True, False),
    (QUESTION_TICKET_TYPE, 'yes', True, False),
    (PROBLEM_TICKET_TYPE, 'no', True, False),
    (QUESTION_TICKET_TYPE, 'no', True, False),
    # out of hours, if the user says it’s not an emergency
    (PROBLEM_TICKET_TYPE, 'no', False, False),
    (QUESTION_TICKET_TYPE, 'no', False, False),
    # out of hours, only problems can be emergencies
    (PROBLEM_TICKET_TYPE, 'yes', False, True),
    (QUESTION_TICKET_TYPE, 'yes', False, False),
])
def test_urgency(
    client_request,
    mock_get_non_empty_organisations_and_services_for_user,
    mocker,
    ticket_type,
    severe,
    is_in_business_hours,
    is_out_of_hours_emergency,
):
    mocker.patch('app.main.views.feedback.in_business_hours', return_value=is_in_business_hours)
    mock_post = mocker.patch('app.main.views.feedback.zendesk_client.create_ticket')
    client_request.post(
        'main.feedback',
        ticket_type=ticket_type,
        severe=severe,
        _data={'feedback': 'blah', 'email_address': 'test@example.com'},
        _expected_status=302,
        _expected_redirect=url_for(
            'main.thanks',
            out_of_hours_emergency=is_out_of_hours_emergency,
            email_address_provided=True,
            _external=True,
        ),
    )
    assert mock_post.call_args[1]['p1'] == is_out_of_hours_emergency
# Named parametrize cases for test_redirects_to_triage below.
ids, params = zip(*[
    ('non-logged in users always have to triage', (
        GENERAL_TICKET_TYPE, False, False, True,
        302, partial(url_for, 'main.triage', ticket_type=GENERAL_TICKET_TYPE)
    )),
    ('trial services are never high priority', (
        PROBLEM_TICKET_TYPE, False, True, False,
        200, no_redirect()
    )),
    ('we can triage in hours', (
        PROBLEM_TICKET_TYPE, True, True, True,
        200, no_redirect()
    )),
    ('only problems are high priority', (
        QUESTION_TICKET_TYPE, False, True, True,
        200, no_redirect()
    )),
    ('should triage out of hours', (
        PROBLEM_TICKET_TYPE, False, True, True,
        302, partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE)
    ))
])
# GET /feedback redirects to the triage question where the cases above say so.
@pytest.mark.parametrize(
    (
        'ticket_type, is_in_business_hours, logged_in, has_live_services,'
        'expected_status, expected_redirect'
    ),
    params, ids=ids
)
def test_redirects_to_triage(
    client,
    api_user_active,
    mocker,
    mock_get_user,
    ticket_type,
    is_in_business_hours,
    logged_in,
    has_live_services,
    expected_status,
    expected_redirect,
):
    mocker.patch(
        'app.models.user.User.live_services',
        new_callable=PropertyMock,
        return_value=[{}, {}] if has_live_services else [],
    )
    mocker.patch('app.main.views.feedback.in_business_hours', return_value=is_in_business_hours)
    if logged_in:
        client.login(api_user_active)
    response = client.get(url_for('main.feedback', ticket_type=ticket_type))
    assert response.status_code == expected_status
    assert response.location == expected_redirect(_external=True)
# The triage page shows yes/no radios under the right heading per ticket type.
@pytest.mark.parametrize('ticket_type, expected_h1', (
    (PROBLEM_TICKET_TYPE, 'Report a problem'),
    (GENERAL_TICKET_TYPE, 'Contact GOV.UK Notify support'),
))
def test_options_on_triage_page(
    client_request,
    ticket_type,
    expected_h1,
):
    page = client_request.get('main.triage', ticket_type=ticket_type)
    assert normalize_spaces(page.select_one('h1').text) == expected_h1
    assert page.select('form input[type=radio]')[0]['value'] == 'yes'
    assert page.select('form input[type=radio]')[1]['value'] == 'no'
# A message posted just before triage is stashed in the session and restored
# (then cleared) when the feedback form is shown again.
def test_doesnt_lose_message_if_post_across_closing(
    client_request,
    mocker,
):
    mocker.patch('app.models.user.User.live_services', return_value=True)
    mocker.patch('app.main.views.feedback.in_business_hours', return_value=False)
    page = client_request.post(
        'main.feedback',
        ticket_type=PROBLEM_TICKET_TYPE,
        _data={'feedback': 'foo'},
        _expected_status=302,
        _expected_redirect=url_for('.triage', ticket_type=PROBLEM_TICKET_TYPE, _external=True),
    )
    with client_request.session_transaction() as session:
        assert session['feedback_message'] == 'foo'
    page = client_request.get(
        'main.feedback',
        ticket_type=PROBLEM_TICKET_TYPE,
        severe='yes',
    )
    with client_request.session_transaction() as session:
        assert page.find('textarea', {'name': 'feedback'}).text == '\r\nfoo'
        assert 'feedback_message' not in session
# Business hours are 09:30-17:30 UK time on working days (not weekends/holidays).
@pytest.mark.parametrize('when, is_in_business_hours', [
    ('2016-06-06 09:29:59+0100', False), # opening time, summer and winter
    ('2016-12-12 09:29:59+0000', False),
    ('2016-06-06 09:30:00+0100', True),
    ('2016-12-12 09:30:00+0000', True),
    ('2016-12-12 12:00:00+0000', True), # middle of the day
    ('2016-12-12 17:29:59+0000', True), # closing time
    ('2016-12-12 17:30:00+0000', False),
    ('2016-12-10 12:00:00+0000', False), # Saturday
    ('2016-12-11 12:00:00+0000', False), # Sunday
    ('2016-01-01 12:00:00+0000', False), # Bank holiday
])
def test_in_business_hours(when, is_in_business_hours):
    with freeze_time(when):
        assert in_business_hours() == is_in_business_hours
# Answering the triage question redirects to the feedback form with severe= set.
@pytest.mark.parametrize('ticket_type', (
    GENERAL_TICKET_TYPE,
    PROBLEM_TICKET_TYPE,
))
@pytest.mark.parametrize('choice, expected_redirect_param', [
    ('yes', 'yes'),
    ('no', 'no'),
])
def test_triage_redirects_to_correct_url(
    client_request,
    ticket_type,
    choice,
    expected_redirect_param,
):
    client_request.post(
        'main.triage',
        ticket_type=ticket_type,
        _data={'severe': choice},
        _expected_status=302,
        _expected_redirect=url_for(
            'main.feedback',
            ticket_type=ticket_type,
            severe=expected_redirect_param,
            _external=True,
        ),
    )
# The back link points to triage only for recognised severe values.
@pytest.mark.parametrize('extra_args, expected_back_link', [
    (
        {'severe': 'yes'},
        partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE)
    ),
    (
        {'severe': 'no'},
        partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE)
    ),
    (
        {'severe': 'foo'}, # hacking the URL
        partial(url_for, 'main.support')
    ),
    (
        {},
        partial(url_for, 'main.support')
    ),
])
@freeze_time('2012-12-12 12:12')
def test_back_link_from_form(
    client_request,
    mock_get_non_empty_organisations_and_services_for_user,
    extra_args,
    expected_back_link,
):
    page = client_request.get(
        'main.feedback',
        ticket_type=PROBLEM_TICKET_TYPE,
        **extra_args
    )
    assert page.select_one('.govuk-back-link')['href'] == expected_back_link()
    assert normalize_spaces(page.select_one('h1').text) == 'Report a problem'
# Out-of-hours severe problems from signed-out users go to the bat phone page;
# signed-in users are never redirected there.
@pytest.mark.parametrize(
    (
        'is_in_business_hours, severe,'
        'expected_status_code, expected_redirect,'
        'expected_status_code_when_logged_in, expected_redirect_when_logged_in'
    ),
    [
        (
            True, 'yes',
            200, no_redirect(),
            200, no_redirect()
        ),
        (
            True, 'no',
            200, no_redirect(),
            200, no_redirect()
        ),
        (
            False, 'no',
            200, no_redirect(),
            200, no_redirect(),
        ),
        # Treat empty query param as mangled URL – ask question again
        (
            False, '',
            302, partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE),
            302, partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE),
        ),
        # User hasn’t answered the triage question
        (
            False, None,
            302, partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE),
            302, partial(url_for, 'main.triage', ticket_type=PROBLEM_TICKET_TYPE),
        ),
        # Escalation is needed for non-logged-in users
        (
            False, 'yes',
            302, partial(url_for, 'main.bat_phone'),
            200, no_redirect(),
        ),
    ]
)
def test_should_be_shown_the_bat_email(
    client,
    active_user_with_permissions,
    mocker,
    service_one,
    mock_get_non_empty_organisations_and_services_for_user,
    is_in_business_hours,
    severe,
    expected_status_code,
    expected_redirect,
    expected_status_code_when_logged_in,
    expected_redirect_when_logged_in,
):
    mocker.patch('app.main.views.feedback.in_business_hours', return_value=is_in_business_hours)
    feedback_page = url_for('main.feedback', ticket_type=PROBLEM_TICKET_TYPE, severe=severe)
    response = client.get(feedback_page)
    assert response.status_code == expected_status_code
    assert response.location == expected_redirect(_external=True)
    # logged in users should never be redirected to the bat email page
    client.login(active_user_with_permissions, mocker, service_one)
    logged_in_response = client.get(feedback_page)
    assert logged_in_response.status_code == expected_status_code_when_logged_in
    assert logged_in_response.location == expected_redirect_when_logged_in(_external=True)
# Same escalation rules for general questions (always treated as out of hours).
@pytest.mark.parametrize(
    (
        'severe,'
        'expected_status_code, expected_redirect,'
        'expected_status_code_when_logged_in, expected_redirect_when_logged_in'
    ),
    [
        # User hasn’t answered the triage question
        (
            None,
            302, partial(url_for, 'main.triage', ticket_type=GENERAL_TICKET_TYPE),
            302, partial(url_for, 'main.triage', ticket_type=GENERAL_TICKET_TYPE),
        ),
        # Escalation is needed for non-logged-in users
        (
            'yes',
            302, partial(url_for, 'main.bat_phone'),
            200, no_redirect(),
        ),
    ]
)
def test_should_be_shown_the_bat_email_for_general_questions(
    client,
    active_user_with_permissions,
    mocker,
    service_one,
    mock_get_non_empty_organisations_and_services_for_user,
    severe,
    expected_status_code,
    expected_redirect,
    expected_status_code_when_logged_in,
    expected_redirect_when_logged_in,
):
    mocker.patch('app.main.views.feedback.in_business_hours', return_value=False)
    feedback_page = url_for('main.feedback', ticket_type=GENERAL_TICKET_TYPE, severe=severe)
    response = client.get(feedback_page)
    assert response.status_code == expected_status_code
    assert response.location == expected_redirect(_external=True)
    # logged in users should never be redirected to the bat email page
    client.login(active_user_with_permissions, mocker, service_one)
    logged_in_response = client.get(feedback_page)
    assert logged_in_response.status_code == expected_status_code_when_logged_in
    assert logged_in_response.location == expected_redirect_when_logged_in(_external=True)
# The bat phone page links back to support and on to the non-severe form;
# signed-in users skip it entirely.
def test_bat_email_page(
    client,
    active_user_with_permissions,
    mocker,
    service_one,
):
    bat_phone_page = url_for('main.bat_phone')
    response = client.get(bat_phone_page)
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.select('main a')[0].text == 'Back'
    assert page.select('main a')[0]['href'] == url_for('main.support')
    assert page.select('main a')[2].text == 'Fill in this form'
    assert page.select('main a')[2]['href'] == url_for('main.feedback', ticket_type=PROBLEM_TICKET_TYPE, severe='no')
    next_page_response = client.get(page.select('main a')[2]['href'])
    next_page = BeautifulSoup(next_page_response.data.decode('utf-8'), 'html.parser')
    assert next_page.h1.text.strip() == 'Report a problem'
    client.login(active_user_with_permissions, mocker, service_one)
    logged_in_response = client.get(bat_phone_page)
    assert logged_in_response.status_code == 302
    assert logged_in_response.location == url_for('main.feedback', ticket_type=PROBLEM_TICKET_TYPE, _external=True)
# The thanks page wording depends on emergency status, whether an email was
# given, and whether we are in business hours.
@pytest.mark.parametrize('out_of_hours_emergency, email_address_provided, out_of_hours, message', (
    # Out of hours emergencies trump everything else
    (
        True, True, True,
        'We’ll reply in the next 30 minutes.',
    ),
    (
        True, False, False, # Not a real scenario
        'We’ll reply in the next 30 minutes.',
    ),
    # Anonymous tickets don’t promise a reply
    (
        False, False, False,
        'We’ll read your message in the next 30 minutes.',
    ),
    (
        False, False, True,
        'We’ll read your message when we’re back in the office.',
    ),
    # When we look at your ticket depends on whether we’re in normal
    # business hours
    (
        False, True, False,
        'We’ll read your message in the next 30 minutes and reply within one working day.',
    ),
    (
        False, True, True,
        'We’ll reply within one working day.'
    ),
))
def test_thanks(
    client_request,
    mocker,
    api_user_active,
    mock_get_user,
    out_of_hours_emergency,
    email_address_provided,
    out_of_hours,
    message,
):
    mocker.patch('app.main.views.feedback.in_business_hours', return_value=(not out_of_hours))
    page = client_request.get(
        'main.thanks',
        out_of_hours_emergency=out_of_hours_emergency,
        email_address_provided=email_address_provided,
    )
    assert normalize_spaces(page.find('main').find('p').text) == message
| 31.021521 | 117 | 0.668671 | 0 | 0 | 0 | 0 | 16,146 | 0.745705 | 0 | 0 | 5,917 | 0.273277 |
d7fb0bf23e6532c9112d4c611274898b7e7bac1e | 3,022 | py | Python | models/model.py | lyjeff/dogs-vs-cats | 727c4b299ff38aa04f9e5928e8696c0afcb6f6bf | [
"MIT"
] | null | null | null | models/model.py | lyjeff/dogs-vs-cats | 727c4b299ff38aa04f9e5928e8696c0afcb6f6bf | [
"MIT"
] | null | null | null | models/model.py | lyjeff/dogs-vs-cats | 727c4b299ff38aa04f9e5928e8696c0afcb6f6bf | [
"MIT"
] | null | null | null | import sys
import torch.nn as nn
from torchsummary import summary
from torchvision.models import vgg19, resnet50, densenet161, googlenet, inception_v3
from .MyCNN import MyCNN
def VGG19(all=False):
    """Return an ImageNet-pretrained VGG-19 with a 2-class output layer.

    all -- when False (default) the pretrained weights are frozen so only the
           replaced final layer is trained.
    """
    model = vgg19(pretrained=True)
    # freeze the pretrained parameters
    if all is False:
        for param in model.parameters():
            param.requires_grad = False
    # Replace the last fully-connected layer
    # Parameters of newly constructed modules have requires_grad=True by default
    model.classifier[6] = nn.Linear(4096, 2)
    return model
def VGG19_2(all=False):
    """Return an ImageNet-pretrained VGG-19 with its classifier tail replaced
    by 4096->1024->2 linear layers.

    all -- when False (default) the pretrained weights are frozen.
    """
    model = vgg19(pretrained=True)
    # freeze the pretrained parameters
    if all is False:
        for param in model.parameters():
            param.requires_grad = False
    model.classifier[3] = nn.Linear(4096, 1024)
    model.classifier[6] = nn.Linear(1024, 2)
    return model
def ResNet(all=False):
    """Return an ImageNet-pretrained ResNet-50 with a 2-class output layer.

    all -- when False (default) the pretrained weights are frozen.
    """
    model = resnet50(pretrained=True)
    # freeze the pretrained parameters
    if all is False:
        for param in model.parameters():
            param.requires_grad = False
    # replace the fully-connected output layer
    model.fc = nn.Linear(2048, 2)
    return model
def Densenet(all=False):
    """Return an ImageNet-pretrained DenseNet-161 with a 2-class output layer.

    all -- when False (default) the pretrained weights are frozen so only the
           replaced classifier head is trained.
    """
    # Fix: was densenet161() with random weights, unlike every other
    # constructor in this file -- freezing an untrained backbone would leave
    # it permanently random while only the head learns.
    model = densenet161(pretrained=True)
    # freeze the pretrained parameters
    if all is False:
        for param in model.parameters():
            param.requires_grad = False
    # replace the classifier output layer
    model.classifier = nn.Linear(2208, 2)
    return model
def GoogleNet(all=False):
    """Return an ImageNet-pretrained GoogLeNet with a 2-class output layer.

    all -- when False (default) the pretrained weights are frozen.
    """
    model = googlenet(pretrained=True)
    # freeze the pretrained parameters
    if all is False:
        for param in model.parameters():
            param.requires_grad = False
    # Replace the last fully-connected layer
    # Parameters of newly constructed modules have requires_grad=True by default
    model.fc = nn.Linear(1024, 2)
    return model
def inceptionv3(all=False):
    """Return an ImageNet-pretrained Inception-v3 (aux logits disabled) with a
    2-class output layer.

    all -- when False (default) the pretrained weights are frozen.
    """
    model = inception_v3(pretrained=True, aux_logits=False)
    # freeze the pretrained parameters
    if all is False:
        for param in model.parameters():
            param.requires_grad = False
    # Replace the last fully-connected layer
    # Parameters of newly constructed modules have requires_grad=True by default
    model.fc = nn.Linear(2048, 2)
    return model
class Model():
    """Registry and factory for the supported network constructors."""

    # Names of the constructor functions selectable via model_builder().
    model_list = ['VGG19', 'VGG19_2', 'ResNet', 'MyCNN', 'Densenet', 'GoogleNet', 'inceptionv3']

    def get_model_list(self):
        """Return the names of all selectable models."""
        return self.model_list

    def check_model_name(self, name):
        """Exit with an explanatory message when *name* is not registered."""
        if name not in self.model_list:
            model_string = '\', \''.join(self.model_list)
            sys.exit(f"ModelNameError: '{name}' is not acceptable. The acceptable models are \'{model_string}\'.")

    def model_builder(self, model_name, train_all=False):
        """Validate *model_name*, then call the matching constructor.

        train_all -- forwarded to the constructor; True trains all layers.
        """
        self.check_model_name(model_name)
        return globals()[model_name](train_all)
if __name__ == '__main__':
    # model_list= ['VGG19', 'VGG19_2', 'ResNet', 'MyCNN', 'Densenet', 'GoogleNet', 'inceptionv3']
    # Smoke test: build the fourth registered model ('MyCNN') and print its summary.
    model = Model().model_builder(Model().get_model_list()[3])
    summary(model, input_size=(3,224,224), batch_size=1, device="cpu")
    # print(model)
pass | 24.370968 | 114 | 0.654203 | 665 | 0.213278 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.270045 |
d7fc48fb5d168909d619b0248ed17c5cf0a539aa | 63 | py | Python | adventofcode/2020/25/crack_key/__init__.py | bneradt/toy | 982e80ec98f4e951f7275e5f22cb0197f8f86c08 | [
"Apache-2.0"
] | null | null | null | adventofcode/2020/25/crack_key/__init__.py | bneradt/toy | 982e80ec98f4e951f7275e5f22cb0197f8f86c08 | [
"Apache-2.0"
] | null | null | null | adventofcode/2020/25/crack_key/__init__.py | bneradt/toy | 982e80ec98f4e951f7275e5f22cb0197f8f86c08 | [
"Apache-2.0"
] | null | null | null | from .crack_key import derive_loop_size, derive_encryption_key
| 31.5 | 62 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7fc56b54647de1ec4723b9fa66999b5bea809a7 | 2,324 | py | Python | guitarHarmony/scale.py | fuyuan-li/Guitar-Harmony | 8cd836782bb19cbf39ce4a4722ea52d7988b2cc4 | [
"MIT"
] | null | null | null | guitarHarmony/scale.py | fuyuan-li/Guitar-Harmony | 8cd836782bb19cbf39ce4a4722ea52d7988b2cc4 | [
"MIT"
] | null | null | null | guitarHarmony/scale.py | fuyuan-li/Guitar-Harmony | 8cd836782bb19cbf39ce4a4722ea52d7988b2cc4 | [
"MIT"
] | null | null | null | from .interval import Interval
from .note import Note
class Scale():
    """
    A musical scale: a root Note plus the notes reached by applying a named
    recipe of intervals to that root.
    """
    # Interval names (relative to the root) that define each scale type.
    scale_recipes = {
        'major' : ['M2', 'M3', 'P4', 'P5', 'M6', 'M7', 'P8'],
        'natural_minor': ['M2', 'm3', 'P4', 'P5', 'm6', 'm7', 'P8'],
        'harmonic_minor': ['M2', 'm3', 'P4', 'P5', 'm6', 'M7', 'P8'],
        'melodic_minor': ['M2', 'm3', 'P4', 'P5', 'M6', 'M7', 'P8'],
        'dorian': ['M2', 'm3', 'P4', 'P5', 'M6', 'm7', 'P8'],
        'locrian': ['m2', 'm3', 'P4', 'd5', 'm6', 'm7', 'P8'],
        'lydian': ['M2', 'M3', 'A4', 'P5', 'M6', 'M7', 'P8'],
        'mixolydian': ['M2', 'M3', 'P4', 'P5', 'M6', 'm7', 'P8'],
        'phrygian': ['m2', 'm3', 'P4', 'P5', 'm6', 'm7', 'P8'],
        'major_pentatonic': ['M2', 'M3', 'P5', 'M6', 'P8'],
        'minor_pentatonic': ['m3', 'P4', 'P5', 'm7', 'P8'],
        'alter': ['m2', 'm3', 'P4', 'd5', 'm6', 'm7', 'P8'],
        'blues': ['m3', 'P4', 'd5', 'P5', 'm7', 'P8'],
        'lydian_dominant': ['M2', 'M3', 'A4', 'P5', 'M6', 'm7', 'P8']
    }

    def __init__(self, root='C', scale_type = 'major'):
        """Build the scale.

        root -- a note name accepted by Note()
        scale_type -- one of the keys of ``scale_recipes``
        Raises Exception (same messages as before) on invalid input.
        """
        self.notes = []
        try:
            self.notes.append(Note(root))
        except Exception as exc:
            # Fix: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed and chained.
            raise Exception('Invalid root note supplied.') from exc
        if scale_type in self.scale_recipes.keys():
            self.scale_type = scale_type
        else:
            raise Exception('Invalid scale type supplied! current valid types: {} '.format(self.scale_recipes.keys()))
        self.build_scale()

    def all_scales(self):
        """Return the names of all supported scale types."""
        return self.scale_recipes.keys()

    def build_scale(self):
        """Populate ``self.notes`` from this scale type's recipe."""
        # (dropped a redundant ``[0:]`` copy -- add_intervals only iterates)
        self.add_intervals(self.scale_recipes[self.scale_type])

    def add_intervals(self, intervals):
        """Append root + Interval(name) for each interval name given."""
        for name in intervals:
            self.notes.append(self.notes[0] + Interval(name))

    def __repr__(self):
        return "Scale(Note({!r}), {!r})".format(str(self.notes[0]), self.scale_type)

    def __str__(self):
        return "{}{}".format(str(self.notes[0]), self.scale_type)

    def __eq__(self, other):
        # scales with different note counts can never be equal
        if len(self.notes) != len(other.notes):
            return False
        return all(self.notes[i] == other.notes[i] for i in range(len(self.notes)))
| 36.3125 | 118 | 0.493115 | 2,267 | 0.975473 | 0 | 0 | 0 | 0 | 0 | 0 | 757 | 0.325731 |
d7fff513272ef6ea7c292003102cdb722d219305 | 1,549 | py | Python | TextAdventure.py | glors131/Portfolio1 | 64c36e1f1893fe30d0b13f6d419cf75a3fcf03cb | [
"MIT"
] | null | null | null | TextAdventure.py | glors131/Portfolio1 | 64c36e1f1893fe30d0b13f6d419cf75a3fcf03cb | [
"MIT"
] | null | null | null | TextAdventure.py | glors131/Portfolio1 | 64c36e1f1893fe30d0b13f6d419cf75a3fcf03cb | [
"MIT"
] | null | null | null | start = '''You wake up one morning and find yourself in a big crisis.
Trouble has arised and your worst fears have come true. Zoom is out to destroy
the world for good. However, a castrophe has happened and now the love of
your life is in danger. Which do you decide to save today?'''
print(start)
done = False
print(" Type 'Flash to save the world' or 'Flash to save the love of his life' ")
user_input = input()
while not done:
if user_input == "world":
print (" Flash has to fight zoom to save the world. ")
done = True
print("Should Flash use lightening to attack Zoom or read his mind?")
user_input = input()
if user_input == "lightening":
print("Flash defeats Zoom and saves the world!")
done = True
elif user_input == "mind":
print("Flash might be able to defeat Zoom, but is still a disadvantage. ")
done = True
print("Flash is able to save the world.")
elif user_input == "love":
print ("In order to save the love of his life, Flash has to choose between two options. ")
done = True
print("Should Flash give up his power or his life in order to save the love of his life?")
user_input = input()
if user_input == "power":
print("The Flash's speed is gone. But he is given the love of his life back into his hands. ")
done = True
elif user_input == "life":
print("The Flash will die, but he sees that the love of his life is no longer in danger.")
done = True
print("No matter the circumstances, Flash is still alive. ") | 39.717949 | 99 | 0.671401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,037 | 0.669464 |
cc00205c58c037f18c599c68cbd7da80776abb99 | 3,141 | py | Python | catkin_workspace/src/project_weather_x/scripts/weather_server.py | NarendraPatwardhan/HuskyWeatherCast | 1ffadca23368a497ce7d3003806b548307bb7596 | [
"MIT"
] | null | null | null | catkin_workspace/src/project_weather_x/scripts/weather_server.py | NarendraPatwardhan/HuskyWeatherCast | 1ffadca23368a497ce7d3003806b548307bb7596 | [
"MIT"
] | null | null | null | catkin_workspace/src/project_weather_x/scripts/weather_server.py | NarendraPatwardhan/HuskyWeatherCast | 1ffadca23368a497ce7d3003806b548307bb7596 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
from project_weather_x.srv import Weather,WeatherResponse
import os
import rospy
import numpy as np
import urllib2
import cv2
import json
def handle_weather(req):
    """ROS service callback: fetch, parse and display the weather for the
    requested zip code, then wrap the displayed temperature (Fahrenheit,
    as returned by showWeather) in a WeatherResponse.
    """
    print "Returning weather data for zipcode [%s]"%(req)
    return WeatherResponse(showWeather(processWeather(getWeather(int(req.zip_code)))))
def weather_server():
    """Register the 'weather' ROS service on node 'weather_server' and block
    until the node is shut down.
    """
    rospy.init_node('weather_server')
    s = rospy.Service('weather', Weather, handle_weather)
    print "Ready to get weather data."
    rospy.spin()
def getWeather(zip_code):
    """Fetch the current weather for a US zip code from OpenWeatherMap.

    :param zip_code: int zip code
    :return: dict decoded from the JSON API response (imperial units)
    """
    # NOTE(review): the API key is hard-coded in source control; it should be
    # moved to a ROS parameter or an environment variable.
    api_key = '4cfea3182851aa1f0e3cccde3d991277'
    api_string = 'http://api.openweathermap.org/data/2.5/weather?zip='
    full_api_string = api_string + str(zip_code) + '&mode=json&units=imperial&APPID=' + api_key
    url = urllib2.urlopen(full_api_string)
    try:
        # BUGFIX: close the handle even when read()/json.loads() raises;
        # the original leaked the connection on failure.
        output = url.read().decode('utf-8')
        weather_data = json.loads(output)
    finally:
        url.close()
    return weather_data
def processWeather(weather_data):
    """Reduce a raw OpenWeatherMap response to the fields the app displays.

    :param weather_data: dict as returned by getWeather()
    :return: dict with 'city', integer 'temperature' (F) and 'day_or_night'
             (1 for day, 0 for night)
    """
    # It is night when the observation timestamp is past sunset.
    is_day = 1
    if weather_data['dt'] > weather_data['sys']['sunset']:
        is_day = 0
    return {
        'city': weather_data['name'],
        'temperature': int(weather_data['main']['temp']),
        'day_or_night': is_day,
    }
def showWeather(weather_dict):
    """Render a small weather dashboard with OpenCV and block until a key press.

    :param weather_dict: dict with 'city', 'temperature' (F) and
                         'day_or_night' as produced by processWeather()
    :return: the displayed temperature in Fahrenheit
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # Background crop and icon sheet, scaled to [0, 1].
    sky = cv2.imread(dir_path+'/SkyBackground.jpg',cv2.IMREAD_UNCHANGED)[300:700,400:700,:]/255
    icons = cv2.imread(dir_path+'/Weather Icons3.jpg',cv2.IMREAD_UNCHANGED)
    icons = icons[15:365,115:465]
    icon_set = split_image(icons,7,7,50)
    sunIndex = 5 if (weather_dict['day_or_night'] == 0) else 0
    canvas = np.ones((400,300,3))-0.01
    cv2.putText(canvas,"Hello "+weather_dict['city']+' :)', (80,20), cv2.FONT_HERSHEY_DUPLEX, 0.5, 0,thickness = 2)
    canvas[50:100,50:100,:] = icon_set[sunIndex]
    if (weather_dict['temperature']>41):
        canvas[200:250,125:175,:] = icon_set[2]
        cv2.putText(canvas,"enjoy the weather,lucky punks", (20,270), cv2.FONT_HERSHEY_DUPLEX, 0.5, 0,thickness = 2)
    else:
        # BUGFIX: was `elif temperature < 41`, which left exactly 41F with
        # no icon or caption; treat 41F and below as cold.
        canvas[200:250,125:175,:] = icon_set[35]
        cv2.putText(canvas,"but you better wear a snowjacket", (20,270), cv2.FONT_HERSHEY_DUPLEX, 0.5, 0,thickness = 2)
    timeString = 'Carpe Diem' if (weather_dict['day_or_night'] == 1) else 'Carpe Noctum'
    cv2.putText(canvas,timeString, (120,80), cv2.FONT_HERSHEY_DUPLEX, 0.5, 0,thickness = 2)
    # Shift the big temperature left when it carries a minus sign.
    posOrNeg = 100 if (weather_dict['temperature']>0) else 50
    cv2.putText(canvas,str(weather_dict['temperature'])+"F", (posOrNeg,175), cv2.FONT_HERSHEY_DUPLEX, 2, 0,thickness = 2)
    appImage = cv2.addWeighted(canvas,0.7,sky,0.3,0)
    appImageScaled = cv2.resize(appImage,None,fx=1.5,fy=1.5,interpolation = cv2.INTER_CUBIC)
    cv2.imshow('Husky Weather App',appImageScaled)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return weather_dict['temperature']
def split_image(img, vparts, hparts, size):
    """Cut `img` into a vparts x hparts grid of square tiles.

    Tiles are `size` pixels wide/high, taken row-major from the top-left,
    and each tile is divided by 255 (scaled to [0, 1]).
    """
    return [
        img[row * size:(row + 1) * size, col * size:(col + 1) * size] / 255
        for row in range(vparts)
        for col in range(hparts)
    ]
if __name__ == "__main__":
weather_server()
| 41.88 | 121 | 0.69914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 587 | 0.186883 |
cc00441179a1e29fdd2344705a8b2aea6af01f3f | 2,261 | py | Python | api/views.py | jacerong/normalesp | fd5d675fb1ccc974b40280ae9d18bafb8be39677 | [
"MIT"
] | 2 | 2017-07-12T11:37:18.000Z | 2017-10-12T20:27:02.000Z | api/views.py | jacerong/normalesp | fd5d675fb1ccc974b40280ae9d18bafb8be39677 | [
"MIT"
] | null | null | null | api/views.py | jacerong/normalesp | fd5d675fb1ccc974b40280ae9d18bafb8be39677 | [
"MIT"
] | 2 | 2018-06-10T01:09:24.000Z | 2021-04-29T16:30:50.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, re, sys
import pickle
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = '/'.join(CURRENT_PATH.split('/')[:-1] + ['normalesp',])
sys.path.append(BASE_PATH)
from spell_checking import SpellTweet, _to_str
@api_view(['POST'])
@permission_classes((IsAuthenticated, ))
def spell_checking(request):
    """Main API view.

    params:
        text: str
            Text to be normalized.
        external_dicc_ip: str
            IPv4 address of the context-dependent normalization dictionary.
            Note that this dictionary is an external service.
        external_dicc_port: str
            Port on which requests for the normalization dictionary are
            received.
        return_normalized_text: boolean
            Whether to return the normalized text instead of the array of
            out-of-vocabulary words.
    """
    spell = None
    # Use the external context-dependent dictionary only when both its IP
    # and port were supplied.
    if ('external_dicc_ip' in request.data
            and request.data['external_dicc_ip'] is not None
            and 'external_dicc_port' in request.data
            and request.data['external_dicc_port'] is not None):
        spell = SpellTweet(external_dicc_ip=request.data['external_dicc_ip'],
            external_dicc_port=request.data['external_dicc_port'])
    else:
        spell = SpellTweet()
    oov_words = spell.spell_tweet(text=request.data['text'])
    # check whether the caller asked for the normalized text
    if ('return_normalized_text' in request.data
            and request.data['return_normalized_text']):
        # convert the text to unicode (Python 2 str -> unicode)
        text = request.data['text']
        text = text.decode('utf-8') if not isinstance(text, unicode) else text
        # Replace each out-of-vocabulary token with its normalization;
        # oov[2] appears to be the original token and oov[3] its correction
        # (underscores standing for spaces) — confirm against SpellTweet.
        for oov in oov_words:
            text = re.sub(r'(\A|\W)' + oov[2],
                r'\1' + oov[3].replace('_', ' '),
                text,
                count=1, flags=re.U)
        return Response({"text": text})
    else:
        return Response(oov_words)
| 34.257576 | 79 | 0.646617 | 0 | 0 | 0 | 0 | 1,793 | 0.790564 | 0 | 0 | 931 | 0.410494 |
cc0180fa1725a1c40ecec04f320fe9877adad635 | 654 | py | Python | main.py | Answeroid/fb-harvester | 74ebab2345540888ffc2e3b873069fc363218e9c | [
"Apache-2.0"
] | null | null | null | main.py | Answeroid/fb-harvester | 74ebab2345540888ffc2e3b873069fc363218e9c | [
"Apache-2.0"
] | null | null | null | main.py | Answeroid/fb-harvester | 74ebab2345540888ffc2e3b873069fc363218e9c | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from fb_auth import auth
from logger import Logger
def main():
    """Log in to Facebook through a headless PhantomJS browser."""
    # TODO add possibility to login to different FB accounts (use csv file to store them)
    # TODO handle all exceptions especially when account was blocked
    # TODO save automatic screenshots from time to time
    # TODO add native system logger from previous log parser project
    instance = Logger()
    log = instance.get_instance()
    # NOTE(review): the PhantomJS binary path is hard-coded and user-specific;
    # it should come from configuration or PATH.
    driver = webdriver.PhantomJS("/home/username/node_modules/phantomjs-prebuilt/bin/phantomjs")
    try:
        driver.get('https://www.facebook.com/')
        auth(driver, log)
    finally:
        # BUGFIX: the headless browser process was leaked on every run
        # (and the unused local `a = 1` removed); always shut it down.
        driver.quit()
if __name__ == '__main__':
main()
| 27.25 | 96 | 0.721713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.555046 |
cc02b47da67c564590731ad9d826d475fb336807 | 4,022 | py | Python | src/dag_simulation.py | D-Stacks/kaspad-py-explorer | 75b885ab2f10bafa4f1e86b6e801033c4ae1aabd | [
"MIT"
] | 6 | 2022-01-24T16:58:22.000Z | 2022-03-14T17:10:29.000Z | src/dag_simulation.py | D-Stacks/kaspad-py-explorer | 75b885ab2f10bafa4f1e86b6e801033c4ae1aabd | [
"MIT"
] | null | null | null | src/dag_simulation.py | D-Stacks/kaspad-py-explorer | 75b885ab2f10bafa4f1e86b6e801033c4ae1aabd | [
"MIT"
] | 1 | 2022-03-13T18:57:55.000Z | 2022-03-13T18:57:55.000Z | import uuid
import random
import os
import math
import numpy as np
import simpy
import matplotlib.pyplot as plt
from simulation.ghostdag import block
from simulation.ghostdag.dag import select_ghostdag_k, DAG
from simulation.fakes import FakeDAG
from simulation.channel import Hub, Channel, PlanarTopology
from simulation.helpers import print_dag, print_stats, save_to_json
from simulation.miner import Miner
from simulation.attack_miners import AttackMiner
# Global simulation configuration.
simulation_time = 2 ** 12  # simpy time units the environment runs for
with_attack = True         # build miners via make_attack_miner (miner 1 attacks)
print_progress = True      # emit coordinate/progress/report output during the run
def make_dag(genesis_hash, k):
    """Create a GHOSTDAG DAG over the full 64-bit hash interval."""
    full_interval = (0, 2 ** 64 - 1)
    return DAG(k=k, interval=full_interval, genesis_hash=genesis_hash)
def make_honest_miner(miner_channel, genesis_hash, k, _lambda, _alpha, miner_index, num_miners):
    """Build an honest miner; only miner 0 keeps a real DAG, the rest use fakes."""
    dag = make_dag(genesis_hash, k) if miner_index == 0 else FakeDAG(genesis_hash=genesis_hash)
    hash_share = 1 / num_miners
    return Miner(k, _lambda, hash_share, dag, miner_channel)
def make_attack_miner(miner_channel, genesis_hash, k, _lambda, _alpha, miner_index, num_miners):
    """Build a miner for the attack scenario.

    Only miner 1 is the actual attacker (hash share `_alpha`); the honest
    miners split the remaining hash rate. Miners 0 and 1 keep real DAGs,
    the others mine on fakes.
    """
    if miner_index == 1:
        return AttackMiner(-1, _lambda, _alpha, make_dag(genesis_hash, k), miner_channel)
    honest_share = (1 - _alpha) / (num_miners - 1)
    dag = make_dag(genesis_hash, k) if miner_index == 0 else FakeDAG(genesis_hash=genesis_hash)
    return Miner(-1, _lambda, honest_share, dag, miner_channel)
class Simulation:
    """GHOSTDAG mining simulation over a planar grid of miners.

    Miners sit on a rows x cols grid (spacing scaled by D_factor); message
    latency between two miners comes from the PlanarTopology latency oracle.
    """
    def __init__(self, _alpha, _delta, _lambda, rows=3, cols=3, D_factor=1.0, k=None):
        """
        :param _alpha: attacker hash-rate fraction (used by make_attack_miner)
        :param _delta: security parameter forwarded to select_ghostdag_k
        :param _lambda: block-rate parameter forwarded to the miners
        :param rows, cols: grid dimensions (one miner/channel per cell)
        :param D_factor: grid spacing scale factor
        :param k: GHOSTDAG k; if None it is derived from 2*D_max*_lambda
        """
        # Communication constants
        # D_max is the grid diagonal plus a small base latency.
        self.D_min, self.D_max = 0.1, np.sqrt((rows * D_factor) ** 2 + (cols * D_factor) ** 2) + 0.1
        # Mining parameters
        self._alpha = _alpha
        self._delta = _delta
        self._lambda = _lambda
        if k is None:
            self.k = select_ghostdag_k(2 * self.D_max * _lambda, _delta)
        else:
            self.k = k
        # Simulation environment
        self.env = simpy.Environment()
        # Grid topology
        self.topology = PlanarTopology(D_min=self.D_min, D_max=self.D_max)
        self.hub = Hub(self.env, latency_oracle=self.topology.latency)
        self.channels = []
        for r in range(rows):
            for c in range(cols):
                channel = Channel(self.hub)
                self.topology.channel_map[channel] = (r * D_factor, c * D_factor)
                self.channels.append(channel)
    def run_simulation(self, seed=22522):
        """Run the simulation until `simulation_time` and return miner 0's DAG.

        :param seed: numpy/random seed; None leaves the RNGs unseeded
        """
        # Setup and start the simulation
        if seed is not None:
            np.random.seed(seed)
            random.seed(seed)
        if print_progress:
            print('\n=========\n')
            print('GHOSTDAG simulation')
            print('\n=========\n')
        genesis_hash = uuid.uuid1().int
        miners = []
        block.reindex_size_trace = []
        for i, channel in enumerate(self.channels):
            s = str(self.topology.channel_map[channel])
            if print_progress:
                print('Miner %d coordinates: %s' % (i, s))
            if with_attack:
                miner = make_attack_miner(channel, genesis_hash, self.k, self._lambda, self._alpha, i, len(self.channels))
            else:
                miner = make_honest_miner(channel, genesis_hash, self.k, self._lambda, self._alpha, i, len(self.channels))
            self.env.process(miner.mine(self.env))
            self.env.process(miner.receive(self.env))
            if i == 0 and print_progress:
                # only miner 0 (the one holding a real DAG) reports progress
                self.env.process(miner.report(self.env))
            miners.append(miner)
        if print_progress:
            print('\n=========\n')
        self.env.run(until=simulation_time)
        if print_progress:
            print('\n=========\n')
        return miners[0].dag
def main():
    """Run one simulation, print stats, plot the reindex trace and dump the DAG."""
    _lambda, _delta, _alpha = 1, 0.01, 0.01
    simulation = Simulation(_alpha, _delta, _lambda)
    dag = simulation.run_simulation()
    # print_dag(dag)
    print('\n=========\n')
    # Print stats
    print_stats(simulation.D_max, _delta, _lambda, dag, simulation.k)
    print('\n=========\n')
    plt.figure()
    plt.plot(block.reindex_size_trace, linewidth=0.1)
    plt.xlabel('time')
    plt.ylabel('reindex size')
    plt.show()
    # BUGFIX: isdir()+mkdir() raced with concurrent runs; makedirs with
    # exist_ok=True is atomic with respect to the existence check.
    os.makedirs('data', exist_ok=True)
    save_to_json(dag, file_name=os.path.join('data', 'dag.json'))
if __name__ == '__main__':
main()
# try:
# main()
# except Exception as ex:
# print(type(ex).__name__, ex) | 27.737931 | 110 | 0.702884 | 1,929 | 0.479612 | 0 | 0 | 0 | 0 | 0 | 0 | 447 | 0.111139 |
cc03f2dbb8df3eafa255b00b9aa7871fbde5777c | 15,140 | py | Python | web/kwmo/kwmo/controllers/skurl_teambox.py | tmbx/kas | 0d4e74d0a8ec0e0f85ba574eb01d389530bdeecc | [
"BSD-3-Clause"
] | null | null | null | web/kwmo/kwmo/controllers/skurl_teambox.py | tmbx/kas | 0d4e74d0a8ec0e0f85ba574eb01d389530bdeecc | [
"BSD-3-Clause"
] | null | null | null | web/kwmo/kwmo/controllers/skurl_teambox.py | tmbx/kas | 0d4e74d0a8ec0e0f85ba574eb01d389530bdeecc | [
"BSD-3-Clause"
] | null | null | null | from kwmo.controllers.abstract_teambox import *
import time
from kwmo.lib.kwmo_kcd_client import KcdClient
from kwmo.lib.config import get_cached_kcd_external_conf_object
from kfs_lib import *
from kwmo.lib.base import init_session
from kwmo.lib.kwmolib import *
from kwmo.model.user import User
from kwmo.model.kfs_node import KfsNode
from kwmo.model.chat_request import ChatRequest
from kwmo.model.ws_request import WSRequest
import kbase
import simplejson
log = logging.getLogger(__name__)
class SkurlTeamboxController(AbstractTeamboxController):
    """Pylons controller for public ("skurl") Teambox workspaces.

    Handles guest identification, chat / KFS / workspace-creation requests
    and the public workspace page itself. Handlers rely on the request
    globals `c` (current workspace, permissions) and `session`.
    """
    # Internal: check if workspace is public.
    def _check_public(self, workspace_id):
        """Abort with 404 unless the current workspace is public."""
        if not c.workspace.public:
            log.warning("_check_public(): workspace %i is not public." % ( workspace_id ) )
            abort(404)
    # Internal: login as a skurl user.
    def _login(self, user):
        """Store `user` in the session and grant share-0 KFS up/download perms."""
        session['user'] = user.to_dict()
        session['user_id'] = session['user']['id']
        c.perms.allow('kfs.download.share.0')
        c.perms.allow('kfs.upload.share.0')
        session.save()
        # Last minute permissions check.
        self._check_perms()
    # Internal: set chat request permissions.
    def _set_chat_requests_perms(self, flag):
        """Allow (True) or deny (False) posting chat requests."""
        if flag:
            # Allow chat requests.
            c.perms.allow('pubws.req.chat')
        else:
            # Deny further chat requests.
            c.perms.deny('pubws.req.chat')
    # Internal: set chat permissions.
    def _set_chat_perms(self, flag):
        """Allow or deny list/post on the logged user's own chat channel."""
        if flag:
            # Allow chat.
            c.perms.allow('chat.list.channel.' + str(session['user_id']))
            c.perms.allow('chat.post.channel.' + str(session['user_id']))
        else:
            # Deny chat.
            c.perms.deny('chat.list.channel.' + str(session['user_id']))
            c.perms.deny('chat.post.channel.' + str(session['user_id']))
    # Internal: set workspace creation requests permissions.
    def _set_ws_creation_requests_perms(self, flag):
        """Allow (True) or deny (False) workspace creation requests."""
        if flag:
            # Allow workspace creation requests.
            # (comment fixed: it previously described the deny branch)
            c.perms.allow('pubws.req.wscreate')
        else:
            # Deny further workspace creation requests.
            c.perms.deny('pubws.req.wscreate')
    # Log user out.
    def logout(self, workspace_id, email_id):
        """Reset the session and redirect back to the public workspace page."""
        log.debug("Skurl logout.")
        init_session(c.workspace, reinit=True)
        ui_flash_info(code='logout', hide_after_ms=5000)
        redirect_to(url('teambox_pubws_show', workspace_id=workspace_id, email_id=email_id))
    # Show public workspace main page.
    def show(self, workspace_id, email_id):
        """Render the public workspace main page.

        Handles automatic sender login via notification links, caches the
        email info and sender/recipient identities in the session, refreshes
        chat/workspace-creation permissions and computes the initial updater
        state for the template.
        """
        workspace_id = int(workspace_id)
        email_id = int(email_id)
        # Set logout url.
        c.logout_url = url('teambox_pubws_logout', workspace_id=workspace_id, email_id=email_id)
        # Check if the workspace is public.
        self._check_public(workspace_id)
        if 'email_id' in session and session['email_id'] != email_id:
            # User is logged but wants to access a different email. Reinit session.
            log.debug("Reinitializing session because user is using another email id: previous='%s', new='%s'." \
                % ( str(session['email_id']), str(email_id) ) )
            init_session(c.workspace, reinit=True)
        notif = request.GET.get('notif', 0)
        if notif:
            # This is the sender (user 1)... [re-]login automatically.
            log.debug("User is accessing a public workspace using a notification link... automatically log user in.")
            user = User.get_by(workspace_id=workspace_id, id=1)
            log.debug("Reinitializing session because user is logging as user 1 (notif management).")
            init_session(c.workspace, reinit=True)
            self._login(user)
            c.notif_flag = True
        else:
            if 'user' in session and session['user'] and session['user']['id'] == 1:
                # Sender is logged (as a sender) but he's using a regular skurl link: logout.
                log.debug("Reinitializing session because user was logged as user 1 but is using a regular skurl link.")
                init_session(c.workspace, reinit=True)
        if not c.perms.hasRole('skurl'):
            # Give skurl role, if not already done.
            c.perms.addRole('skurl')
        # Save session.
        session.save()
        if not 'email_id' in session:
            # Set email information in session.
            # Instantiate a Kcd client.
            kc = KcdClient(get_cached_kcd_external_conf_object())
            # Check that email ID is valid.
            email_info = kc.pubws_get_email_info(workspace_id, email_id)
            if not email_info:
                log.debug("PubWS: invalild email ID: %i" % ( email_id ) )
                abort(404)
            # Get the email sender.
            sender_user = User.get_by(workspace_id=workspace_id, id=1)
            sender = kbase.PropStore()
            sender.name = sender_user.real_name
            sender.email = sender_user.email
            # Get the email recipients (list of PropStores, having name and email keys).
            raw_recipients = kc.pubws_get_eid_recipient_identities(workspace_id, email_id)
            # Strip sender email from recipients, if needed.
            recipients = []
            for recipient in raw_recipients:
                if recipient.email != sender.email:
                    recipients.append(recipient)
            # Merge sender and recipients.
            identities = [sender] + recipients
            # Set needed informations in session.
            session['email_id'] = email_id
            session['email_info'] = email_info.to_dict()
            session['identities'] = map(lambda x: x.to_dict(), identities)
            session.save()
        # Get informations that will be published in the template.
        c.dyn_version = 15
        c.email_info = session['email_info']
        c.json_email_info_str = simplejson.dumps(c.email_info)
        c.identities = session['identities']
        c.json_identities_str = simplejson.dumps(c.identities)
        # Check if a chat request was accepted lately (delay is hardcoded in accepted_lately()).
        c.user_id = None
        if 'user_id' in session and session['user_id']:
            c.user_id = session['user_id']
            if ChatRequest.accepted_lately(workspace_id, session['user_id']):
                # Deny chat requests and allow chat since a request was accepted lately.
                self._set_chat_requests_perms(False)
                self._set_chat_perms(True)
            else:
                # Allow chat requests and deny chat since no request was accepted lately.
                self._set_chat_requests_perms(True)
                self._set_chat_perms(False)
        # Allow workspace creation request.
        self._set_ws_creation_requests_perms(True)
        # Save session.
        session.save()
        c.base_url_paths = kurl.get_base_url_paths(
            'teambox_updater',
            'teambox_post_chat',
            'teambox_download',
            'teambox_upload',
            'teambox_pubws_set_identity',
            'teambox_pubws_chat_request',
            'teambox_pubws_chat_request_result',
            'teambox_pubws_kfsup_request',
            'teambox_pubws_kfsdown_request',
            'teambox_pubws_create_request')
        # Get first update directly.
        flags = ( StateRequest.STATE_FORCE_SYNC
                | StateRequest.STATE_WANT_PERMS
                | StateRequest.STATE_WANT_MEMBERS
                | StateRequest.STATE_WANT_KFS
                | StateRequest.STATE_WANT_PUBWS_INFO )
        params = { }
        if 'user_id' in session and session['user_id']:
            flags |= StateRequest.STATE_WANT_CHAT
            params['chat_channel_id'] = session['user_id']
        updater_state_dict = state_request_get(c, session, flags, params)
        c.updater_state_json = simplejson.dumps(updater_state_dict)
        return render('/teambox/pubwsshow.mako')
    # Get a user ID matching the identity ID selected by the user.
    # If user is not invited, he is invited first.
    @kjsonify
    def pb_set_identity(self, workspace_id):
        """Log the guest in as the identity he selected.

        Identity 0 is the sender (user 1). Other identities are looked up by
        email and, when not yet members, invited through KCD and then polled
        until kwsfetcher materializes the new user (8 second timeout).
        """
        import select
        from kcd_lib import WorkspaceInvitee
        workspace_id = int(workspace_id)
        # Get the workspace.
        if not c.workspace.public:
            log.warning("pb_set_identity: Workspace %i is not public." % ( workspace_id ) )
            abort(404)
        # Get POST parameters.
        identity_id = request.params['identity_id']
        identity_id = int(identity_id)
        # Shortcuts
        identity = session['identities'][identity_id]
        log.debug("Recipient: %s" % ( str(identity) ) )
        if identity_id == 0:
            # This is the sender (user 1).
            user = User.get_by(workspace_id=workspace_id, id=1)
            self._login(user)
            log.debug("Found matching user(0): '%s'." % ( str(user) ) )
            return { 'result' : 'ok', 'user' : session['user'] }
        # This is a real recipient... try to get the user.
        user = User.get_by(workspace_id=workspace_id, email=identity['email'])
        if user:
            self._login(user)
            log.debug("Found matching user(1): '%s'." % ( str(user) ) )
            return { 'result' : 'ok', 'user' : session['user'] }
        # Instantiate a Kcd client.
        kc = KcdClient(get_cached_kcd_external_conf_object())
        # Invite user.
        invitee = WorkspaceInvitee(real_name=identity['name'], email_address=identity['email'])
        junk_url, invitees = kc.invite_users(workspace_id, "empty message", [invitee])
        if invitees[0].error:
            log.debug("User could not be invited: '%s'." % ( str(invitees[0].error) ) )
            raise Exception('Internal error.')
        # Get user. If not present, retry a few times, until new user is fetched by kwsfetcher or until timeout.
        wait_seconds = 0.5
        timeout_seconds = 8
        time_started = time.time()
        while 1:
            # Get user, if it exists (fetched by kwsfetcher).
            user = User.get_by(workspace_id=workspace_id, email=identity['email'])
            if user:
                self._login(user)
                log.debug("Found matching user (2): '%s'." % ( str(user) ) )
                return { 'result' : 'ok', 'user' : session['user'] }
            # Check for timeout.
            if time.time() > time_started + timeout_seconds: break
            # Wait
            select.select([], [], [], wait_seconds)
        # Reached timeout.
        log.error("Error: reached end of pb_set_identity(). KWSFetcher might be too loaded or down.");
        raise Exception('Temporary server error: please try again later.');
    # Internal: do stuff related to every pubws request.
    def _request_common(self, workspace_id):
        """Common per-request checks: require a logged user, build a KCD client."""
        # Check that the user is logged.
        if not session['user']:
            log.error("_request_common(): user is not logged.")
            abort(404)
        # Instantiate a Kcd client in the context-global variable.
        c.pubws_kc = KcdClient(get_cached_kcd_external_conf_object())
    # PubWS chat request.
    @kjsonify
    def chat_request(self, workspace_id):
        """Post a chat request to KCD; returns the new chat request id."""
        workspace_id = int(workspace_id)
        # Do some checks and initialization.
        self._check_public(workspace_id)
        self._request_common(workspace_id)
        # Time to allow the workspace owner to respond.
        # Keep PubWSChat javascript object code in sync for the global chat
        # request timeout (which must be a little longer than this one).
        req_timeout = 60
        # Shortcuts.
        user_id = session['user']['id']
        subject = session['email_info']['subject']
        # Post request.
        chat_req_id = c.pubws_kc.pubws_chat_request(workspace_id, user_id, c.workspace.compat_v2, subject, req_timeout)
        log.debug("Chat request: got chat_req_id '%i'." % ( chat_req_id ) )
        return { "chat_req_id" : chat_req_id }
    # PubWS chat request result request.
    @kjsonify
    def chat_request_result(self, workspace_id, req_id):
        """Poll whether chat request `req_id` was accepted; grant chat perms if so."""
        workspace_id = int(workspace_id)
        req_id = int(req_id)
        req_start_time = request.params['req_start_time']
        # Do some checks and initialization.
        self._check_public(workspace_id)
        self._request_common(workspace_id)
        # Get the request.
        req = ChatRequest.get_by(workspace_id=workspace_id, request_id=req_id)
        if req:
            # Check request status.
            if req.accepted:
                # Modify permissions.
                self._set_chat_requests_perms(False)
                self._set_chat_perms(True)
                # Save session.
                session.save()
                log.debug("chat_request_result(): accepted.")
                return { "result" : "ok" }
            # Enable when debugging to enable automatic chat acceptation.
            if 0:
                from kanp import KANP_MINOR
                from pylons import config
                kc = KcdClient(get_cached_kcd_external_conf_object())
                # This function has to be rewritten.
                kc.pubws_chat_request_accept(workspace_id, user_id, KANP_MINOR, req_id)
        else:
            # Bad request ID or kwsfetcher has not yet fetched the request.
            pass
        log.debug("chat_request_result(): pending, chat_req_id='%s', req_start_time='%s'." \
            % ( str(req_id), str(req_start_time) ) )
        return { "result" : "pending", "chat_req_id" : req_id, 'req_start_time' : req_start_time }
    # PubWS KFS upload request.
    @kjsonify
    def kfs_upload_request(self, workspace_id):
        """No-op acknowledgement for a KFS upload request."""
        workspace_id = int(workspace_id)
        # Do some checks and initialization.
        self._check_public(workspace_id)
        self._request_common(workspace_id)
        # No-op
        return { "result" : "ok" }
    # PubWS KFS download request.
    @kjsonify
    def kfs_download_request(self, workspace_id):
        """No-op acknowledgement for a KFS download request."""
        workspace_id = int(workspace_id)
        # Do some checks and initialization.
        self._check_public(workspace_id)
        self._request_common(workspace_id)
        # No-op
        return { "result" : "ok" }
    # PubWS workspace creation request.
    @kjsonify
    def ws_create_request(self, workspace_id):
        """Post a workspace creation request to KCD; deny further such requests."""
        workspace_id = int(workspace_id)
        # Do some checks and initialization.
        self._check_public(workspace_id)
        self._request_common(workspace_id)
        # Shortcuts.
        user_id = session['user']['id']
        subject = session['email_info']['subject']
        # Post request.
        req_id = c.pubws_kc.pubws_workspace_creation_request(workspace_id, user_id, c.workspace.compat_v2, subject)
        # Modify permissions.
        self._set_ws_creation_requests_perms(False)
        # Save permissions.
        session.save()
        return { "result" : "ready" }
| 37.755611 | 120 | 0.611757 | 14,640 | 0.966975 | 0 | 0 | 6,281 | 0.414861 | 0 | 0 | 4,997 | 0.330053 |
cc048c5fb136986da892240b2448b0adce46347e | 1,310 | py | Python | src/modules/agents/n_rnn_agent.py | Sud0x67/mrmix | 4f4784e421c768509bd007e21b4455b56edc7cd2 | [
"Apache-2.0"
] | 4 | 2022-03-17T05:01:30.000Z | 2022-03-17T05:09:17.000Z | src/modules/agents/n_rnn_agent.py | Sud0x67/mrmix | 4f4784e421c768509bd007e21b4455b56edc7cd2 | [
"Apache-2.0"
] | null | null | null | src/modules/agents/n_rnn_agent.py | Sud0x67/mrmix | 4f4784e421c768509bd007e21b4455b56edc7cd2 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import torch as th
import numpy as np
import torch.nn.init as init
class NRNNAgent(nn.Module):
    """Recurrent agent network: obs -> fc1 -> GRUCell -> fc2 -> per-action Q-values."""

    def __init__(self, input_shape, args):
        """
        :param input_shape: size of the per-agent observation/input vector
        :param args: config object providing `rnn_hidden_dim` and `n_actions`
        """
        super(NRNNAgent, self).__init__()
        self.args = args
        self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)

    def init_hidden(self):
        """Return a zero hidden state of shape (1, rnn_hidden_dim).

        Built from fc1's weight so it lands on the same device/dtype as the
        model (the `.new(...).zero_()` API is dated but kept for compatibility).
        """
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()

    def forward(self, inputs, hidden_state):
        """One GRU step for a batch of agents.

        :param inputs: tensor of shape (b, a, e) = (batch, n_agents, obs_dim);
            a full rollout is fed one transition at a time through the cell
        :param hidden_state: tensor reshapeable to (b*a, rnn_hidden_dim)
        :return: (q, h) with q of shape (b, a, n_actions) and
            h of shape (b, a, rnn_hidden_dim)
        """
        b, a, e = inputs.size()
        x = F.relu(self.fc1(inputs.view(-1, e)), inplace=True)  # (b*a, e) -> (b*a, h)
        h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)  # (b*a, h)
        h = self.rnn(x, h_in)
        q = self.fc2(h)
        return q.view(b, a, -1), h.view(b, a, -1)
cc05b7b2af248ade11dd6dd58ed1ebc18eeafea1 | 3,971 | py | Python | netsav/server/server.py | Turgon37/netsav | 082ed2e938e006f15820e95f920c632f0ce952a4 | [
"MIT"
] | null | null | null | netsav/server/server.py | Turgon37/netsav | 082ed2e938e006f15820e95f920c632f0ce952a4 | [
"MIT"
] | null | null | null | netsav/server/server.py | Turgon37/netsav | 082ed2e938e006f15820e95f920c632f0ce952a4 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
# This file is a part of netsav
#
# Copyright (c) 2014-2015 Pierre GINDRAUD
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# System imports
import logging
from socket import error as Error
from threading import Thread
# Projet Imports
from ..httpteepotreply.httpteepotreply import HttpTeepotReply
# Global project declarations
sys_log = logging.getLogger('netsav')
class Server(Thread):
  """Simple HTTP server thread that makes light (teapot) answers to HTTP queries
  """
  def __init__(self, event):
    """Constructor : Build a server object that will open
    @param[threading.Event] event : the event object which defines
                                    this thread's life state
    """
    # a synchronised event that indicates the continuity of the thread
    self.__event_stop = event
    # network address/port; filled in later by load()
    self.address = None
    self.port = None
    # whether the HTTP handler logs client requests (overridable in load())
    self.log_client = True
    # Server instance
    self.http = None
    Thread.__init__(self, name='HTTP_SERVER')
  def load(self, config):
    """Configure the server from a mapping and build the HTTP handler.
    @param[dict] config : a dictionary that contains address and port
                          key-values (and an optional 'log_client' flag)
    @return[boolean] : True if load success
                      False otherwise
    """
    if isinstance(config, dict):
      self.address = config['address']
      self.port = config['port']
      if 'log_client' in config:
        self.log_client = config['log_client']
    else:
      raise Exception('Invalid configuration type')
    # the socket is bound separately in open(), hence bind_and_activate=False
    self.http = HttpTeepotReply(self.address,
                                self.port,
                                sys_log,
                                bind_and_activate=False,
                                log_client=self.log_client)
    return True
  def open(self):
    """Bind network socket
    @return[boolean] : True if bind success
                      False otherwise
    """
    if not (self.address and self.port):
      sys_log.error('Invalid server network configuration')
      return False
    # Open socket separately for checking bind permissions
    try:
      self.http.server_bind()
      self.http.server_activate()
    except Error:
      sys_log.error("Unable to open socket on port %s", self.port)
      return False
    # Run the server
    sys_log.debug("Opening local server socket on %s:%s",
                  self.address,
                  self.port)
    return True
  def close(self):
    """Close network socket
    """
    try:
      self.getServerInstance().socket.close()
      sys_log.debug('Closing local server socket')
    except Error:
      sys_log.error('Unable to close server socket')
  def getServerInstance(self):
    """Return the HTTP server instance
    """
    return self.http
  def run(self):
    """Thread main loop: serve requests until the stop event is set.
    """
    http = self.getServerInstance()
    while not self.__event_stop.isSet():
      # short timeout so the stop event is re-checked at least twice a second
      http.timeout = 0.5
      http.handle_request()
      self.__event_stop.wait(0.5)
    # close the socket
    self.close()
| 31.023438 | 80 | 0.657517 | 2,566 | 0.646185 | 0 | 0 | 0 | 0 | 0 | 0 | 2,299 | 0.578947 |
cc05df24c5983f52047b132f8c3884d5ead3eb09 | 16,921 | py | Python | veros/setups/global_flexible/global_flexible.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 115 | 2019-11-23T02:31:30.000Z | 2022-03-29T12:58:30.000Z | veros/setups/global_flexible/global_flexible.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 207 | 2019-11-21T13:21:22.000Z | 2022-03-31T23:36:09.000Z | veros/setups/global_flexible/global_flexible.py | AkasDutta/veros | 9f530596a0148a398829050017de3e01a71261a0 | [
"MIT"
] | 21 | 2020-01-28T13:13:39.000Z | 2022-02-02T13:46:33.000Z | #!/usr/bin/env python
import os
import h5netcdf
import scipy.ndimage
from veros import veros_routine, veros_kernel, KernelOutput, VerosSetup, runtime_settings as rs, runtime_state as rst
from veros.variables import Variable, allocate
from veros.core.utilities import enforce_boundaries
from veros.core.operators import numpy as npx, update, at
import veros.tools
import veros.time
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_FILES = veros.tools.get_assets("global_flexible", os.path.join(BASE_PATH, "assets.json"))
class GlobalFlexibleResolutionSetup(VerosSetup):
    """
    Global model with flexible resolution.
    """

    # global settings
    min_depth = 10.0  # depth of the shallowest layer (m)
    max_depth = 5400.0  # total ocean depth (m)
    # Grid refinement factors relative to the nominal spacing; None disables
    # refinement at that latitude band.
    equatorial_grid_spacing_factor = 0.5
    polar_grid_spacing_factor = None
    @veros_routine
    def set_parameter(self, state):
        """Configure grid dimensions, time stepping, physics
        parameterizations (friction, isoneutral mixing, TKE, EKE, IDEMIX)
        and register the custom monthly forcing variables used by this setup.
        """
        settings = state.settings
        settings.identifier = "UNNAMED"

        settings.nx = 360
        settings.ny = 160
        settings.nz = 60
        settings.dt_mom = settings.dt_tracer = 900
        settings.runlen = 86400 * 10

        settings.x_origin = 90.0
        settings.y_origin = -80.0

        settings.coord_degree = True
        settings.enable_cyclic_x = True

        # friction
        settings.enable_hor_friction = True
        settings.A_h = 5e4
        settings.enable_hor_friction_cos_scaling = True
        settings.hor_friction_cosPower = 1
        settings.enable_tempsalt_sources = True
        settings.enable_implicit_vert_friction = True

        settings.eq_of_state_type = 5

        # isoneutral
        settings.enable_neutral_diffusion = True
        settings.K_iso_0 = 1000.0
        settings.K_iso_steep = 50.0
        settings.iso_dslope = 0.005
        settings.iso_slopec = 0.005
        settings.enable_skew_diffusion = True

        # tke
        settings.enable_tke = True
        settings.c_k = 0.1
        settings.c_eps = 0.7
        settings.alpha_tke = 30.0
        settings.mxl_min = 1e-8
        settings.tke_mxl_choice = 2
        settings.kappaM_min = 2e-4
        settings.kappaH_min = 2e-5
        settings.enable_kappaH_profile = True
        settings.enable_tke_superbee_advection = True

        # eke
        settings.enable_eke = True
        settings.eke_k_max = 1e4
        settings.eke_c_k = 0.4
        settings.eke_c_eps = 0.5
        settings.eke_cross = 2.0
        settings.eke_crhin = 1.0
        settings.eke_lmin = 100.0
        settings.enable_eke_superbee_advection = True
        settings.enable_eke_isopycnal_diffusion = True

        # idemix
        settings.enable_idemix = False
        settings.enable_eke_diss_surfbot = True
        settings.eke_diss_surfbot_frac = 0.2
        settings.enable_idemix_superbee_advection = True
        settings.enable_idemix_hor_diffusion = True

        # custom variables: monthly climatologies interpolated at runtime
        state.dimensions["nmonths"] = 12
        state.var_meta.update(
            t_star=Variable("t_star", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
            s_star=Variable("s_star", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
            qnec=Variable("qnec", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
            qnet=Variable("qnet", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
            qsol=Variable("qsol", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
            divpen_shortwave=Variable("divpen_shortwave", ("zt",), "", "", time_dependent=False),
            taux=Variable("taux", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
            tauy=Variable("tauy", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
        )
    def _get_data(self, var, idx=None):
        """Read a variable from the forcing file and return it transposed.

        Args:
            var: Name of the netCDF variable to read.
            idx: Optional index tuple given in model (x, y, ...) order; it is
                reversed to match the file's storage order before indexing.
                None reads the full array.

        Returns:
            Array with axes transposed back to model order.
        """
        if idx is None:
            idx = Ellipsis
        else:
            idx = idx[::-1]

        kwargs = {}
        if rst.proc_num > 1:
            # Parallel runs open the file collectively via MPI-IO.
            kwargs.update(
                driver="mpio",
                comm=rs.mpi_comm,
            )

        with h5netcdf.File(DATA_FILES["forcing"], "r", **kwargs) as forcing_file:
            var_obj = forcing_file.variables[var]
            return npx.array(var_obj[idx]).T
    @veros_routine(dist_safe=False, local_variables=["dxt", "dyt", "dzt"])
    def set_grid(self, state):
        """Set grid spacings: uniform in x, Vinokur-stretched in y and z.

        Raises:
            ValueError: If ny is odd (the two-sided Vinokur grid requires an
                even cell count).
        """
        vs = state.variables
        settings = state.settings

        if settings.ny % 2:
            raise ValueError("ny has to be an even number of grid cells")

        # Uniform longitudinal spacing spanning the full 360 degrees.
        vs.dxt = update(vs.dxt, at[...], 360.0 / settings.nx)

        # Optional refinement at the equator / poles, expressed as a fraction
        # of the nominal spacing (160 degrees of latitude over ny cells).
        if self.equatorial_grid_spacing_factor is not None:
            eq_spacing = self.equatorial_grid_spacing_factor * 160.0 / settings.ny
        else:
            eq_spacing = None

        if self.polar_grid_spacing_factor is not None:
            polar_spacing = self.polar_grid_spacing_factor * 160.0 / settings.ny
        else:
            polar_spacing = None

        vs.dyt = update(
            vs.dyt,
            at[2:-2],
            veros.tools.get_vinokur_grid_steps(
                settings.ny, 160.0, eq_spacing, upper_stepsize=polar_spacing, two_sided_grid=True
            ),
        )
        # Vertical spacing stretched between min_depth and max_depth;
        # NOTE(review): refine_towards="lower" presumably concentrates levels
        # near the surface — confirm against get_vinokur_grid_steps docs.
        vs.dzt = veros.tools.get_vinokur_grid_steps(settings.nz, self.max_depth, self.min_depth, refine_towards="lower")
@veros_routine
def set_coriolis(self, state):
vs = state.variables
settings = state.settings
vs.coriolis_t = update(
vs.coriolis_t, at[...], 2 * settings.omega * npx.sin(vs.yt[npx.newaxis, :] / 180.0 * settings.pi)
)
def _shift_longitude_array(self, vs, lon, arr):
wrap_i = npx.where((lon[:-1] < vs.xt.min()) & (lon[1:] >= vs.xt.min()))[0][0]
new_lon = npx.concatenate((lon[wrap_i:-1], lon[:wrap_i] + 360.0))
new_arr = npx.concatenate((arr[wrap_i:-1, ...], arr[:wrap_i, ...]))
return new_lon, new_arr
    @veros_routine(dist_safe=False, local_variables=["kbot", "xt", "yt", "zt"])
    def set_topography(self, state):
        """Derive the bathymetry index field `kbot` from the topography file.

        Smooths the raw topography to the model resolution, interpolates it
        onto the model grid, converts depths to level indices and removes
        enclosed marginal seas.
        """
        vs = state.variables
        settings = state.settings

        with h5netcdf.File(DATA_FILES["topography"], "r") as topography_file:
            topo_x, topo_y, topo_z = (npx.array(topography_file.variables[k], dtype="float").T for k in ("x", "y", "z"))
        # Clamp land elevations to sea level.
        topo_z = npx.minimum(topo_z, 0.0)

        # smooth topography to match grid resolution
        gaussian_sigma = (0.5 * len(topo_x) / settings.nx, 0.5 * len(topo_y) / settings.ny)
        topo_z_smoothed = scipy.ndimage.gaussian_filter(topo_z, sigma=gaussian_sigma)
        # Keep original coastline: cells at/above -1 m stay land after smoothing.
        topo_z_smoothed = npx.where(topo_z >= -1, 0, topo_z_smoothed)

        topo_x_shifted, topo_z_shifted = self._shift_longitude_array(vs, topo_x, topo_z_smoothed)
        coords = (vs.xt[2:-2], vs.yt[2:-2])
        z_interp = allocate(state.dimensions, ("xt", "yt"), local=False)
        z_interp = update(
            z_interp,
            at[2:-2, 2:-2],
            veros.tools.interpolate((topo_x_shifted, topo_y), topo_z_shifted, coords, kind="nearest", fill=False),
        )

        # Convert interpolated depth to the nearest vertical level index
        # (1-based; 0 marks land).
        depth_levels = 1 + npx.argmin(npx.abs(z_interp[:, :, npx.newaxis] - vs.zt[npx.newaxis, npx.newaxis, :]), axis=2)
        vs.kbot = update(vs.kbot, at[2:-2, 2:-2], npx.where(z_interp < 0.0, depth_levels, 0)[2:-2, 2:-2])
        vs.kbot = npx.where(vs.kbot < settings.nz, vs.kbot, 0)

        vs.kbot = enforce_boundaries(vs.kbot, settings.enable_cyclic_x, local=True)

        # remove marginal seas
        # (dilate to close 1-cell passages, fill holes, undo dilation)
        marginal = scipy.ndimage.binary_erosion(
            scipy.ndimage.binary_fill_holes(scipy.ndimage.binary_dilation(vs.kbot == 0))
        )
        vs.kbot = npx.where(marginal, 0, vs.kbot)
    @veros_routine
    def set_initial_conditions(self, state):
        """Interpolate initial T/S fields and monthly surface forcing
        climatologies (wind stress, heat/freshwater fluxes, SST/SSS) from the
        forcing file onto the model grid, and precompute the shortwave
        penetration profile.
        """
        vs = state.variables
        settings = state.settings

        # Shortwave penetration: partition and e-folding depths (m).
        rpart_shortwave = 0.58
        efold1_shortwave = 0.35
        efold2_shortwave = 23.0

        t_grid = (vs.xt[2:-2], vs.yt[2:-2], vs.zt)
        xt_forc, yt_forc, zt_forc = (self._get_data(k) for k in ("xt", "yt", "zt"))
        zt_forc = zt_forc[::-1]

        # coordinates must be monotonous for this to work
        assert npx.diff(xt_forc).all() > 0
        assert npx.diff(yt_forc).all() > 0

        # determine slice to read from forcing file
        data_subset = (
            slice(
                max(0, int(npx.argmax(xt_forc >= vs.xt.min())) - 1),
                len(xt_forc) - max(0, int(npx.argmax(xt_forc[::-1] <= vs.xt.max())) - 1),
            ),
            slice(
                max(0, int(npx.argmax(yt_forc >= vs.yt.min())) - 1),
                len(yt_forc) - max(0, int(npx.argmax(yt_forc[::-1] <= vs.yt.max())) - 1),
            ),
            Ellipsis,
        )

        xt_forc = xt_forc[data_subset[0]]
        yt_forc = yt_forc[data_subset[1]]

        # initial conditions
        temp_raw = self._get_data("temperature", idx=data_subset)[..., ::-1]
        temp_data = veros.tools.interpolate((xt_forc, yt_forc, zt_forc), temp_raw, t_grid)
        vs.temp = update(vs.temp, at[2:-2, 2:-2, :, :], (temp_data * vs.maskT[2:-2, 2:-2, :])[..., npx.newaxis])

        salt_raw = self._get_data("salinity", idx=data_subset)[..., ::-1]
        salt_data = veros.tools.interpolate((xt_forc, yt_forc, zt_forc), salt_raw, t_grid)
        vs.salt = update(vs.salt, at[2:-2, 2:-2, :, :], (salt_data * vs.maskT[2:-2, 2:-2, :])[..., npx.newaxis])

        # wind stress on MIT grid
        time_grid = (vs.xt[2:-2], vs.yt[2:-2], npx.arange(12))
        taux_raw = self._get_data("tau_x", idx=data_subset)
        taux_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), taux_raw, time_grid)
        vs.taux = update(vs.taux, at[2:-2, 2:-2, :], taux_data)

        tauy_raw = self._get_data("tau_y", idx=data_subset)
        tauy_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), tauy_raw, time_grid)
        vs.tauy = update(vs.tauy, at[2:-2, 2:-2, :], tauy_data)

        vs.taux = enforce_boundaries(vs.taux, settings.enable_cyclic_x)
        vs.tauy = enforce_boundaries(vs.tauy, settings.enable_cyclic_x)

        # Qnet and dQ/dT and Qsol
        qnet_raw = self._get_data("q_net", idx=data_subset)
        qnet_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), qnet_raw, time_grid)
        vs.qnet = update(vs.qnet, at[2:-2, 2:-2, :], -qnet_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])

        qnec_raw = self._get_data("dqdt", idx=data_subset)
        qnec_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), qnec_raw, time_grid)
        vs.qnec = update(vs.qnec, at[2:-2, 2:-2, :], qnec_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])

        qsol_raw = self._get_data("swf", idx=data_subset)
        qsol_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), qsol_raw, time_grid)
        vs.qsol = update(vs.qsol, at[2:-2, 2:-2, :], -qsol_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])

        # SST and SSS
        sst_raw = self._get_data("sst", idx=data_subset)
        sst_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), sst_raw, time_grid)
        vs.t_star = update(vs.t_star, at[2:-2, 2:-2, :], sst_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])

        sss_raw = self._get_data("sss", idx=data_subset)
        sss_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), sss_raw, time_grid)
        vs.s_star = update(vs.s_star, at[2:-2, 2:-2, :], sss_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])

        if settings.enable_idemix:
            # NOTE(review): this branch uses `vs.nx`/`vs.ny`/`vs.rho_0` where
            # the rest of the file uses `settings.*`, and assigns arrays
            # in place rather than via `update` — verify it still runs when
            # enable_idemix is switched on.
            tidal_energy_raw = self._get_data("tidal_energy", idx=data_subset)
            tidal_energy_data = veros.tools.interpolate((xt_forc, yt_forc), tidal_energy_raw, t_grid[:-1])
            mask_x, mask_y = (i + 2 for i in npx.indices((vs.nx, vs.ny)))
            mask_z = npx.maximum(0, vs.kbot[2:-2, 2:-2] - 1)
            tidal_energy_data[:, :] *= vs.maskW[mask_x, mask_y, mask_z] / vs.rho_0
            vs.forc_iw_bottom[2:-2, 2:-2] = tidal_energy_data

        """
        Initialize penetration profile for solar radiation and store divergence in divpen
        note that pen is set to 0.0 at the surface instead of 1.0 to compensate for the
        shortwave part of the total surface flux
        """
        swarg1 = vs.zw / efold1_shortwave
        swarg2 = vs.zw / efold2_shortwave
        pen = rpart_shortwave * npx.exp(swarg1) + (1.0 - rpart_shortwave) * npx.exp(swarg2)
        pen = update(pen, at[-1], 0.0)

        vs.divpen_shortwave = update(vs.divpen_shortwave, at[1:], (pen[1:] - pen[:-1]) / vs.dzt[1:])
        vs.divpen_shortwave = update(vs.divpen_shortwave, at[0], pen[0] / vs.dzt[0])
@veros_routine
def set_forcing(self, state):
vs = state.variables
vs.update(set_forcing_kernel(state))
    @veros_routine
    def set_diagnostics(self, state):
        """Configure output/sampling frequencies (in seconds) and the set of
        variables written by the averages diagnostic, conditional on which
        parameterizations are enabled.
        """
        settings = state.settings
        diagnostics = state.diagnostics

        diagnostics["cfl_monitor"].output_frequency = settings.dt_tracer * 100
        diagnostics["tracer_monitor"].output_frequency = settings.dt_tracer * 100
        diagnostics["snapshot"].output_frequency = 30 * 86400.0
        diagnostics["overturning"].output_frequency = 360 * 86400
        diagnostics["overturning"].sampling_frequency = 86400.0
        diagnostics["energy"].output_frequency = 360 * 86400
        diagnostics["energy"].sampling_frequency = 10 * settings.dt_tracer
        diagnostics["averages"].output_frequency = 30 * 86400
        diagnostics["averages"].sampling_frequency = settings.dt_tracer

        average_vars = [
            "surface_taux",
            "surface_tauy",
            "forc_temp_surface",
            "forc_salt_surface",
            "psi",
            "temp",
            "salt",
            "u",
            "v",
            "w",
            "Nsqr",
            "Hd",
            "rho",
            "kappaH",
        ]
        # Extend the averaged variables with parameterization-specific fields.
        if settings.enable_skew_diffusion:
            average_vars += ["B1_gm", "B2_gm"]
        if settings.enable_TEM_friction:
            average_vars += ["kappa_gm", "K_diss_gm"]
        if settings.enable_tke:
            average_vars += ["tke", "Prandtlnumber", "mxl", "tke_diss", "forc_tke_surface", "tke_surf_corr"]
        if settings.enable_idemix:
            average_vars += ["E_iw", "forc_iw_surface", "iw_diss", "c0", "v0"]
        if settings.enable_eke:
            average_vars += ["eke", "K_gm", "L_rossby", "L_rhines"]

        diagnostics["averages"].output_variables = average_vars
    @veros_routine
    def after_timestep(self, state):
        """Hook called after each time step; intentionally a no-op here."""
        pass
@veros_kernel
def set_forcing_kernel(state):
    """Compute the surface forcing for the current model time.

    Linearly interpolates the monthly climatologies in time, restores SST/SSS
    towards the climatology, applies a simple ice mask, and distributes the
    shortwave flux over depth via the precomputed penetration profile.

    Returns:
        KernelOutput with the updated surface forcing fields.
    """
    vs = state.variables
    settings = state.settings

    t_rest = 30.0 * 86400.0  # surface restoring timescale (s)
    cp_0 = 3991.86795711963  # J/kg /K

    year_in_seconds = veros.time.convert_time(1.0, "years", "seconds")
    # Indices of the two bracketing months and their interpolation weights.
    (n1, f1), (n2, f2) = veros.tools.get_periodic_interval(vs.time, year_in_seconds, year_in_seconds / 12.0, 12)

    # linearly interpolate wind stress and shift from MITgcm U/V grid to this grid
    vs.surface_taux = update(vs.surface_taux, at[:-1, :], f1 * vs.taux[1:, :, n1] + f2 * vs.taux[1:, :, n2])
    vs.surface_tauy = update(vs.surface_tauy, at[:, :-1], f1 * vs.tauy[:, 1:, n1] + f2 * vs.tauy[:, 1:, n2])

    if settings.enable_tke:
        # TKE surface forcing ~ friction velocity cubed, u* = sqrt(|tau|/rho).
        vs.forc_tke_surface = update(
            vs.forc_tke_surface,
            at[1:-1, 1:-1],
            npx.sqrt(
                (0.5 * (vs.surface_taux[1:-1, 1:-1] + vs.surface_taux[:-2, 1:-1]) / settings.rho_0) ** 2
                + (0.5 * (vs.surface_tauy[1:-1, 1:-1] + vs.surface_tauy[1:-1, :-2]) / settings.rho_0) ** 2
            )
            ** (3.0 / 2.0),
        )

    # W/m^2 K kg/J m^3/kg = K m/s
    t_star_cur = f1 * vs.t_star[..., n1] + f2 * vs.t_star[..., n2]
    qqnec = f1 * vs.qnec[..., n1] + f2 * vs.qnec[..., n2]
    qqnet = f1 * vs.qnet[..., n1] + f2 * vs.qnet[..., n2]
    vs.forc_temp_surface = (
        (qqnet + qqnec * (t_star_cur - vs.temp[..., -1, vs.tau])) * vs.maskT[..., -1] / cp_0 / settings.rho_0
    )
    s_star_cur = f1 * vs.s_star[..., n1] + f2 * vs.s_star[..., n2]
    vs.forc_salt_surface = 1.0 / t_rest * (s_star_cur - vs.salt[..., -1, vs.tau]) * vs.maskT[..., -1] * vs.dzt[-1]

    # apply simple ice mask: suppress fluxes where SST is at/below freezing
    # and the surface is being cooled.
    mask1 = vs.temp[:, :, -1, vs.tau] * vs.maskT[:, :, -1] > -1.8
    mask2 = vs.forc_temp_surface > 0
    ice = npx.logical_or(mask1, mask2)
    vs.forc_temp_surface *= ice
    vs.forc_salt_surface *= ice

    # solar radiation
    if settings.enable_tempsalt_sources:
        vs.temp_source = (
            (f1 * vs.qsol[..., n1, None] + f2 * vs.qsol[..., n2, None])
            * vs.divpen_shortwave[None, None, :]
            * ice[..., None]
            * vs.maskT[..., :]
            / cp_0
            / settings.rho_0
        )

    return KernelOutput(
        surface_taux=vs.surface_taux,
        surface_tauy=vs.surface_tauy,
        temp_source=vs.temp_source,
        forc_tke_surface=vs.forc_tke_surface,
        forc_temp_surface=vs.forc_temp_surface,
        forc_salt_surface=vs.forc_salt_surface,
    )
| 40.577938 | 120 | 0.598428 | 13,916 | 0.82241 | 0 | 0 | 15,291 | 0.90367 | 0 | 0 | 1,774 | 0.10484 |
cc05fe3911cd4a15db68fbad7e204898aea18d54 | 2,000 | py | Python | gamma-correction/gamma_correction.py | svinkapeppa/image_analysis | 1b19f4646e784cac30a9fdabc8404fd5e6057996 | [
"MIT"
] | null | null | null | gamma-correction/gamma_correction.py | svinkapeppa/image_analysis | 1b19f4646e784cac30a9fdabc8404fd5e6057996 | [
"MIT"
] | null | null | null | gamma-correction/gamma_correction.py | svinkapeppa/image_analysis | 1b19f4646e784cac30a9fdabc8404fd5e6057996 | [
"MIT"
] | null | null | null | import argparse
import os
import cv2
import numpy as np
def gamma_correction(source_path, destination_path, a, b, version):
    """Apply gamma correction ``out = a * (in / 255) ** b`` to an image.

    Args:
        source_path: Path of the image to read.
        destination_path: Path where the corrected image is written.
        a: Gain factor applied after normalisation.
        b: Gamma exponent.
        version: 'colored' to process all channels, 'grayscale' to load a
            single-channel image.

    Raises:
        RuntimeError: If `version` is not 'colored' or 'grayscale'.
        ValueError: If the image file cannot be read.
    """
    # Load image into memory
    # Algorithm can work correctly with colored and grayscale images
    if version == 'colored':
        original_image = cv2.imread(source_path)
    elif version == 'grayscale':
        original_image = cv2.imread(source_path, cv2.IMREAD_GRAYSCALE)
    else:
        # Other types are not supported
        raise RuntimeError('Wrong type of image')

    # cv2.imread returns None (instead of raising) for unreadable or
    # corrupt files; fail fast with a clear message.
    if original_image is None:
        raise ValueError('Could not read image: {}'.format(source_path))

    # Normalise to [0, 1], apply the power law, then clip overshoot on both
    # ends (the original only clipped values above 1).
    processed_image = a * ((original_image / 255) ** b)
    processed_image = np.clip(processed_image, 0.0, 1.0)
    # Scale image back to [0 - 255]
    processed_image = processed_image * 255
    # Round to nearest integer so the uint8 cast does not truncate.
    processed_image = np.rint(processed_image)
    # Convert to `np.uint8`, so `imwrite` will save image correctly
    cv2.imwrite(destination_path, processed_image.astype(np.uint8))
if __name__ == '__main__':
    # Command-line entry point: parse paths and gamma parameters, validate
    # the input file exists, then run the correction.
    parser = argparse.ArgumentParser(description='Perform gamma correction.')
    parser.add_argument('source_path', metavar='source_path', type=str,
                        help='Path to the original image.')
    parser.add_argument('destination_path', metavar='destination_path', type=str,
                        help='Path to the processed image.')
    parser.add_argument('a', metavar='a', type=float,
                        help='First parameter of gamma correction algorithm.')
    parser.add_argument('b', metavar='b', type=float,
                        help='Second parameter of gamma correction algorithm.')
    parser.add_argument('--version', type=str, default='colored',
                        help='Shows type of image. Variants: colored / grayscale.')
    args = parser.parse_args()

    if not os.path.exists(args.source_path):
        raise FileNotFoundError

    gamma_correction(args.source_path, args.destination_path, args.a, args.b, args.version)
| 39.215686 | 91 | 0.675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.3515 |
cc06737fd80122fed31e5f3c512bcb4a1a2e717e | 2,154 | py | Python | src/cirrus_ngs/deprecated/dnaSeq/WGSPipelineManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | 1 | 2018-03-31T18:20:27.000Z | 2018-03-31T18:20:27.000Z | src/cirrus_ngs/deprecated/dnaSeq/WGSPipelineManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | null | null | null | src/cirrus_ngs/deprecated/dnaSeq/WGSPipelineManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
] | null | null | null | __author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import os
from util import YamlFileMaker
from util import QstatParser
from cfnCluster import ConnectionManager
import sys
workspace = "/shared/workspace/Pipelines/"
#log_dir = "/shared/workspace/data_archive/DNASeq/{}/logs"
log_dir = "/shared/workspace/logs/DNASeq/{}"
## executing WGS pipeline with the specific yaml file
def execute(ssh_client, project_name, analysis_steps, s3_input_files_address,
            sample_list, group_name, s3_output_files_address, email):
    """Build the project YAML, copy it to the cluster head node and submit
    the WGS pipeline via qsub.

    Args:
        ssh_client: Open SSH connection to the cluster head node.
        project_name: Name used for the YAML file and the log directory.
        analysis_steps: Pipeline steps to run.
        s3_input_files_address: S3 prefix holding the input files.
        sample_list: Samples to process.
        group_name: Sample group name.
        s3_output_files_address: S3 prefix for the results.
        email: Notification address (currently unused beyond YAML contents).
    """
    yaml_file = project_name + ".yaml"

    # Re-derive the log directory from the template on every call. The
    # previous code formatted the module-level `log_dir` in place, so a
    # second call with a different project silently reused the first
    # project's log directory (the "{}" placeholder was already consumed).
    global log_dir
    log_dir = "/shared/workspace/logs/DNASeq/{}".format(project_name)

    print("making the yaml file ...")
    YamlFileMaker.make_yaml_file(yaml_file, project_name, analysis_steps, s3_input_files_address,
                                 sample_list, group_name, s3_output_files_address, "hg19", "NA")

    print("copying yaml files to remote master node...")
    ConnectionManager.copy_file(ssh_client, yaml_file, workspace + "yaml_examples")
    # Remove the local copy once it has been transferred.
    os.remove(yaml_file)

    print("executing pipeline...")
    ConnectionManager.execute_command(ssh_client, "qsub -o /dev/null -e /dev/null " + workspace + "scripts/run.sh "
                                      + workspace + "yaml_examples/" + yaml_file + " " + log_dir + " " + "WGSPipeline.py")
## checking your jobs status
def check_status(ssh_client, job_name):
    """Query qstat on the cluster and report the status of `job_name`."""
    print("checking processing status")
    qstat_output = ConnectionManager.execute_command(ssh_client, "qstat")
    job_ids = QstatParser.get_job_ids(qstat_output)

    # Fetch the detailed qstat record for each job and pair it with its id row.
    job_info = []
    for job in job_ids:
        details = ConnectionManager.execute_command(ssh_client, "qstat -j %s" % job[0])
        job_info.append(job + [details])

    global log_dir
    logs = ConnectionManager.list_dir(ssh_client, log_dir)
    QstatParser.parse_qstat(job_info, job_name, logs)
## checking your jobs status
def check_jobs_status(ssh_client):
    """Print the cluster's qstat output for all queued/running jobs."""
    print("checking jobs status")
    ConnectionManager.execute_command(ssh_client, "qstat")
## checking your host status
def check_host_status(ssh_client):
    """Print the cluster's qhost output (per-node load and memory)."""
    print("checking qhost status")
    ConnectionManager.execute_command(ssh_client, "qhost")
| 35.311475 | 115 | 0.724234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 628 | 0.291551 |
cc0b0aa90c189510f88d632e9c48035e5079064b | 960 | py | Python | code/12_paketierung/intents/functions/location/intent_location.py | padmalcom/AISpeechAssistant | b7501a23a8f513acb5043f3c7bb06df129bdc2cc | [
"Apache-2.0"
] | 1 | 2021-09-08T09:21:16.000Z | 2021-09-08T09:21:16.000Z | code/12_paketierung/intents/functions/location/intent_location.py | padmalcom/AISpeechAssistant | b7501a23a8f513acb5043f3c7bb06df129bdc2cc | [
"Apache-2.0"
] | null | null | null | code/12_paketierung/intents/functions/location/intent_location.py | padmalcom/AISpeechAssistant | b7501a23a8f513acb5043f3c7bb06df129bdc2cc | [
"Apache-2.0"
] | 2 | 2022-02-06T09:54:40.000Z | 2022-03-01T07:52:51.000Z | from loguru import logger
from chatbot import register_call
import global_variables
import random
import os
import yaml
import geocoder
import constants
@register_call("location")
def location(session_id = "general", dummy=0):
    """Return a localized "you are here" sentence based on the caller's IP.

    Args:
        session_id: Chatbot session identifier (required by the framework).
        dummy: Unused placeholder argument (required by the framework).

    Returns:
        A sentence naming the current city, or "" if the intent
        configuration could not be read.
    """
    config_path = constants.find_data_file(os.path.join('intents','functions','location','config_location.yml'))
    cfg = None
    with open(config_path, "r", encoding='utf-8') as ymlfile:
        # safe_load is sufficient for a plain config file and avoids
        # executing arbitrary YAML tags (unlike FullLoader).
        cfg = yaml.safe_load(ymlfile)

    if not cfg:
        logger.error("Konnte Konfigurationsdatei für die Lokalisierung nicht lesen.")
        return ""

    # Read the assistant language from the global configuration.
    LANGUAGE = global_variables.voice_assistant.cfg['assistant']['language']
    YOU_ARE_HERE = random.choice(cfg['intent']['location'][LANGUAGE]['youarehere'])

    # Determine the location from the caller's IP address.
    loc = geocoder.ip('me')
    logger.debug("Random template {} and city {}", YOU_ARE_HERE, loc.city)
    return YOU_ARE_HERE.format(loc.city)
cc0ba0fc63266122471cb93b1a794001da14190a | 531 | py | Python | sagemaker/ssedata.py | cleveranjos/Rapid-ML-Gateway | 10a14abfce3351791331642c47eddfbf622e76d2 | [
"MIT"
] | 147 | 2017-06-26T11:25:12.000Z | 2022-03-21T22:59:25.000Z | sagemaker/ssedata.py | cleveranjos/Rapid-ML-Gateway | 10a14abfce3351791331642c47eddfbf622e76d2 | [
"MIT"
] | 28 | 2017-06-30T14:00:36.000Z | 2021-03-08T14:13:57.000Z | sagemaker/ssedata.py | cleveranjos/Rapid-ML-Gateway | 10a14abfce3351791331642c47eddfbf622e76d2 | [
"MIT"
] | 123 | 2017-06-27T14:08:18.000Z | 2022-01-05T06:26:31.000Z | from enum import Enum
class ArgType(Enum):
    """
    Represents data types that can be used
    as arguments in different script functions.
    """
    Undefined = -1  # type could not be determined
    Empty = 0       # no data
    String = 1
    Numeric = 2
    Mixed = 3       # mixture of string and numeric values
class ReturnType(Enum):
    """
    Represents return types that can
    be used in script evaluation.
    """
    Undefined = -1
    String = 0
    Numeric = 1
    Dual = 2  # value carries both a string and a numeric representation
class FunctionType(Enum):
    """
    Represents function types.
    """
    Scalar = 0       # one output row per input row
    Aggregation = 1  # one output row per input column/group
    Tensor = 2       # output shape mirrors the full input table
| 15.617647 | 47 | 0.585687 | 500 | 0.94162 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.425612 |
cc0d3e51514d98fa6f88a3ee098b80505c67d4d5 | 10,188 | py | Python | ferminet/utils/system.py | shishaochen/ferminet | 319cfd2a19c243a287e6e252ef590329fdbd15ea | [
"Apache-2.0"
] | 469 | 2020-10-19T16:40:06.000Z | 2022-03-31T01:43:19.000Z | ferminet/utils/system.py | shishaochen/ferminet | 319cfd2a19c243a287e6e252ef590329fdbd15ea | [
"Apache-2.0"
] | 35 | 2020-11-20T04:31:59.000Z | 2022-03-28T08:34:27.000Z | ferminet/utils/system.py | shishaochen/ferminet | 319cfd2a19c243a287e6e252ef590329fdbd15ea | [
"Apache-2.0"
] | 86 | 2020-10-19T19:57:59.000Z | 2022-03-30T07:35:17.000Z | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create different kinds of systems."""
from typing import Sequence
import attr
from ferminet.utils import elements
from ferminet.utils import units as unit_conversion
import numpy as np
# Default bond lengths in angstrom for some diatomics.
# Bond lengths from either the G3 dataset:
# 1. http://www.cse.anl.gov/OldCHMwebsiteContent/compmat/comptherm.htm
# 2. L. A. Curtiss, P. C. Redfern, K. Raghavachari, and J. A. Pople,
# J. Chem. Phys, 109, 42 (1998).
# or from NIST (https://cccbdb.nist.gov/diatomicexpbondx.asp).
diatomic_bond_lengths = {
'BeH': 1.348263,
'CN': 1.134797,
'ClF': 1.659091,
'F2': 1.420604,
'H2': 0.737164,
'HCl': 1.2799799,
'Li2': 2.77306,
'LiH': 1.639999,
'N2': 1.129978,
'NH': 1.039428,
'CO': 1.150338,
'BH': 1.2324,
'PN': 1.491,
'AlH': 1.648,
'AlN': 1.786,
}
# Default spin polarisation for a few diatomics of interest.
# Otherwise default to either singlet (doublet) for even (odd) numbers of
# electrons. Units: number of unpaired electrons.
diatomic_spin_polarisation = {
'B2': 2,
'O2': 2,
'NH': 2,
'AlN': 2,
}
@attr.s
class Atom:  # pytype: disable=invalid-function-definition
  """Atom information for Hamiltonians.

  The nuclear charge is inferred from the symbol if not given, in which case the
  symbol must be the IUPAC symbol of the desired element.

  Attributes:
    symbol: Element symbol.
    coords: An iterable of atomic coordinates. Always a list of floats and in
      bohr after initialisation. Default: place atom at origin.
    charge: Nuclear charge. Default: nuclear charge (atomic number) of atom of
      the given name.
    atomic_number: Atomic number associated with element. Default: atomic number
      of element of the given symbol. Should match charge unless fractional
      nuclear charges are being used.
    units: String giving units of coords. Either bohr or angstrom. Default:
      bohr. If angstrom, coords are converted to be in bohr and units to the
      string 'bohr'.
    coords_angstrom: list of atomic coordinates in angstrom.
    coords_array: Numpy array of atomic coordinates in bohr.
    element: elements.Element corresponding to the symbol.
  """
  symbol = attr.ib()
  # Coordinates are normalised to a tuple of floats on construction.
  coords = attr.ib(
      converter=lambda xs: tuple(float(x) for x in xs),
      default=(0.0, 0.0, 0.0))  # type: Sequence[float]
  charge = attr.ib(converter=float)
  atomic_number = attr.ib(converter=int)
  units = attr.ib(
      default='bohr', validator=attr.validators.in_(['bohr', 'angstrom']))

  @charge.default
  def _set_default_charge(self):
    # Default nuclear charge is the element's atomic number.
    return self.element.atomic_number

  @atomic_number.default
  def _set_default_atomic_number(self):
    return self.element.atomic_number

  def __attrs_post_init__(self):
    # Normalise coordinates to bohr so downstream code sees one unit system.
    if self.units == 'angstrom':
      self.coords = [unit_conversion.angstrom2bohr(x) for x in self.coords]
      self.units = 'bohr'

  @property
  def coords_angstrom(self):
    """Atomic coordinates converted back to angstrom."""
    return [unit_conversion.bohr2angstrom(x) for x in self.coords]

  @property
  def coords_array(self):
    """Coordinates as a lazily-cached numpy array (in bohr)."""
    if not hasattr(self, '_coords_arr'):
      self._coords_arr = np.array(self.coords)
    return self._coords_arr

  @property
  def element(self):
    """elements.Element record for this atom's symbol."""
    return elements.SYMBOLS[self.symbol]
def atom(symbol, spins=None, charge=0):
  """Return configuration for a single atom.

  Args:
    symbol: The atomic symbol from the periodic table
    spins (optional): A tuple with the number of spin-up and spin-down electrons
    charge (optional): If zero (default), create a neutral atom, otherwise
      create an anion if charge is negative or cation if charge is positive.

  Returns:
    A list with a single Atom object located at zero, and a tuple with the spin
    configuration of the electrons.

  Raises:
    ValueError: If the requested cation charge exceeds the atomic number.
  """
  atomic_number = elements.SYMBOLS[symbol].atomic_number
  if charge > atomic_number:
    raise ValueError('Cannot have a cation with charge larger than the '
                     'atomic number. Charge: {}, Atomic Number: {}'.format(
                         charge, atomic_number))
  if spins is None:
    nelectrons = atomic_number - charge
    # Ground-state spin polarisation of the species with this electron count.
    spin_polarisation = elements.ATOMIC_NUMS[nelectrons].spin_config
    # Bug fix: the alpha count must be based on the number of *electrons*,
    # not the atomic number — using atomic_number over-counted alpha
    # electrons for ions with |charge| >= 2 (e.g. Mg2+ gave (6, 4) instead
    # of (5, 5)).
    nalpha = (nelectrons + spin_polarisation) // 2
    spins = (nalpha, nelectrons - nalpha)
  return [Atom(symbol=symbol, coords=(0.0, 0.0, 0.0))], spins
def diatomic(symbol1, symbol2, bond_length, spins=None, charge=0, units='bohr'):
  """Return configuration for a diatomic molecule."""
  if spins is None:
    # Default to a singlet (even electron count) or doublet (odd).
    n_electrons = (elements.SYMBOLS[symbol1].atomic_number
                   + elements.SYMBOLS[symbol2].atomic_number
                   - charge)
    nalpha = (n_electrons + 1) // 2
    spins = (nalpha, n_electrons - nalpha)
  # Place the two nuclei symmetrically about the origin along z.
  half_bond = bond_length / 2.0
  atoms = [
      Atom(symbol=symbol1, coords=(0.0, 0.0, half_bond), units=units),
      Atom(symbol=symbol2, coords=(0.0, 0.0, -half_bond), units=units),
  ]
  return atoms, spins
def molecule(symbol, bond_length=0.0, units='bohr'):
  """Hardcoded molecular geometries from the original Fermi Net paper.

  Args:
    symbol: Molecule name, e.g. 'N2', 'LiH', 'CH4', 'bicyclobutane'.
    bond_length: Optional bond length override; only valid for diatomics
      (0.0 selects the tabulated default).
    units: Units of `bond_length` ('bohr' or 'angstrom').

  Returns:
    A (list of Atom, spins tuple) pair.

  Raises:
    ValueError: If the symbol is unknown, cannot be split into two element
      symbols, or `bond_length` is given for a non-diatomic.
  """
  if symbol in diatomic_bond_lengths:
    # Homonuclear diatomics are written like 'N2'; heteronuclear ones are
    # camel-case concatenations like 'LiH'.
    if symbol[-1] == '2':
      symbs = [symbol[:-1], symbol[:-1]]
    else:  # Split a camel-case string on the second capital letter
      split_idx = None
      for i in range(1, len(symbol)):
        if split_idx is None and symbol[i].isupper():
          split_idx = i
      if split_idx is None:
        raise ValueError('Cannot find second atomic symbol: {}'.format(symbol))
      symbs = [symbol[:split_idx], symbol[split_idx:]]

    atomic_number_1 = elements.SYMBOLS[symbs[0]].atomic_number
    atomic_number_2 = elements.SYMBOLS[symbs[1]].atomic_number
    total_charge = atomic_number_1 + atomic_number_2
    # Spin state: tabulated polarisation where known, otherwise
    # singlet/doublet depending on electron-count parity.
    if symbol in diatomic_spin_polarisation:
      spin_pol = diatomic_spin_polarisation[symbol]
      spins = ((total_charge + spin_pol) // 2, (total_charge + spin_pol) // 2)
    elif total_charge % 2 == 0:
      spins = (total_charge // 2, total_charge // 2)
    else:
      spins = ((total_charge + 1)// 2, (total_charge - 1) // 2)

    if bond_length == 0.0:
      # Tabulated defaults are stored in angstrom.
      bond_length = diatomic_bond_lengths[symbol]
      units = 'angstrom'
    return diatomic(symbs[0], symbs[1],
                    bond_length,
                    units=units,
                    spins=spins)

  if bond_length != 0.0:
    raise ValueError('Bond length argument only appropriate for diatomics.')

  if symbol == 'CH4':
    return [
        Atom(symbol='C', coords=(0.0, 0.0, 0.0), units='bohr'),
        Atom(symbol='H', coords=(1.18886, 1.18886, 1.18886), units='bohr'),
        Atom(symbol='H', coords=(-1.18886, -1.18886, 1.18886), units='bohr'),
        Atom(symbol='H', coords=(1.18886, -1.18886, -1.18886), units='bohr'),
        Atom(symbol='H', coords=(-1.18886, 1.18886, -1.18886), units='bohr'),
    ], (5, 5)

  if symbol == 'NH3':
    return [
        Atom(symbol='N', coords=(0.0, 0.0, 0.22013), units='bohr'),
        Atom(symbol='H', coords=(0.0, 1.77583, -0.51364), units='bohr'),
        Atom(symbol='H', coords=(1.53791, -0.88791, -0.51364), units='bohr'),
        Atom(symbol='H', coords=(-1.53791, -0.88791, -0.51364), units='bohr'),
    ], (5, 5)

  if symbol in ('C2H4', 'ethene', 'ethylene'):
    return [
        Atom(symbol='C', coords=(0.0, 0.0, 1.26135), units='bohr'),
        Atom(symbol='C', coords=(0.0, 0.0, -1.26135), units='bohr'),
        Atom(symbol='H', coords=(0.0, 1.74390, 2.33889), units='bohr'),
        Atom(symbol='H', coords=(0.0, -1.74390, 2.33889), units='bohr'),
        Atom(symbol='H', coords=(0.0, 1.74390, -2.33889), units='bohr'),
        Atom(symbol='H', coords=(0.0, -1.74390, -2.33889), units='bohr'),
    ], (8, 8)

  if symbol in ('C4H6', 'bicyclobutane'):
    return [
        Atom(symbol='C', coords=(0.0, 2.13792, 0.58661), units='bohr'),
        Atom(symbol='C', coords=(0.0, -2.13792, 0.58661), units='bohr'),
        Atom(symbol='C', coords=(1.41342, 0.0, -0.58924), units='bohr'),
        Atom(symbol='C', coords=(-1.41342, 0.0, -0.58924), units='bohr'),
        Atom(symbol='H', coords=(0.0, 2.33765, 2.64110), units='bohr'),
        Atom(symbol='H', coords=(0.0, 3.92566, -0.43023), units='bohr'),
        Atom(symbol='H', coords=(0.0, -2.33765, 2.64110), units='bohr'),
        Atom(symbol='H', coords=(0.0, -3.92566, -0.43023), units='bohr'),
        Atom(symbol='H', coords=(2.67285, 0.0, -2.19514), units='bohr'),
        Atom(symbol='H', coords=(-2.67285, 0.0, -2.19514), units='bohr'),
    ], (15, 15)

  raise ValueError('Not a recognized molecule: {}'.format(symbol))
def hn(n, r, charge=0, units='bohr'):
  """Return a hydrogen chain with n atoms and separation r."""
  n_electrons = n - charge
  # Singlet for an even electron count, doublet for odd.
  nalpha = (n_electrons + 1) // 2
  spins = (nalpha, n_electrons - nalpha)
  # Centre the chain on the origin along the z axis.
  half_extent = r * (n - 1) / 2.0
  atoms = [
      Atom(symbol='H', coords=(0.0, 0.0, z), units=units)
      for z in np.linspace(-half_extent, half_extent, n)
  ]
  return atoms, spins
def h4_circle(r, theta, units='bohr'):
    """Return 4 hydrogen atoms arranged in a circle, a failure case of CCSD(T)."""
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    # Reflections of (x, y) through both axes, in the same order as before:
    # (+,+), (-,+), (+,-), (-,-).
    signs = ((1, 1), (-1, 1), (1, -1), (-1, -1))
    atoms = [Atom(symbol='H',
                  coords=(sx * x, sy * y, 0.0),
                  units=units)
             for sx, sy in signs]
    return atoms, (2, 2)
| 37.594096 | 80 | 0.641441 | 2,106 | 0.206714 | 0 | 0 | 2,114 | 0.207499 | 0 | 0 | 3,767 | 0.369749 |
cc0fa341d500758a4666f87a2715835852c484f0 | 12,526 | py | Python | vb2py/PythonCard/tools/codeEditor/codeEditorR.rsrc.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/PythonCard/tools/codeEditor/codeEditorR.rsrc.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | vb2py/PythonCard/tools/codeEditor/codeEditorR.rsrc.py | ceprio/xl_vb2py | 899fec0301140fd8bd313e8c80b3fa839b3f5ee4 | [
"BSD-3-Clause"
] | null | null | null | {'application':{'type':'Application',
'name':'codeEditor',
'backgrounds': [
{'type':'Background',
'name':'bgCodeEditor',
'title':'Code Editor R PythonCard Application',
'size':(400, 300),
'statusBar':1,
'visible':0,
'style':['resizeable'],
'visible':0,
'menubar': {'type':'MenuBar',
'menus': [
{'type':'Menu',
'name':'menuFile',
'label':'&File',
'items': [
{'type':'MenuItem',
'name':'menuFileNewWindow',
'label':'New Window',
},
{'type':'MenuItem',
'name':'menuFileNew',
'label':'&New\tCtrl+N',
},
{'type':'MenuItem',
'name':'menuFileOpen',
'label':'&Open\tCtrl+O',
},
{'type':'MenuItem',
'name':'menuFileSave',
'label':'&Save\tCtrl+S',
},
{'type':'MenuItem',
'name':'menuFileSaveAs',
'label':'Save &As...',
},
{'type':'MenuItem',
'name':'fileSep1',
'label':'-',
},
{'type':'MenuItem',
'name':'menuFileCheckSyntax',
'label':'&Check Syntax (Module)\tAlt+F5',
'command':'checkSyntax',
},
{'type':'MenuItem',
'name':'menuFileRun',
'label':'&Run\tCtrl+R',
'command':'fileRun',
},
{'type':'MenuItem',
'name':'menuFileRunWithInterpreter',
'label':'Run with &interpreter\tCtrl+Shift+R',
'command':'fileRunWithInterpreter',
},
{'type':'MenuItem',
'name':'menuFileRunOptions',
'label':'Run Options...',
'command':'fileRunOptions',
},
{'type':'MenuItem',
'name':'fileSep2',
'label':'-',
},
{'type':'MenuItem',
'name':'menuFilePageSetup',
'label':'Page Set&up...',
},
{'type':'MenuItem',
'name':'menuFilePrint',
'label':'&Print...\tCtrl+P',
},
{'type':'MenuItem',
'name':'menuFilePrintPreview',
'label':'Print Pre&view',
},
{'type':'MenuItem',
'name':'fileSep2',
'label':'-',
},
{'type':'MenuItem',
'name':'menuFileExit',
'label':'E&xit\tAlt+X',
'command':'exit',
},
]
},
{'type':'Menu',
'name':'Edit',
'label':'&Edit',
'items': [
{'type':'MenuItem',
'name':'menuEditUndo',
'label':'&Undo\tCtrl+Z',
},
{'type':'MenuItem',
'name':'menuEditRedo',
'label':'&Redo\tCtrl+Y',
},
{'type':'MenuItem',
'name':'editSep1',
'label':'-',
},
{'type':'MenuItem',
'name':'menuEditCut',
'label':'Cu&t\tCtrl+X',
},
{'type':'MenuItem',
'name':'menuEditCopy',
'label':'&Copy\tCtrl+C',
},
{'type':'MenuItem',
'name':'menuEditPaste',
'label':'&Paste\tCtrl+V',
},
{'type':'MenuItem',
'name':'editSep2',
'label':'-',
},
{'type':'MenuItem',
'name':'menuEditFind',
'label':'&Find...\tCtrl+F',
'command':'doEditFind',
},
{'type':'MenuItem',
'name':'menuEditFindNext',
'label':'&Find Next\tF3',
'command':'doEditFindNext',
},
{'type':'MenuItem',
'name':'menuEditFindFiles',
'label':'Find in Files...\tAlt+F3',
'command':'findFiles',
},
{'type':'MenuItem',
'name':'menuEditReplace',
'label':'&Replace...\tCtrl+H',
'command':'doEditFindReplace',
},
{'type':'MenuItem',
'name':'menuEditGoTo',
'label':'&Go To...\tCtrl+G',
'command':'doEditGoTo',
},
{'type':'MenuItem',
'name':'editSep3',
'label':'-',
},
{'type':'MenuItem',
'name':'menuEditReplaceTabs',
'label':'&Replace tabs with spaces',
'command':'doEditReplaceTabs',
},
{'type':'MenuItem',
'name':'editSep3',
'label':'-',
},
{'type':'MenuItem',
'name':'menuEditClear',
'label':'Cle&ar\tDel',
},
{'type':'MenuItem',
'name':'menuEditSelectAll',
'label':'Select A&ll\tCtrl+A',
},
{'type':'MenuItem',
'name':'editSep4',
'label':'-',
},
{'type':'MenuItem',
'name':'menuEditIndentRegion',
'label':'&Indent Region',
'command':'indentRegion',
},
{'type':'MenuItem',
'name':'menuEditDedentRegion',
'label':'&Dedent Region',
'command':'dedentRegion',
},
{'type':'MenuItem',
'name':'menuEditCommentRegion',
'label':'Comment &out region\tAlt+3',
'command':'commentRegion',
},
{'type':'MenuItem',
'name':'menuEditUncommentRegion',
'label':'U&ncomment region\tShift+Alt+3',
'command':'uncommentRegion',
},
]
},
{'type':'Menu',
'name':'menuView',
'label':'&View',
'items': [
{'type':'MenuItem',
'name':'menuViewWhitespace',
'label':'&Whitespace',
'checkable':1,
},
{'type':'MenuItem',
'name':'menuViewIndentationGuides',
'label':'Indentation &guides',
'checkable':1,
},
{'type':'MenuItem',
'name':'menuViewRightEdgeIndicator',
'label':'&Right edge indicator',
'checkable':1,
},
{'type':'MenuItem',
'name':'menuViewEndOfLineMarkers',
'label':'&End-of-line markers',
'checkable':1,
},
{'type':'MenuItem',
'name':'menuViewFixedFont',
'label':'&Fixed Font',
'enabled':0,
'checkable':1,
},
{'type':'MenuItem',
'name':'viewSep1',
'label':'-',
},
{'type':'MenuItem',
'name':'menuViewLineNumbers',
'label':'&Line Numbers',
'checkable':1,
'checked':1,
},
{'type':'MenuItem',
'name':'menuViewCodeFolding',
'label':'&Code Folding',
'checkable':1,
'checked':0,
},
]
},
{'type':'Menu',
'name':'menuFormat',
'label':'F&ormat',
'items': [
{'type':'MenuItem',
'name':'menuFormatStyles',
'label':'&Styles...',
'command':'doSetStyles',
},
{'type':'MenuItem',
'name':'menuFormatWrap',
'label':'&Wrap Lines',
'checkable':1,
},
]
},
{'type':'Menu',
'name':'menuScriptlet',
'label':'&Shell',
'items': [
{'type':'MenuItem',
'name':'menuScriptletShell',
'label':'&Shell Window\tF5',
},
{'type':'MenuItem',
'name':'menuScriptletNamespace',
'label':'&Namespace Window\tF6',
},
{'type':'MenuItem',
'name':'scriptletSep1',
'label':'-',
},
{'type':'MenuItem',
'name':'menuScriptletSaveShellSelection',
'label':'Save Shell Selection...',
},
{'type':'MenuItem',
'name':'menuScriptletRunScriptlet',
'label':'Run Scriptlet...',
},
]
},
{'type':'Menu',
'name':'menuHelp',
'label':'&Help',
'items': [
{'type':'MenuItem',
'name':'menuShellDocumentation',
'label':'&Shell Documentation...',
'command':'showShellDocumentation',
},
{'type':'MenuItem',
'name':'menuPythonCardDocumentation',
'label':'&PythonCard Documentation...\tF1',
'command':'showPythonCardDocumentation',
},
{'type':'MenuItem',
'name':'menuPythonDocumentation',
'label':'Python &Documentation...',
'command':'showPythonDocumentation',
},
{'type':'MenuItem',
'name':'helpSep1',
'label':'-',
},
{'type':'MenuItem',
'name':'menuHelpAbout',
'label':'&About codeEditor...',
'command':'doHelpAbout',
},
]
},
]
},
'strings': {
'saveAs':'Save As',
'about':'About codeEditor...',
'saveAsWildcard':'All files (*.*)|*.*|Python scripts (*.py;*.pyw)|*.pyw;*.PY;*.PYW;*.py|Text files (*.txt;*.text)|*.text;*.TXT;*.TEXT;*.txt|HTML and XML files (*.htm;*.html;*.xml)|*.htm;*.xml;*.HTM;*.HTML;*.XML;*.html',
'chars':'chars',
'gotoLine':'Goto line',
'lines':'lines',
'gotoLineNumber':'Goto line number:',
'documentChangedPrompt':'The text in the %s file has changed.\n\nDo you want to save the changes?',
'untitled':'Untitled',
'sample':'codeEditor sample',
'codeEditor':'codeEditor',
'replaced':'Replaced %d occurances',
'words':'words',
'openFile':'Open file',
'scriptletWildcard':'Python files (*.py)|*.py|All Files (*.*)|*.*',
'document':'Document',
},
'components': [
{'type':'Choice',
'name':'popComponentNames',
},
{'type':'Choice',
'name':'popComponentEvents',
},
{'type':'CodeEditor',
'name':'document',
'position':(0, 0),
'size':(250, 100),
},
] # end components
} # end background
] # end backgrounds
} }
| 35.284507 | 228 | 0.345042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,664 | 0.452179 |
cc126dbbfecd1ff026b9c6b831a847190b8423eb | 911 | py | Python | unittest_reinvent/diversity_filter_tests/test_murcko_scaffold_superfluous_addition.py | MolecularAI/reinvent-scoring | f7e052ceeffd29e17e1672c33607189873c82a45 | [
"MIT"
] | null | null | null | unittest_reinvent/diversity_filter_tests/test_murcko_scaffold_superfluous_addition.py | MolecularAI/reinvent-scoring | f7e052ceeffd29e17e1672c33607189873c82a45 | [
"MIT"
] | 2 | 2021-11-01T23:19:42.000Z | 2021-11-22T23:41:39.000Z | unittest_reinvent/diversity_filter_tests/test_murcko_scaffold_superfluous_addition.py | MolecularAI/reinvent-scoring | f7e052ceeffd29e17e1672c33607189873c82a45 | [
"MIT"
] | 2 | 2021-11-18T13:14:22.000Z | 2022-03-16T07:52:57.000Z | from reinvent_scoring.scoring.diversity_filters.curriculum_learning.update_diversity_filter_dto import \
UpdateDiversityFilterDTO
from unittest_reinvent.diversity_filter_tests.test_murcko_scaffold_base import BaseMurckoScaffoldFilter
from unittest_reinvent.diversity_filter_tests.fixtures import tanimoto_scaffold_filter_arrangement
from unittest_reinvent.fixtures.test_data import ASPIRIN
class TestMurckoScaffoldSuperfluousAddition(BaseMurckoScaffoldFilter):
    """Re-adding an already-known SMILES must not grow the scaffold memory."""
    def setUp(self):
        super().setUp()
        # Build an update DTO that re-submits a compound the filter has already seen.
        summary = tanimoto_scaffold_filter_arrangement([ASPIRIN], [1.0], [0])
        self.update_dto = UpdateDiversityFilterDTO(summary, [])
    def test_superfluous_addition(self):
        self.scaffold_filter.update_score(self.update_dto)
        memory = self.scaffold_filter._diversity_filter_memory
        self.assertEqual(2, memory.number_of_scaffolds())
| 45.55 | 104 | 0.815587 | 514 | 0.564215 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.039517 |
cc14238d3a5df01ff014ec40e1bf90f7a196fccb | 18,170 | py | Python | python/examples/kaitai/msgpack.py | carsonharmon/binaryninja-api | f7ad332ad69d370aa29cd54f4c7307da4d9173e2 | [
"MIT"
] | 1 | 2021-04-05T15:01:23.000Z | 2021-04-05T15:01:23.000Z | python/examples/kaitai/msgpack.py | carsonharmon/binaryninja-api | f7ad332ad69d370aa29cd54f4c7307da4d9173e2 | [
"MIT"
] | null | null | null | python/examples/kaitai/msgpack.py | carsonharmon/binaryninja-api | f7ad332ad69d370aa29cd54f4c7307da4d9173e2 | [
"MIT"
] | 1 | 2021-06-10T04:27:19.000Z | 2021-06-10T04:27:19.000Z | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
import collections
if parse_version(ks_version) < parse_version('0.7'):
raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class Msgpack(KaitaiStruct):
"""MessagePack (msgpack) is a system to serialize arbitrary structured
data into a compact binary stream.
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md
"""
SEQ_FIELDS = ["b1", "int_extra", "float_32_value", "float_64_value", "str_len_8", "str_len_16", "str_len_32", "str_value", "num_array_elements_16", "num_array_elements_32", "array_elements", "num_map_elements_16", "num_map_elements_32", "map_elements"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['b1']['start'] = self._io.pos()
self.b1 = self._io.read_u1()
self._debug['b1']['end'] = self._io.pos()
self._debug['int_extra']['start'] = self._io.pos()
_on = self.b1
if _on == 211:
self.int_extra = self._io.read_s8be()
elif _on == 209:
self.int_extra = self._io.read_s2be()
elif _on == 210:
self.int_extra = self._io.read_s4be()
elif _on == 208:
self.int_extra = self._io.read_s1()
elif _on == 205:
self.int_extra = self._io.read_u2be()
elif _on == 207:
self.int_extra = self._io.read_u8be()
elif _on == 204:
self.int_extra = self._io.read_u1()
elif _on == 206:
self.int_extra = self._io.read_u4be()
self._debug['int_extra']['end'] = self._io.pos()
if self.is_float_32:
self._debug['float_32_value']['start'] = self._io.pos()
self.float_32_value = self._io.read_f4be()
self._debug['float_32_value']['end'] = self._io.pos()
if self.is_float_64:
self._debug['float_64_value']['start'] = self._io.pos()
self.float_64_value = self._io.read_f8be()
self._debug['float_64_value']['end'] = self._io.pos()
if self.is_str_8:
self._debug['str_len_8']['start'] = self._io.pos()
self.str_len_8 = self._io.read_u1()
self._debug['str_len_8']['end'] = self._io.pos()
if self.is_str_16:
self._debug['str_len_16']['start'] = self._io.pos()
self.str_len_16 = self._io.read_u2be()
self._debug['str_len_16']['end'] = self._io.pos()
if self.is_str_32:
self._debug['str_len_32']['start'] = self._io.pos()
self.str_len_32 = self._io.read_u4be()
self._debug['str_len_32']['end'] = self._io.pos()
if self.is_str:
self._debug['str_value']['start'] = self._io.pos()
self.str_value = (self._io.read_bytes(self.str_len)).decode(u"UTF-8")
self._debug['str_value']['end'] = self._io.pos()
if self.is_array_16:
self._debug['num_array_elements_16']['start'] = self._io.pos()
self.num_array_elements_16 = self._io.read_u2be()
self._debug['num_array_elements_16']['end'] = self._io.pos()
if self.is_array_32:
self._debug['num_array_elements_32']['start'] = self._io.pos()
self.num_array_elements_32 = self._io.read_u4be()
self._debug['num_array_elements_32']['end'] = self._io.pos()
if self.is_array:
self._debug['array_elements']['start'] = self._io.pos()
self.array_elements = [None] * (self.num_array_elements)
for i in range(self.num_array_elements):
if not 'arr' in self._debug['array_elements']:
self._debug['array_elements']['arr'] = []
self._debug['array_elements']['arr'].append({'start': self._io.pos()})
_t_array_elements = Msgpack(self._io)
_t_array_elements._read()
self.array_elements[i] = _t_array_elements
self._debug['array_elements']['arr'][i]['end'] = self._io.pos()
self._debug['array_elements']['end'] = self._io.pos()
if self.is_map_16:
self._debug['num_map_elements_16']['start'] = self._io.pos()
self.num_map_elements_16 = self._io.read_u2be()
self._debug['num_map_elements_16']['end'] = self._io.pos()
if self.is_map_32:
self._debug['num_map_elements_32']['start'] = self._io.pos()
self.num_map_elements_32 = self._io.read_u4be()
self._debug['num_map_elements_32']['end'] = self._io.pos()
if self.is_map:
self._debug['map_elements']['start'] = self._io.pos()
self.map_elements = [None] * (self.num_map_elements)
for i in range(self.num_map_elements):
if not 'arr' in self._debug['map_elements']:
self._debug['map_elements']['arr'] = []
self._debug['map_elements']['arr'].append({'start': self._io.pos()})
_t_map_elements = self._root.MapTuple(self._io, self, self._root)
_t_map_elements._read()
self.map_elements[i] = _t_map_elements
self._debug['map_elements']['arr'][i]['end'] = self._io.pos()
self._debug['map_elements']['end'] = self._io.pos()
class MapTuple(KaitaiStruct):
SEQ_FIELDS = ["key", "value"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['key']['start'] = self._io.pos()
self.key = Msgpack(self._io)
self.key._read()
self._debug['key']['end'] = self._io.pos()
self._debug['value']['start'] = self._io.pos()
self.value = Msgpack(self._io)
self.value._read()
self._debug['value']['end'] = self._io.pos()
@property
def is_array_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_array_32'):
return self._m_is_array_32 if hasattr(self, '_m_is_array_32') else None
self._m_is_array_32 = self.b1 == 221
return self._m_is_array_32 if hasattr(self, '_m_is_array_32') else None
@property
def int_value(self):
if hasattr(self, '_m_int_value'):
return self._m_int_value if hasattr(self, '_m_int_value') else None
if self.is_int:
self._m_int_value = (self.pos_int7_value if self.is_pos_int7 else (self.neg_int5_value if self.is_neg_int5 else 4919))
return self._m_int_value if hasattr(self, '_m_int_value') else None
@property
def str_len(self):
if hasattr(self, '_m_str_len'):
return self._m_str_len if hasattr(self, '_m_str_len') else None
if self.is_str:
self._m_str_len = ((self.b1 & 31) if self.is_fix_str else (self.str_len_8 if self.is_str_8 else (self.str_len_16 if self.is_str_16 else self.str_len_32)))
return self._m_str_len if hasattr(self, '_m_str_len') else None
@property
def is_fix_array(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_fix_array'):
return self._m_is_fix_array if hasattr(self, '_m_is_fix_array') else None
self._m_is_fix_array = (self.b1 & 240) == 144
return self._m_is_fix_array if hasattr(self, '_m_is_fix_array') else None
@property
def is_map(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_map'):
return self._m_is_map if hasattr(self, '_m_is_map') else None
self._m_is_map = ((self.is_fix_map) or (self.is_map_16) or (self.is_map_32))
return self._m_is_map if hasattr(self, '_m_is_map') else None
@property
def is_array(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_array'):
return self._m_is_array if hasattr(self, '_m_is_array') else None
self._m_is_array = ((self.is_fix_array) or (self.is_array_16) or (self.is_array_32))
return self._m_is_array if hasattr(self, '_m_is_array') else None
@property
def is_float(self):
if hasattr(self, '_m_is_float'):
return self._m_is_float if hasattr(self, '_m_is_float') else None
self._m_is_float = ((self.is_float_32) or (self.is_float_64))
return self._m_is_float if hasattr(self, '_m_is_float') else None
@property
def is_str_8(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_str_8'):
return self._m_is_str_8 if hasattr(self, '_m_is_str_8') else None
self._m_is_str_8 = self.b1 == 217
return self._m_is_str_8 if hasattr(self, '_m_is_str_8') else None
@property
def is_fix_map(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_fix_map'):
return self._m_is_fix_map if hasattr(self, '_m_is_fix_map') else None
self._m_is_fix_map = (self.b1 & 240) == 128
return self._m_is_fix_map if hasattr(self, '_m_is_fix_map') else None
@property
def is_int(self):
if hasattr(self, '_m_is_int'):
return self._m_is_int if hasattr(self, '_m_is_int') else None
self._m_is_int = ((self.is_pos_int7) or (self.is_neg_int5))
return self._m_is_int if hasattr(self, '_m_is_int') else None
@property
def is_bool(self):
if hasattr(self, '_m_is_bool'):
return self._m_is_bool if hasattr(self, '_m_is_bool') else None
self._m_is_bool = ((self.b1 == 194) or (self.b1 == 195))
return self._m_is_bool if hasattr(self, '_m_is_bool') else None
@property
def is_str_16(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_str_16'):
return self._m_is_str_16 if hasattr(self, '_m_is_str_16') else None
self._m_is_str_16 = self.b1 == 218
return self._m_is_str_16 if hasattr(self, '_m_is_str_16') else None
@property
def is_float_64(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-float
"""
if hasattr(self, '_m_is_float_64'):
return self._m_is_float_64 if hasattr(self, '_m_is_float_64') else None
self._m_is_float_64 = self.b1 == 203
return self._m_is_float_64 if hasattr(self, '_m_is_float_64') else None
@property
def is_map_16(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_map_16'):
return self._m_is_map_16 if hasattr(self, '_m_is_map_16') else None
self._m_is_map_16 = self.b1 == 222
return self._m_is_map_16 if hasattr(self, '_m_is_map_16') else None
@property
def is_neg_int5(self):
if hasattr(self, '_m_is_neg_int5'):
return self._m_is_neg_int5 if hasattr(self, '_m_is_neg_int5') else None
self._m_is_neg_int5 = (self.b1 & 224) == 224
return self._m_is_neg_int5 if hasattr(self, '_m_is_neg_int5') else None
@property
def pos_int7_value(self):
if hasattr(self, '_m_pos_int7_value'):
return self._m_pos_int7_value if hasattr(self, '_m_pos_int7_value') else None
if self.is_pos_int7:
self._m_pos_int7_value = self.b1
return self._m_pos_int7_value if hasattr(self, '_m_pos_int7_value') else None
@property
def is_nil(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-nil
"""
if hasattr(self, '_m_is_nil'):
return self._m_is_nil if hasattr(self, '_m_is_nil') else None
self._m_is_nil = self.b1 == 192
return self._m_is_nil if hasattr(self, '_m_is_nil') else None
@property
def float_value(self):
if hasattr(self, '_m_float_value'):
return self._m_float_value if hasattr(self, '_m_float_value') else None
if self.is_float:
self._m_float_value = (self.float_32_value if self.is_float_32 else self.float_64_value)
return self._m_float_value if hasattr(self, '_m_float_value') else None
@property
def num_array_elements(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_num_array_elements'):
return self._m_num_array_elements if hasattr(self, '_m_num_array_elements') else None
if self.is_array:
self._m_num_array_elements = ((self.b1 & 15) if self.is_fix_array else (self.num_array_elements_16 if self.is_array_16 else self.num_array_elements_32))
return self._m_num_array_elements if hasattr(self, '_m_num_array_elements') else None
@property
def neg_int5_value(self):
if hasattr(self, '_m_neg_int5_value'):
return self._m_neg_int5_value if hasattr(self, '_m_neg_int5_value') else None
if self.is_neg_int5:
self._m_neg_int5_value = -((self.b1 & 31))
return self._m_neg_int5_value if hasattr(self, '_m_neg_int5_value') else None
@property
def bool_value(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-bool
"""
if hasattr(self, '_m_bool_value'):
return self._m_bool_value if hasattr(self, '_m_bool_value') else None
if self.is_bool:
self._m_bool_value = self.b1 == 195
return self._m_bool_value if hasattr(self, '_m_bool_value') else None
@property
def is_pos_int7(self):
if hasattr(self, '_m_is_pos_int7'):
return self._m_is_pos_int7 if hasattr(self, '_m_is_pos_int7') else None
self._m_is_pos_int7 = (self.b1 & 128) == 0
return self._m_is_pos_int7 if hasattr(self, '_m_is_pos_int7') else None
@property
def is_array_16(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-array
"""
if hasattr(self, '_m_is_array_16'):
return self._m_is_array_16 if hasattr(self, '_m_is_array_16') else None
self._m_is_array_16 = self.b1 == 220
return self._m_is_array_16 if hasattr(self, '_m_is_array_16') else None
@property
def is_str(self):
if hasattr(self, '_m_is_str'):
return self._m_is_str if hasattr(self, '_m_is_str') else None
self._m_is_str = ((self.is_fix_str) or (self.is_str_8) or (self.is_str_16) or (self.is_str_32))
return self._m_is_str if hasattr(self, '_m_is_str') else None
@property
def is_fix_str(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_fix_str'):
return self._m_is_fix_str if hasattr(self, '_m_is_fix_str') else None
self._m_is_fix_str = (self.b1 & 224) == 160
return self._m_is_fix_str if hasattr(self, '_m_is_fix_str') else None
@property
def is_str_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-str
"""
if hasattr(self, '_m_is_str_32'):
return self._m_is_str_32 if hasattr(self, '_m_is_str_32') else None
self._m_is_str_32 = self.b1 == 219
return self._m_is_str_32 if hasattr(self, '_m_is_str_32') else None
@property
def num_map_elements(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_num_map_elements'):
return self._m_num_map_elements if hasattr(self, '_m_num_map_elements') else None
if self.is_map:
self._m_num_map_elements = ((self.b1 & 15) if self.is_fix_map else (self.num_map_elements_16 if self.is_map_16 else self.num_map_elements_32))
return self._m_num_map_elements if hasattr(self, '_m_num_map_elements') else None
@property
def is_float_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-float
"""
if hasattr(self, '_m_is_float_32'):
return self._m_is_float_32 if hasattr(self, '_m_is_float_32') else None
self._m_is_float_32 = self.b1 == 202
return self._m_is_float_32 if hasattr(self, '_m_is_float_32') else None
@property
def is_map_32(self):
"""
.. seealso::
Source - https://github.com/msgpack/msgpack/blob/master/spec.md#formats-map
"""
if hasattr(self, '_m_is_map_32'):
return self._m_is_map_32 if hasattr(self, '_m_is_map_32') else None
self._m_is_map_32 = self.b1 == 223
return self._m_is_map_32 if hasattr(self, '_m_is_map_32') else None
| 38.907923 | 256 | 0.616346 | 17,744 | 0.976555 | 0 | 0 | 11,585 | 0.637589 | 0 | 0 | 4,976 | 0.273858 |
cc14824a67115e3294372203e71ff2b7a1d285c1 | 807 | py | Python | awx/main/tests/unit/test_signals.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 11,396 | 2017-09-07T04:56:02.000Z | 2022-03-31T13:56:17.000Z | awx/main/tests/unit/test_signals.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 11,046 | 2017-09-07T09:30:46.000Z | 2022-03-31T20:28:01.000Z | awx/main/tests/unit/test_signals.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 3,592 | 2017-09-07T04:14:31.000Z | 2022-03-31T23:53:09.000Z | from awx.main import signals
class TestCleanupDetachedLabels:
    """Unit test for signals.cleanup_detached_labels_on_deleted_parent."""
    def test_cleanup_detached_labels_on_deleted_parent(self, mocker):
        # One label that qualifies for detachment and one that does not.
        detachable = mocker.MagicMock()
        detachable.is_candidate_for_detach.return_value = True
        attached = mocker.MagicMock()
        attached.is_candidate_for_detach.return_value = False
        parent = mocker.MagicMock()
        parent.labels.all = mocker.MagicMock()
        parent.labels.all.return_value = [detachable, attached]
        signals.cleanup_detached_labels_on_deleted_parent(None, parent)
        # Both labels are inspected, but only the detach candidate is deleted.
        detachable.is_candidate_for_detach.assert_called_with()
        attached.is_candidate_for_detach.assert_called_with()
        detachable.delete.assert_called_with()
        attached.delete.assert_not_called()
| 42.473684 | 78 | 0.754647 | 775 | 0.960347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
cc17e63865e94c50206dc38b7367c698252eecb5 | 23 | py | Python | FileUtils/__init__.py | cognitiveqe/cqepyutils | 7d83f2bbe667294802c2ca9f20a42e1fa3e635d5 | [
"MIT"
] | null | null | null | FileUtils/__init__.py | cognitiveqe/cqepyutils | 7d83f2bbe667294802c2ca9f20a42e1fa3e635d5 | [
"MIT"
] | null | null | null | FileUtils/__init__.py | cognitiveqe/cqepyutils | 7d83f2bbe667294802c2ca9f20a42e1fa3e635d5 | [
"MIT"
] | null | null | null | __all__ = ['FileUtils'] | 23 | 23 | 0.695652 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.478261 |
cc1c1df156994d9392bcac044186c4c4a494ce33 | 7,617 | py | Python | src/LoadData.py | josepquintana/HackUPC-2019 | ce962daf71464d3c12280a46b4691a21d3ca1bfa | [
"MIT"
] | null | null | null | src/LoadData.py | josepquintana/HackUPC-2019 | ce962daf71464d3c12280a46b4691a21d3ca1bfa | [
"MIT"
] | null | null | null | src/LoadData.py | josepquintana/HackUPC-2019 | ce962daf71464d3c12280a46b4691a21d3ca1bfa | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
class AccidentsData:
    """Load and preprocess ../data/accidents.csv, keeping a 70/30 train/test split.

    The cleaned feature matrix and the 'target' labels are exposed through the
    get_Xtrain/get_Xtest/get_ytrain/get_ytest accessors.
    """
    def __init__(self):
        filename = Path('../data/accidents.csv')
        if not filename.exists():
            print('\nERROR: Missing dataset file: accidents.csv\n')
            quit()  # abort the whole program: nothing can be done without the data
        accidents = pd.read_csv(filename)
        # Drop columns judged irrelevant in a preliminary analysis
        accidents = accidents.drop(columns=['police_force', 'local_authority_district', 'local_authority_highway',
                                            'lsoa_of_accident_location', 'location_easting_osgr',
                                            'location_northing_osgr'])
        # One-hot encoding of the categorical columns
        accidents = pd.get_dummies(accidents, columns=['1st_road_class', 'junction_detail', 'junction_control',
                                                       '2nd_road_class', 'pedestrian_crossing-human_control',
                                                       'pedestrian_crossing-physical_facilities', 'light_conditions',
                                                       'road_surface_conditions',
                                                       'special_conditions_at_site', 'carriageway_hazards'])
        # Drop the one-hot columns produced by the "unknown" (-1) category
        cols_acaben_menysu = []
        for colname in accidents.columns:
            if colname[-3:] == '_-1':
                cols_acaben_menysu.append(colname)
        accidents = accidents.drop(columns=cols_acaben_menysu)
        # Map the urban/rural categorical to a numeric 0/1 flag
        numeritza = {'urban_or_rural_area': {'Urban': 1,
                                             'Rural': 0}
                     }
        accidents.replace(numeritza, inplace=True)
        # "No exceptional condition" dummies carry no information, drop them.
        # NOTE: 'special' '_conditions_at_site_None' below is implicit string
        # concatenation and names the single column 'special_conditions_at_site_None'.
        accidents = accidents.drop(columns=['special'
                                            '_conditions_at_site_None', 'carriageway_hazards_None',
                                            '1st_road_class_Unclassified',
                                            '2nd_road_class_Unclassified'])
        # Convert hh:mm:00 strings to minutes since midnight
        accidents['time'] = accidents['time'].apply(lambda s: int(s[:-4]) * 60 + int(s[-2:]))
        # Convert the date string to a single numeric value (day + month * 30.44);
        # the slice positions assume a fixed-width date format - TODO confirm
        # against the actual CSV before relying on this feature.
        accidents['date'] = accidents['date'].apply(lambda s: int(s[7:9]) + int(s[-2:-1]) * 30.44)
        # Replace -1 sentinels with the column average
        accidents['2nd_road_number'].replace(-1, np.nan, inplace=True)
        accidents['2nd_road_number'].fillna(accidents['2nd_road_number'].mean(), inplace=True)
        # Min-max normalise the columns that need it
        tobenorm = ['longitude', 'latitude', 'number_of_vehicles', 'number_of_casualties', 'date', 'time',
                    '1st_road_number',
                    'road_type', 'speed_limit', '2nd_road_number', 'weather_conditions']
        norm = MinMaxScaler()
        accidents[tobenorm] = norm.fit_transform(accidents[tobenorm])
        self.Xtrain, self.Xtest, self.ytrain, self.ytest = train_test_split(accidents.drop('target', axis=1),
                                                                            accidents['target'], train_size=.7)
    def get_Xtrain(self):
        # Training feature matrix (70% of the rows).
        return self.Xtrain
    def get_Xtest(self):
        # Test feature matrix (30% of the rows).
        return self.Xtest
    def get_ytrain(self):
        # Training labels ('target' column).
        return self.ytrain
    def get_ytest(self):
        # Test labels ('target' column).
        return self.ytest
class VehiclesData:
    """Load and preprocess ../data/vehicles.csv.

    The cleaned, one-hot-encoded and normalised frame is exposed via get_valors().
    """
    def __init__(self):
        filename = Path('../data/vehicles.csv')
        if not filename.exists():
            print('\nERROR: Missing dataset file: vehicles.csv\n')
            quit()  # abort the whole program: nothing can be done without the data
        vehicles = pd.read_csv(filename)
        # Drop a column judged irrelevant
        vehicles = vehicles.drop(columns=['Vehicle_IMD_Decile'])
        # One-hot encoding of the categorical columns
        vehicles = pd.get_dummies(vehicles, columns=['Vehicle_Type', 'Towing_and_Articulation', 'Vehicle_Manoeuvre',
                                                     'Vehicle_Location-Restricted_Lane', 'Junction_Location',
                                                     'Skidding_and_Overturning', 'Hit_Object_in_Carriageway',
                                                     'Vehicle_Leaving_Carriageway', 'Hit_Object_off_Carriageway',
                                                     '1st_Point_of_Impact',
                                                     'Journey_Purpose_of_Driver', 'Propulsion_Code',
                                                     'Driver_IMD_Decile', 'Driver_Home_Area_Type'])
        # Drop the one-hot columns produced by the "unknown" category (-1 or -1.0)
        cols_acabenmenysu = []
        for colname in vehicles.columns:
            if colname[-3:] == '_-1' or colname[-5:] == '_-1.0':
                cols_acabenmenysu.append(colname)
        vehicles = vehicles.drop(columns=cols_acabenmenysu)
        # Remove rows with implausible (or unknown, -1) driver ages
        vehicles = vehicles.drop(vehicles[vehicles.Age_of_Driver < 15].index)
        # Replace -1 sentinels (both int and string form) with NaN, then impute the column mean
        vehicles['Engine_Capacity_(CC)'].replace(-1, np.nan, inplace=True)
        vehicles['Engine_Capacity_(CC)'].replace('-1', np.nan, inplace=True)
        vehicles['Engine_Capacity_(CC)'].fillna(vehicles['Engine_Capacity_(CC)'].mean(), inplace=True)
        vehicles['Age_of_Driver'].replace(-1, np.nan, inplace=True)
        vehicles['Age_of_Driver'].replace('-1', np.nan, inplace=True)
        vehicles['Age_of_Driver'].fillna(vehicles['Age_of_Driver'].mean(), inplace=True)
        vehicles['Age_of_Vehicle'].replace(-1, np.nan, inplace=True)
        vehicles['Age_of_Vehicle'].fillna(vehicles['Age_of_Vehicle'].mean(), inplace=True)
        vehicles['Was_Vehicle_Left_Hand_Drive?'].replace(-1, np.nan, inplace=True)
        vehicles['Was_Vehicle_Left_Hand_Drive?'].replace('-1', np.nan, inplace=True)
        vehicles['Sex_of_Driver'].replace(-1, np.nan, inplace=True)
        vehicles['Sex_of_Driver'].replace('-1', np.nan, inplace=True)
        vehicles['Sex_of_Driver'].replace('Not known', np.nan, inplace=True)
        # Map the remaining binary categoricals to numeric 0/1
        dicvehicles = {'Sex_of_Driver': {'Male': 1.0, 'Female': 0.0},
                       'Was_Vehicle_Left_Hand_Drive?': {'Yes': 1.0, 'No': 0.0}
                      }
        vehicles.replace(dicvehicles, inplace=True)
        # Impute the (now numeric) binary columns with their mean
        vehicles['Was_Vehicle_Left_Hand_Drive?'].fillna(vehicles['Was_Vehicle_Left_Hand_Drive?'].mean(), inplace=True)
        vehicles['Sex_of_Driver'].fillna(vehicles['Sex_of_Driver'].mean(), inplace=True)
        # Min-max normalise the continuous columns
        tobenorm = ['Age_of_Driver', 'Engine_Capacity_(CC)', 'Age_of_Vehicle']
        norm = MinMaxScaler()
        vehicles[tobenorm] = norm.fit_transform(vehicles[tobenorm])
        self.valors = vehicles
    def get_valors(self):
        # The fully preprocessed vehicles DataFrame.
        return self.valors
class MergedData:
def __init__(self, accidents, vehicles):
acctarg_train = pd.concat([accidents.get_Xtrain(), accidents.get_ytrain()], axis=1)
acctarg_test = pd.concat([accidents.get_Xtest(), accidents.get_ytest()], axis=1)
merged_train = pd.merge(acctarg_train, vehicles.get_valors(), on='accident_id')
merged_test = pd.merge(acctarg_test, vehicles.get_valors(), on='accident_id')
self.target_train = merged_train['target']
self.target_test = merged_test['target']
self.merged_train = merged_train.drop('target', axis=1)
self.merged_test = merged_test.drop('target', axis=1)
def get_merged_train(self):
return self.merged_train
def get_target_train(self):
return self.target_train
def get_merged_test(self):
return self.merged_test
def get_target_test(self):
return self.target_test
| 46.163636 | 118 | 0.595641 | 7,448 | 0.9773 | 0 | 0 | 0 | 0 | 0 | 0 | 2,305 | 0.302454 |
cc1e9331027ec43231a6a77d60167ad68f144c01 | 471 | py | Python | src/main/python/bayou/experiments/predictMethods/buildQueryJSONs/cleanup.py | rohan2606/bayou | eca7a6628aa5942aeb4b57f684013c7e15ccb171 | [
"Apache-2.0"
] | 1 | 2018-02-19T21:48:06.000Z | 2018-02-19T21:48:06.000Z | src/main/python/bayou/experiments/predictMethods/buildQueryJSONs/cleanup.py | rohan2606/bayou | eca7a6628aa5942aeb4b57f684013c7e15ccb171 | [
"Apache-2.0"
] | null | null | null | src/main/python/bayou/experiments/predictMethods/buildQueryJSONs/cleanup.py | rohan2606/bayou | eca7a6628aa5942aeb4b57f684013c7e15ccb171 | [
"Apache-2.0"
] | 1 | 2019-02-07T20:26:36.000Z | 2019-02-07T20:26:36.000Z | import os
import shutil
import sys
def rm_r(path):
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
def cleanUp(logdir = "./log/"):
print("Cleaning all files in log ... ", end="")
sys.stdout.flush()
rm_r(logdir)
# for f in os.listdir(logdir):
# rm_r(os.path.join(logdir, f))
os.mkdir(logdir)
os.mkdir(logdir + "/JSONFiles")
print("Done")
| 19.625 | 56 | 0.605096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.265393 |
cc206b35fef0c258a968de43d46a4569a21a39b6 | 1,577 | py | Python | videoanalyst/model/builder.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
] | 1 | 2021-05-24T10:08:51.000Z | 2021-05-24T10:08:51.000Z | videoanalyst/model/builder.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
] | null | null | null | videoanalyst/model/builder.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*
from typing import Dict
from yacs.config import CfgNode
from .backbone import builder as backbone_builder
from .loss import builder as loss_builder
from .task_head import builder as head_builder
from .task_model import builder as task_builder
def build_model(
task: str,
cfg: CfgNode,
):
r"""
Builder function.
Arguments
---------
task: str
builder task name (track|vos)
cfg: CfgNode
buidler configuration
Returns
-------
torch.nn.Module
module built by builder
"""
if task == "track":
backbone = backbone_builder.build(task, cfg.backbone)
losses = loss_builder.build(task, cfg.losses)
head = head_builder.build(task, cfg.task_head)
task_model = task_builder.build(task, cfg.task_model, backbone, head,
losses)
return task_model
else:
print("model for task {} is not complted".format(task))
exit(-1)
def get_config() -> Dict[str, CfgNode]:
r"""
Get available component list config
Returns
-------
Dict[str, CfgNode]
config with list of available components
"""
cfg_dict = {"track": CfgNode(), "vos": CfgNode()}
for task in cfg_dict:
cfg = cfg_dict[task]
cfg["backbone"] = backbone_builder.get_config()[task]
cfg["losses"] = loss_builder.get_config()[task]
cfg["task_model"] = task_builder.get_config()[task]
cfg["task_head"] = head_builder.get_config()[task]
return cfg_dict
| 25.435484 | 77 | 0.615726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 505 | 0.320228 |
cc20da7407be0989d712096316aafb2ddf48ad9c | 8,221 | py | Python | tx1_pcie/demo/tx1_pcie/site_scons/xst_utils.py | CospanDesign/nysa-tx1-pcie-platform | d1b6148c6e00cae84759c9e0d56858354a901780 | [
"MIT"
] | 15 | 2015-08-31T20:50:39.000Z | 2022-03-13T08:56:39.000Z | tx1_pcie/demo/tx1_pcie/site_scons/xst_utils.py | CospanDesign/nysa-tx1-pcie-platform | d1b6148c6e00cae84759c9e0d56858354a901780 | [
"MIT"
] | 5 | 2015-05-02T16:48:57.000Z | 2017-06-15T16:25:34.000Z | tx1_pcie/demo/tx1_pcie/site_scons/xst_utils.py | CospanDesign/nysa-tx1-pcie-platform | d1b6148c6e00cae84759c9e0d56858354a901780 | [
"MIT"
] | 6 | 2016-09-02T16:02:13.000Z | 2021-06-29T22:29:45.000Z | #Distributed under the MIT licesnse.
#Copyright (c) 2013 Cospan Design (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import json
import utils
import coregen_utils
XST_DEFAULT_FLAG_FILE = "xst_default_flags.json"
PROJECT_FILENAME = "project.prj"
XST_SCRIPT_FILENAME = "xst_script.xst"
XST_TEMP_DIR = "projnav.tmp"
XST_DIR = "xst"
XST_OUTFILE = "xst_out"
XST_PROJECT_LSO = "project.lso"
def get_xst_flags(config):
"""
Given a configuration dictionary return flags for the XST build
if user flags are not specified take the default flags from
site_scons/xst_default_flags.json
Args:
config (dictionary): configuration dictionary
Return:
Dictionary of flags used to create the XST script
Raises:
Nothing
"""
#print "Apply slave tags"
flags = {}
user_flags = {}
if "xst" in config.keys():
if "flags" in config["xst"].keys():
user_flags = config["xst"]["flags"]
fn = os.path.join(os.path.dirname(__file__), XST_DEFAULT_FLAG_FILE)
default_flags = json.load(open(fn, "r"))
for key in default_flags:
flags[key] = default_flags[key]
if key in user_flags.keys():
flags[key]["value"] = user_flags[key]
return flags
def create_xst_dir(config):
"""
Create an xst directiroy in the build folder
Args:
config (dictionary): configuration dictionary
Return:
(string): xst output directory (relative)
Raises:
Nothing
"""
#Create a output directory if it does not exist
build_dir = utils.create_build_directory(config)
#Now I have an output directory to put stuff in
#Create an XST directory to put stuff related to XST
xst_dir = os.path.join(build_dir, XST_DIR)
if not os.path.exists(xst_dir):
os.makedirs(xst_dir)
return xst_dir
def get_xst_dir(config, absolute = False):
"""Returns the xst output directory location
Args:
config (dictionary): configuration dictionary
absolute (boolean):
False (default): Relative to project base
True: Absolute
Returns:
(string): strin representation of the path to the output
Raises:
Nothing
"""
build_dir = utils.get_build_directory(config, absolute)
xst_dir = os.path.join(build_dir, XST_DIR)
return xst_dir
def create_temp_dir(config):
"""
Create an xst temporary directory in the build folder
Args:
config (dictionary): configuration dictionary
Return:
Nothing
Raises:
Nothing
"""
xst_dir = os.path.join(config["build_dir"], XST_DIR)
temp_dir = os.path.join(xst_dir, XST_TEMP_DIR)
temp_abs_dir = os.path.join(utils.get_project_base(), xst_dir, XST_TEMP_DIR)
if not os.path.exists(temp_abs_dir):
os.makedirs(temp_abs_dir)
return temp_dir
def create_xst_project_file(config):
"""
Given a configuration file create the .prj which holds the verilog
filenames to be built
Args:
config (dictionary): configuration dictionary
Return:
Nothing
Raises:
Nothing
"""
#print "Creating xst project file"
xst_dir = create_xst_dir(config)
project_fn = os.path.join(xst_dir, PROJECT_FILENAME)
fp = open(project_fn, "w")
v = ""
#XXX: There should be allowances for adding different libraries in the future
for vf in config["verilog"]:
v += "verilog work \"%s\"%s" % (vf, os.linesep)
#print "project file:\n%s" % v
fp.write(v)
fp.close()
def get_report_filename(config):
"""
get the output filename for the project
Args:
config (dictionary): configuration dictionary
Return:
Nothing
Raises:
Nothing
"""
xst_abs_dir = create_xst_dir(config)
top_module = config["top_module"]
output_file = os.path.join(xst_abs_dir, "%s.syr" % top_module)
return output_file
def get_xst_filename(config, absolute = False):
"""get the output filename"""
xst_dir = get_xst_dir(config, absolute)
top_module = config["top_module"]
xst_file = os.path.join(xst_dir, "%s.xst" % top_module)
#print "xst filename: %s" % xst_file
return xst_file
def get_ngc_filename(config, absolute = False):
"""get the output filename"""
xst_dir = get_xst_dir(config, absolute)
top_module = config["top_module"]
ngc_file = os.path.join(xst_dir, "%s.ngc" % top_module)
#print "xst filename: %s" % xst_file
return ngc_file
def create_lso_file(config):
"""
Creates a library search order file location for the XST script
This is to declutter the base directory
Args:
config (dictionary): configuraiton dictionary
Return:
Nothing
Raises:
(string) relative lso file name
"""
xst_dir = os.path.join(config["build_dir"], XST_DIR)
lso_fn = os.path.join(xst_dir, XST_PROJECT_LSO)
xst_abs_dir = create_xst_dir(config)
fn = os.path.join(xst_abs_dir, XST_PROJECT_LSO)
#print "lSO filename: %s" % fn
fp = open(fn, "w")
#fp.write("DEFAULT_SEARCH_ORDER%s" % os.linesep)
fp.write("work%s" % os.linesep)
fp.close()
return lso_fn
#return fn
def create_xst_script(config):
"""
given the configuration file create a script that will
build the verilog files declared within the configuration file
Args:
config (dictionary): configuraiton dictionary
Return:
(string) script file name
Raises:
Nothing
"""
xst_abs_dir = create_xst_dir(config)
flags = get_xst_flags(config)
#print "Flags: %s" % str(flags)
xst_dir = os.path.join(config["build_dir"], XST_DIR)
temp_dir = create_temp_dir(config)
project_dir = os.path.join(xst_dir, PROJECT_FILENAME)
top_module = config["top_module"]
output_file = os.path.join(xst_dir, top_module)
xst_script_fn = os.path.join(xst_abs_dir, XST_SCRIPT_FILENAME)
fp = open(xst_script_fn, "w")
fp.write("set -tmpdir \"%s\"%s" % (temp_dir, os.linesep))
fp.write("set -xsthdpdir \"%s\"%s" % (xst_dir, os.linesep))
#fp.write("set -xsthdpini \"%s\"%s" % (xst_dir, os.linesep))
fp.write("run%s" % os.linesep)
fp.write("-ifn %s%s" % (project_dir, os.linesep))
fp.write("-ofn %s%s" % (output_file, os.linesep))
fp.write("-ofmt NGC%s" % (os.linesep))
fp.write("-p %s%s" % (config["device"], os.linesep))
fp.write("-top %s%s" % (top_module, os.linesep))
coregen_files = coregen_utils.get_target_files(config)
if len(coregen_files) > 0:
fp.write("-sd %s%s" % (coregen_utils.get_coregen_dir(config, absolute = True), os.linesep))
#print "flags[lso] = %s" % str(flags["-lso"]["value"])
if ("-lso" not in flags.keys()) or (len(flags["-lso"]["value"]) == 0):
#print "creating custom lso file"
flags["-lso"]["value"] = create_lso_file(config)
for flag in flags:
if len(flags[flag]["value"]) == 0:
continue
#print "flag: %s: %s" % (flag, flags[flag]["value"])
fp.write("%s %s%s" % (flag, flags[flag]["value"], os.linesep))
fp.close()
return xst_script_fn
| 29.256228 | 99 | 0.665612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,409 | 0.536309 |
cc213e43355eccd688ab41131dff02e635716ed2 | 209 | py | Python | samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/datatypes_date_time/timex_relative_convert.py | luzeunice/BotBuilder-Samples | b62be4e8863125a567902b736b7b74313d9d4f28 | [
"MIT"
] | null | null | null | samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/datatypes_date_time/timex_relative_convert.py | luzeunice/BotBuilder-Samples | b62be4e8863125a567902b736b7b74313d9d4f28 | [
"MIT"
] | null | null | null | samples/python/13.core-bot/envs/chat_bot_02/Lib/site-packages/datatypes_date_time/timex_relative_convert.py | luzeunice/BotBuilder-Samples | b62be4e8863125a567902b736b7b74313d9d4f28 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class TimexRelativeConvert:
@staticmethod
def convert_timex_to_string_relative(timex):
return ''
| 23.222222 | 59 | 0.746411 | 113 | 0.54067 | 0 | 0 | 80 | 0.382775 | 0 | 0 | 94 | 0.449761 |
cc216b40493d6634149864a1b4ffd4dfaabc801a | 9,740 | py | Python | src/levitas/lib/daemonize.py | tobi-weber/levitas | b14fb4135839611ace652b9f43cbe5a7fa5e3b66 | [
"Apache-2.0"
] | 1 | 2018-02-27T00:28:29.000Z | 2018-02-27T00:28:29.000Z | src/levitas/lib/daemonize.py | tobi-weber/levitas | b14fb4135839611ace652b9f43cbe5a7fa5e3b66 | [
"Apache-2.0"
] | null | null | null | src/levitas/lib/daemonize.py | tobi-weber/levitas | b14fb4135839611ace652b9f43cbe5a7fa5e3b66 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Tobias Weber <tobi-weber@gmx.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import signal
import abc
import logging
from time import sleep
from multiprocessing import Process
from argparse import ArgumentParser
from levitas.lib.modificationmonitor import ModificationMonitor
from .settings import SettingMissing
log = logging.getLogger("levitas.lib.daemonize")
def cli(daemon_class, daemon_args=[], daemon_kwargs={}, umask=0):
"""
Command-line interface to control a daemon.
@param daemon_class: Subclass of L{AbstractDaemon}.
@param daemon_args: Arguments to instantiate the daemon.
@param daemon_kwargs: Named arguments to instantiate the daemon.
@param umask: file mode creation mask.
"""
name = os.path.basename(sys.argv[0])
options = CLIOptions(name)
try:
options.parse_args()
except CLIOptionError as err:
sys.stderr.write(str(err))
sys.exit(1)
sys.stdout.write("%s %s: " % (options.action or "start", name))
if options.reloader and "MODIFICATIONMONITOR_STARTED" not in os.environ:
sys.stdout.write("Start ModificationMonitor\n")
ModificationMonitor()
sys.exit(0)
try:
dz = Daemonizer(daemon_class,
chdir=os.getcwd(),
umask=umask,
daemon_args=daemon_args,
daemon_kwargs=daemon_kwargs)
if dz.do_action(options.action, options.pidfile):
sys.stdout.write("done\n")
return True
else:
sys.stdout.write("failed\n")
return False
except SettingMissing as err:
sys.stderr.write(err)
class AbstractDaemon:
metaclass = abc.ABCMeta
@abc.abstractmethod
def start(self):
pass
@abc.abstractmethod
def stop(self):
pass
class Daemonizer(Process):
def __init__(self, daemon_class,
chdir="/", umask=0,
daemon_args=[], daemon_kwargs={}):
if not issubclass(daemon_class, AbstractDaemon):
raise TypeError("%s is not subclass of %s"
% (str(daemon_class), str(AbstractDaemon)))
Process.__init__(self)
self.daemon_class = daemon_class
self.chdir = chdir
self.umask = umask
self.daemon_args = daemon_args
self.daemon_kwargs = daemon_kwargs
self.pidfile = None
self.daemon_process = None
self._daemonize = False
def read_pidfile(self):
try:
f = open(self.pidfile, "r")
pid = int(f.read().strip())
f.close()
except IOError:
pid = None
return pid
def do_action(self, action, pidfile):
if action not in ["start", "stop", "restart", "foreground"]:
action = "foreground"
self.pidfile = pidfile
if pidfile is not None:
pid = self.read_pidfile()
else:
pid = None
if action == "start":
return self.do_start_action(pid)
elif action == "stop":
return self.do_stop_action(pid)
elif action == "restart":
if self.do_stop_action(pid):
pid = self.read_pidfile()
return self.do_start_action(pid)
else:
return False
elif action == "foreground":
# Start as a subprocess without making a daemon
self.start()
return True
def do_start_action(self, pid):
if pid:
msg = "Start aborted, pid-file '%s' exist.\n"
sys.stderr.write(msg % self.pidfile)
return False
self._daemonize = True
self.start()
return True
def do_stop_action(self, pid):
if not pid:
msg = "Could not stop process, missing pid-file '%s'.\n"
sys.stderr.write(msg % self.pidfile)
return False
try:
while True:
os.kill(pid, signal.SIGTERM)
sleep(0.1)
except OSError:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return True
def setsignals(self):
signal.signal(signal.SIGTERM, self.sigexit)
signal.signal(signal.SIGHUP, self.sigexit)
signal.signal(signal.SIGINT, self.sigexit)
signal.signal(signal.SIGQUIT, self.sigexit)
def sigexit(self, sig, frame):
log.debug("Stop process")
self.daemon_process.stop()
sys.exit(0)
def run(self):
# Make a daemon
if self._daemonize:
self.daemonize()
try:
self.start_process()
except:
raise
def start_process(self):
self.setsignals()
os.chdir(self.chdir)
self.daemon_process = self.daemon_class(*self.daemon_args,
**self.daemon_kwargs)
self.daemon_process.start()
def daemonize(self):
pid = os.fork()
if pid != 0:
# Parent
os._exit(0)
# Child
os.close(0)
sys.stdin = sys.__stdin__ = open("/dev/null")
os.chdir(self.chdir)
os.umask(self.umask)
os.setsid()
pid = str(os.getpid())
if self.pidfile:
f = file(self.pidfile, "w+")
f.write("%s\n" % pid)
f.close()
class CLIOptionError(Exception):
pass
class CLIOptions(object):
def __init__(self, name):
self.name = name
self.parser = ArgumentParser()
self.pidfile = None
self.action = None
self.parser.add_argument("action", type=str, nargs='?',
choices=["start", "stop", "restart", "foreground"])
self.parser.add_argument("-l", "--logfile",
dest="logfile",
type=str,
help="Path to logfile (optional)")
self.parser.add_argument("-c", "--logfilecount",
dest="logfilecount",
type=int, default=0,
help="Count of old logfiles to be saved. (default: 0)")
self.parser.add_argument("-v", "--verbose",
dest="verbose",
action="store_true",
help="vebose output")
self.parser.add_argument("-s", "--SETTINGS",
dest="settings_module",
type=str,
help="SETTINGS module (required)",
metavar="SETTINGS_MODULE")
self.parser.add_argument("-r", "--RELOADER",
dest="reloader",
action="store_true",
help="Start with autoreloader")
self.parser.add_argument("-p", "--pidfile",
dest="pidfile",
type=str,
default="/var/run/%s.pid" % self.name,
help="pidfile")
def parse_args(self):
args = self.parser.parse_args()
logfile = args.logfile
logfilecount = args.logfilecount
self.pidfile = args.pidfile
self.action = args.action or "foreground"
self.reloader = args.reloader
if hasattr(args, "settings_module"):
if args.settings_module:
os.environ["LEVITAS_SETTINGS"] = args.settings_module
else:
self.parser.print_help()
msg = "option --setting required \n\n"
raise CLIOptionError(msg)
if self.action == "start":
self._initLogging(args.verbose, logfile, logfilecount)
elif self.action == "foreground":
if logfile is None:
logfile = "console"
self._initLogging(args.verbose, logfile, logfilecount)
def _initLogging(self, verbose=False, logfile=None, logfilecount=0):
log = logging.getLogger()
if logfile == "console":
h = logging.StreamHandler()
elif logfile is not None:
from logging.handlers import RotatingFileHandler
doRotation = True if os.path.exists(logfile) else False
h = RotatingFileHandler(logfile, backupCount=logfilecount)
if doRotation:
h.doRollover()
else:
return
if verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s "
"- %(levelname)s - %(message)s")
h.setFormatter(formatter)
log.addHandler(h)
| 32.575251 | 88 | 0.530698 | 7,432 | 0.763039 | 0 | 0 | 105 | 0.01078 | 0 | 0 | 1,928 | 0.197947 |
cc22117e8552bbc6f7598294280399755ea7e496 | 1,255 | py | Python | bootstrapvz/common/fs/virtualharddisk.py | brett-smith/bootstrap-vz | 2eaa98db684b85186f3ecd6e5d1304aaceca6b73 | [
"Apache-2.0"
] | null | null | null | bootstrapvz/common/fs/virtualharddisk.py | brett-smith/bootstrap-vz | 2eaa98db684b85186f3ecd6e5d1304aaceca6b73 | [
"Apache-2.0"
] | null | null | null | bootstrapvz/common/fs/virtualharddisk.py | brett-smith/bootstrap-vz | 2eaa98db684b85186f3ecd6e5d1304aaceca6b73 | [
"Apache-2.0"
] | null | null | null | from .qemuvolume import QEMUVolume
from ..tools import log_check_call
import math
class VirtualHardDisk(QEMUVolume):
extension = 'vhd'
qemu_format = 'vpc'
ovf_uri = 'http://go.microsoft.com/fwlink/?LinkId=137171'
# Azure requires the image size to be a multiple of 1 MiB.
# VHDs are dynamic by default, so we add the option
# to make the image size fixed (subformat=fixed)
def _before_create(self, e):
self.image_path = e.image_path
vol_size = str(self.size.bytes.get_qty_in('MiB')) + 'M'
log_check_call(['qemu-img', 'create', '-o', 'subformat=fixed', '-f', self.qemu_format, self.image_path + '.tmp', vol_size])
# https://serverfault.com/questions/770378/problems-preparing-a-disk-image-for-upload-to-azure
# Note, this doesn't seem to work if you try and create with the force_size option, it must be in convert
log_check_call(['qemu-img', 'convert', '-f', 'raw', '-O', self.qemu_format, '-o', 'subformat=fixed,force_size', self.image_path + '.tmp', self.image_path])
log_check_call(['rm', self.image_path + '.tmp'])
def get_uuid(self):
if not hasattr(self, 'uuid'):
import uuid
self.uuid = uuid.uuid4()
return self.uuid
| 44.821429 | 163 | 0.658964 | 1,171 | 0.933068 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.443028 |
cc22c0eacdbe6c82e058120ed038bae92274d066 | 2,180 | py | Python | tests/test_flight_path.py | flyinactor91/AVWX-Engine | 0d3ce2c6e962d2a3ec9db711caf9d1c94658fa80 | [
"MIT"
] | 30 | 2015-09-08T20:38:41.000Z | 2019-03-10T07:10:47.000Z | tests/test_flight_path.py | sthagen/avwx-engine | af235b9d26e5495f04076ed5499cf8cd131d4efc | [
"MIT"
] | 5 | 2015-08-12T15:50:07.000Z | 2019-04-16T00:42:12.000Z | tests/test_flight_path.py | sthagen/avwx-engine | af235b9d26e5495f04076ed5499cf8cd131d4efc | [
"MIT"
] | 11 | 2016-01-17T10:10:29.000Z | 2019-01-13T17:55:36.000Z | """
Flight path tests
"""
from typing import List, Union
from avwx import flight_path
from avwx.structs import Coord
FLIGHT_PATHS = (
(
[(12.34, -12.34, "12.34,-12.34"), (-43.21, 43.21, "-43.21,43.21")],
[(12.34, -12.34, "12.34,-12.34"), (-43.21, 43.21, "-43.21,43.21")],
),
(
[(12.34, -12.34, "12.34,-12.34"), "KMCO"],
[(12.34, -12.34, "12.34,-12.34"), (28.43, -81.31, "KMCO")],
),
(["KLEX", "KMCO"], [(38.04, -84.61, "KLEX"), (28.43, -81.31, "KMCO")]),
(["FLL", "ATL"], [(26.07, -80.15, "FLL"), (33.63, -84.44, "ATL")]),
(
["KMIA", "FLL", "ORL"],
[(25.79, -80.29, "KMIA"), (26.07, -80.15, "FLL"), (28.54, -81.33, "ORL")],
),
(
["FLL", "ORL", "KMCO"],
[(26.07, -80.15, "FLL"), (28.54, -81.33, "ORL"), (28.43, -81.31, "KMCO")],
),
(
["KMIA", "FLL", "ORL", "KMCO"],
[
(25.79, -80.29, "KMIA"),
(26.07, -80.15, "FLL"),
(28.54, -81.33, "ORL"),
(28.43, -81.31, "KMCO"),
],
),
(
["KLEX", "ATL", "ORL", "KMCO"],
[
(38.04, -84.61, "KLEX"),
(33.63, -84.44, "ATL"),
(28.54, -81.33, "ORL"),
(28.43, -81.31, "KMCO"),
],
),
(
["KLEX", "ATL", "KDAB", "ORL", "KMCO"],
[
(38.04, -84.61, "KLEX"),
(33.63, -84.44, "ATL"),
(29.18, -81.06, "KDAB"),
(28.54, -81.33, "ORL"),
(28.43, -81.31, "KMCO"),
],
),
)
def _to_coord(coords: List[Union[tuple, str]]) -> List[Union[Coord, str]]:
for i, item in enumerate(coords):
if isinstance(item, tuple):
coords[i] = Coord(lat=item[0], lon=item[1], repr=item[2])
return coords
def test_to_coordinates():
"""Test coord routing from coords, stations, and navaids"""
for source, target in FLIGHT_PATHS:
source = _to_coord(source)
coords = flight_path.to_coordinates(source)
# Round to prevent minor coord changes from breaking tests
coords = [(round(c.lat, 2), round(c.lon, 2), c.repr) for c in coords]
assert coords == target
| 29.459459 | 82 | 0.444037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 490 | 0.224771 |
cc2324d77ef33a73ea326b4da126a5dc8ddd8995 | 312 | py | Python | reports/api/urls.py | elishaking/i-witness | 09fe9f6db04fb64440c306e714a5233db31db23e | [
"Apache-2.0"
] | null | null | null | reports/api/urls.py | elishaking/i-witness | 09fe9f6db04fb64440c306e714a5233db31db23e | [
"Apache-2.0"
] | 2 | 2021-06-08T20:53:14.000Z | 2021-06-10T22:31:47.000Z | reports/api/urls.py | elishaking/i-witness | 09fe9f6db04fb64440c306e714a5233db31db23e | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
# report route
url(r'^create/$', views.ReportCreateAPIView.as_view(), name='create'),
url(r'^$', views.ReportListAPIView.as_view(), name='list'),
url(r'^(?P<pk>\d+)$', views.ReportRetrieveAPIView.as_view(), name='detail'),
]
| 28.363636 | 80 | 0.663462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.221154 |
cc254320a120806976fb2027ee1ac70ba2ebda77 | 42 | py | Python | GitSearch.py | inishchith/GithubTools | 6b346d063bb727045407498d6710ee2680ad4d8f | [
"MIT"
] | 1 | 2017-05-23T11:23:09.000Z | 2017-05-23T11:23:09.000Z | GitSearch.py | inishchith/GithubTools | 6b346d063bb727045407498d6710ee2680ad4d8f | [
"MIT"
] | null | null | null | GitSearch.py | inishchith/GithubTools | 6b346d063bb727045407498d6710ee2680ad4d8f | [
"MIT"
] | null | null | null | # Find file contents via various criteria. | 42 | 42 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 1 |
cc2803656376a925b69dd8c627bef259b6c0a2d2 | 666 | py | Python | apysc/_event/stop_propagation_interface.py | simon-ritchie/apyscript | c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279 | [
"MIT"
] | 16 | 2021-04-16T02:01:29.000Z | 2022-01-01T08:53:49.000Z | apysc/_event/stop_propagation_interface.py | simon-ritchie/apysc | 61d0078e5f3b702eaacceedfbe6e5cafe48f8033 | [
"MIT"
] | 613 | 2021-03-24T03:37:38.000Z | 2022-03-26T10:58:37.000Z | apysc/_event/stop_propagation_interface.py | simon-ritchie/apyscript | c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279 | [
"MIT"
] | 2 | 2021-06-20T07:32:58.000Z | 2021-12-26T08:22:11.000Z | """Class implementation for the stop_propagation interface.
"""
from apysc._type.variable_name_interface import VariableNameInterface
class StopPropagationInterface(VariableNameInterface):
def stop_propagation(self) -> None:
"""
Stop event propagation.
"""
import apysc as ap
with ap.DebugInfo(
callable_=self.stop_propagation, locals_=locals(),
module_name=__name__, class_=StopPropagationInterface):
expression: str = (
f'{self.variable_name}.stopPropagation();'
)
ap.append_js_expression(expression=expression)
| 31.714286 | 72 | 0.630631 | 521 | 0.782282 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.232733 |
cc284745b925e21a5f5e32898b37c6a8ab358a00 | 21,312 | py | Python | src/falconpy/custom_ioa.py | mccbryan3/falconpy | ec4d3a574f2e9b06d046fc8d7ca6818f1f97331f | [
"Unlicense"
] | null | null | null | src/falconpy/custom_ioa.py | mccbryan3/falconpy | ec4d3a574f2e9b06d046fc8d7ca6818f1f97331f | [
"Unlicense"
] | null | null | null | src/falconpy/custom_ioa.py | mccbryan3/falconpy | ec4d3a574f2e9b06d046fc8d7ca6818f1f97331f | [
"Unlicense"
] | null | null | null | """
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
custom_ioa - Falcon Custom Indicators of Attack API Interface Class
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
# pylint: disable=C0103 # Aligning method names to API operation IDs
from ._util import service_request, parse_id_list, force_default, args_to_params
from ._service_class import ServiceClass
from ._endpoint._custom_ioa import _custom_ioa_endpoints as Endpoints
class Custom_IOA(ServiceClass):
"""
The only requirement to instantiate an instance of this class
is a valid token provided by the Falcon API SDK OAuth2 class.
"""
@force_default(defaults=["parameters"], default_types=["dict"])
def get_patterns(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get pattern severities by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-patterns
operation_id = "get_patterns"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_platformsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get platforms by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-platformsMixin0
operation_id = "get_platformsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_rule_groupsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get rule groups by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-groupsMixin0
operation_id = "get_rule_groupsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def create_rule_groupMixin0(self: object, body: dict, cs_username: str) -> dict:
"""
Create a rule group for a platform with a name and an optional description. Returns the rule group.
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule-groupMixin0
operation_id = "create_rule_groupMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_rule_groupMixin0(self: object, *args, **kwargs) -> dict:
"""
Delete rule groups by ID. (Redirects to actual method. Typo fix.)
"""
returned = self.delete_rule_groupsMixin0(*args, **kwargs)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_rule_groupsMixin0(self: object, cs_username: str, parameters: dict = None, **kwargs) -> dict:
"""
Delete rule groups by ID.
"""
# [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/delete-rule-groupsMixin0
operation_id = "delete_rule_groupsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="DELETE",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def update_rule_groupMixin0(self: object, body: dict, cs_username: str) -> dict:
"""
Update a rule group. The following properties can be modified: name, description, enabled.
"""
# [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rule-groupMixin0
operation_id = "update_rule_groupMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="PATCH",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get rule types by ID
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-types
operation_id = "get_rule_types"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def get_rules_get(self: object, ids) -> dict:
"""
Get rules by ID and optionally version in the following format: ID[:version]
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rules-get
operation_id = "get_rules_get"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
body_payload = {}
body_payload["ids"] = parse_id_list(ids).split(",")
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_rulesMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get rules by ID and optionally version in the following format: ID[:version].
The max number of IDs is constrained by URL size.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rulesMixin0
operation_id = "get_rulesMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def create_rule(self: object, body: dict, cs_username: str) -> dict:
"""
Create a rule within a rule group. Returns the rule.
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/create-rule
operation_id = "create_rule"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_rules(self: object, cs_username: str, parameters: dict = None, **kwargs) -> dict:
"""
Delete rules from a rule group by ID.
"""
# [DELETE] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/delete-rules
operation_id = "delete_rules"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}".replace("?ids={}", "")
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="DELETE",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def update_rules(self: object, body: dict, cs_username: str) -> dict:
"""
Update rules within a rule group. Return the updated rules.
"""
# [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/update-rules
operation_id = "update_rules"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
header_payload["X-CS-USERNAME"] = cs_username
body_payload = body
returned = service_request(caller=self,
method="PATCH",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
def validate(self: object, body: dict) -> dict:
"""
Validates field values and checks for matches if a test string is provided.
"""
# [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/validate
operation_id = "validate"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
body_payload = body
returned = service_request(caller=self,
method="POST",
endpoint=target_url,
body=body_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_patterns(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get all pattern severity IDs
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-patterns
operation_id = "query_patterns"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_platformsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get all platform IDs.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-platformsMixin0
operation_id = "query_platformsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rule_groups_full(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Find all rule groups matching the query with optional filter.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-groups-full
operation_id = "query_rule_groups_full"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rule_groupsMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Finds all rule group IDs matching the query with optional filter.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-groupsMixin0
operation_id = "query_rule_groupsMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Get all rule type IDs.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-types
operation_id = "query_rule_types"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def query_rulesMixin0(self: object, parameters: dict = None, **kwargs) -> dict:
"""
Finds all rule IDs matching the query with optional filter.
"""
# [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rulesMixin0
operation_id = "query_rulesMixin0"
target_url = f"{self.base_url}{[ep[2] for ep in Endpoints if operation_id in ep[0]][0]}"
header_payload = self.headers
parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
returned = service_request(caller=self,
method="GET",
endpoint=target_url,
params=parameter_payload,
headers=header_payload,
verify=self.ssl_verify
)
return returned
| 50.742857 | 119 | 0.555602 | 19,321 | 0.906578 | 0 | 0 | 13,519 | 0.634337 | 0 | 0 | 7,704 | 0.361486 |
cc28f5fd9c4c6567c22450f12618546e4bf5cda0 | 233 | py | Python | Algorithms/Implementation/Utopian_Tree.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | 1 | 2017-12-02T14:23:44.000Z | 2017-12-02T14:23:44.000Z | Algorithms/Implementation/Utopian_Tree.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | null | null | null | Algorithms/Implementation/Utopian_Tree.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | null | null | null | #!/bin/python3
def utopian_height(n):
    """Height of a Utopian Tree after *n* cycles (starts at 1; doubles in
    spring — odd cycles — and grows by 1 in summer — even cycles)."""
    if n % 2 == 1:
        return 2 ** (n // 2 + 2) - 2
    return 2 ** (n // 2 + 1) - 1


if __name__ == "__main__":
    # One height query per line after the test-case count.
    t = int(input().strip())
    for _ in range(t):
        n = int(input().strip())
        print(utopian_height(n))
cc2966cfdb66f1e643f7d0786e36c50b3ffaaa9b | 3,319 | py | Python | pymtl3/passes/rtlir/behavioral/test/BehavioralRTLIRL4Pass_test.py | hsqforfun/pymtl3 | 05e06601cf262a663a95d1235cb99056ece84580 | [
"BSD-3-Clause"
] | 1 | 2019-11-12T12:26:01.000Z | 2019-11-12T12:26:01.000Z | pymtl3/passes/rtlir/behavioral/test/BehavioralRTLIRL4Pass_test.py | hsqforfun/pymtl3 | 05e06601cf262a663a95d1235cb99056ece84580 | [
"BSD-3-Clause"
] | null | null | null | pymtl3/passes/rtlir/behavioral/test/BehavioralRTLIRL4Pass_test.py | hsqforfun/pymtl3 | 05e06601cf262a663a95d1235cb99056ece84580 | [
"BSD-3-Clause"
] | null | null | null | #=========================================================================
# BehavioralRTLIRL4Pass_test.py
#=========================================================================
# Author : Peitian Pan
# Date : Feb 2, 2019
"""Test the level 4 behavioral RTLIR passes.
The L4 generation, L4 type check, and visualization passes are invoked. The
generation pass results are verified against a reference AST.
"""
import pytest
from pymtl3.datatypes import Bits32
from pymtl3.dsl import Component, Interface, OutPort
from pymtl3.dsl.errors import VarNotDeclaredError
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIR import *
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIRGenL4Pass import (
BehavioralRTLIRGenL4Pass,
)
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIRTypeCheckL4Pass import (
BehavioralRTLIRTypeCheckL4Pass,
)
from pymtl3.passes.rtlir.behavioral.BehavioralRTLIRVisualizationPass import (
BehavioralRTLIRVisualizationPass,
)
from pymtl3.passes.rtlir.errors import PyMTLSyntaxError, PyMTLTypeError
from pymtl3.passes.rtlir.util.test_utility import do_test, expected_failure
def local_do_test( m ):
  """Check if generated behavioral RTLIR is the same as reference.

  Elaborates *m*, runs the L4 generation/type-check/visualization passes,
  then compares each update block's RTLIR against ``m._rtlir_test_ref``.
  Components without a reference are only elaborated and type-checked.
  """
  m.elaborate()
  m.apply( BehavioralRTLIRGenL4Pass() )
  m.apply( BehavioralRTLIRTypeCheckL4Pass() )
  m.apply( BehavioralRTLIRVisualizationPass() )
  # Only the reference lookup may legitimately fail. The original wrapped the
  # whole comparison loop in `except AttributeError: pass`, which silently
  # swallowed real AttributeErrors raised during the AST comparison.
  ref = getattr( m, '_rtlir_test_ref', None )
  if ref is None:
    return
  for blk in m.get_update_blocks():
    upblk = m._pass_behavioral_rtlir_gen.rtlir_upblks[ blk ]
    assert upblk == ref[ blk.__name__ ]
#-------------------------------------------------------------------------
# Correct test cases
#-------------------------------------------------------------------------
def test_L4_interface_attr( do_test ):
  """Reading a field of a child interface should lower to nested Attribute nodes."""
  class Ifc( Interface ):
    def construct( s ):
      s.foo = OutPort( Bits32 )
  class A( Component ):
    def construct( s ):
      s.in_ = Ifc()
      s.out = OutPort( Bits32 )
      @s.update
      def upblk():
        s.out = s.in_.foo
  a = A()
  # Reference RTLIR for `s.out = s.in_.foo`: the interface access becomes
  # Attribute(Base, 'in_') and the field access wraps it in another Attribute.
  a._rtlir_test_ref = { 'upblk' : CombUpblk( 'upblk', [ Assign(
    Attribute( Base( a ), 'out' ), Attribute(
      Attribute( Base( a ), 'in_' ), 'foo' ), True ) ] ) }
  do_test( a )
def test_L4_interface_array_index( do_test ):
  """Indexing an interface array should lower to Index before the field Attribute."""
  class Ifc( Interface ):
    def construct( s ):
      s.foo = OutPort( Bits32 )
  class A( Component ):
    def construct( s ):
      s.in_ = [ Ifc() for _ in range(4) ]
      s.out = OutPort( Bits32 )
      @s.update
      def upblk():
        s.out = s.in_[2].foo
  a = A()
  # Reference RTLIR for `s.out = s.in_[2].foo`: Index(Attribute(..., 'in_'),
  # Number(2)) selects the interface instance before the field Attribute.
  a._rtlir_test_ref = { 'upblk' : CombUpblk( 'upblk', [ Assign(
    Attribute( Base( a ), 'out' ), Attribute( Index(
      Attribute( Base( a ), 'in_' ), Number(2) ), 'foo' ), True ) ] ) }
  do_test( a )
#-------------------------------------------------------------------------
# PyMTL type errors
#-------------------------------------------------------------------------
def test_L4_interface_no_field( do_test ):
  """Accessing an undeclared interface field must raise VarNotDeclaredError."""
  class Ifc( Interface ):
    def construct( s ):
      s.foo = OutPort( Bits32 )
  class A( Component ):
    def construct( s ):
      s.in_ = Ifc()
      s.out = OutPort( Bits32 )
      @s.update
      def upblk():
        # `bar` is deliberately not declared on Ifc.
        s.out = s.in_.bar
  with expected_failure( VarNotDeclaredError, 's.in_ does not have field "bar"' ):
    do_test( A() )
| 32.861386 | 82 | 0.582706 | 736 | 0.221754 | 0 | 0 | 165 | 0.049714 | 0 | 0 | 903 | 0.27207 |
cc298addf220b3e99a59d78206d28959947caf14 | 2,287 | py | Python | api/views.py | seanpierce/django-itr | 01951612d4d49c328c89487efc83e65908c8ad58 | [
"MIT"
] | null | null | null | api/views.py | seanpierce/django-itr | 01951612d4d49c328c89487efc83e65908c8ad58 | [
"MIT"
] | null | null | null | api/views.py | seanpierce/django-itr | 01951612d4d49c328c89487efc83e65908c8ad58 | [
"MIT"
] | null | null | null | # import json
import uuid
from django.apps import apps
from django.core import serializers
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.shortcuts import redirect
from django.conf import settings
from .api_helpers import *
Episode = apps.get_model('episodes', 'Episode')
SubscriptionRequest = apps.get_model('subscribers', 'SubscriptionRequest')
Subscriber = apps.get_model('subscribers', 'Subscriber')
def get_episodes(request):
    """Return every Episode serialized as a JSON array."""
    queryset = Episode.objects.all()
    payload = serializers.serialize("json", queryset)
    return HttpResponse(payload, content_type='application/json')
@csrf_exempt
def create_new_subscription_request(request):
    """Create or refresh a subscription request for the posted email and
    send a confirmation email carrying its token."""
    if not valid_method('POST', request):
        return error_response('Error: Method must be POST', 405)

    email = request.POST.get('email', False)
    if not email:
        return error_response('Error: No email provided in request', 422)

    subscription_request, created_new = SubscriptionRequest.objects.get_or_create(
        email=email)

    # Re-requesting with an already-known email rotates the confirmation token.
    if not created_new:
        subscription_request.token = uuid.uuid4()
        subscription_request.save()

    if send_confirmation_email(subscription_request):
        return response('Email sent to ' + email)
    return error_response('Unable to send email to ' + email, 500)
def create_subscriber(request):
    """Confirm a subscription request (email + token from the query string)
    and create the corresponding Subscriber, then redirect to the thanks page.
    """
    email = request.GET.get('email', False)
    token = request.GET.get('token', False)

    if not email or not token:
        return error_response("Error: Unable to process request. Missing information", 422)

    # Bug fix: .objects.get() raises DoesNotExist instead of returning None,
    # so the original "not found" branch was unreachable and a bad token
    # produced a 500. .filter().first() returns None on no match.
    subscription_request = SubscriptionRequest.objects.filter(
        email=email, token=token).first()

    if not subscription_request:
        return error_response("Error: Subscription request not found", 404)

    subscriber, created_new = Subscriber.objects.get_or_create(email=email)
    # The thanks page shows a different message for already-subscribed users.
    exists = 'False' if created_new else 'True'

    return redirect('/thanks/?email=' + email + '&exists=' + exists)
def thanks(request):
    """Render the thank-you page shown after a confirmed subscription."""
    context = {
        'email': request.GET.get('email', False),
        'root': settings.HOST_URL,
        'exists': request.GET.get('exists', False),
    }
    return render(request, 'api/thanks.html', context)
| 29.320513 | 91 | 0.721032 | 0 | 0 | 0 | 0 | 702 | 0.306952 | 0 | 0 | 436 | 0.190643 |
cc2e428132969e828c476350489212606b733e28 | 1,446 | py | Python | saleor/graphql/csv/schema.py | ibutiti/saleor | fffe9a54c01aa07131102474dcb1519e0b59da74 | [
"BSD-3-Clause"
] | 2 | 2021-05-16T13:46:07.000Z | 2021-05-16T13:49:21.000Z | saleor/graphql/csv/schema.py | Niranjoyyengkhom/saleor | 4e6c4fe10476508e6b5c06fb1f38f9dc1dfc81a5 | [
"CC-BY-4.0"
] | 73 | 2021-06-07T04:47:10.000Z | 2022-03-14T04:52:43.000Z | saleor/graphql/csv/schema.py | Niranjoyyengkhom/saleor | 4e6c4fe10476508e6b5c06fb1f38f9dc1dfc81a5 | [
"CC-BY-4.0"
] | null | null | null | import graphene
from ...core.permissions import ProductPermissions
from ...core.tracing import traced_resolver
from ...csv import models
from ..core.fields import FilterInputConnectionField
from ..decorators import permission_required
from .filters import ExportFileFilterInput
from .mutations import ExportProducts
from .sorters import ExportFileSortingInput
from .types import ExportFile
class CsvQueries(graphene.ObjectType):
export_file = graphene.Field(
ExportFile,
id=graphene.Argument(
graphene.ID, description="ID of the export file job.", required=True
),
description="Look up a export file by ID.",
)
export_files = FilterInputConnectionField(
ExportFile,
filter=ExportFileFilterInput(description="Filtering options for export files."),
sort_by=ExportFileSortingInput(description="Sort export files."),
description="List of export files.",
)
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
@traced_resolver
def resolve_export_file(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, ExportFile)
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
@traced_resolver
def resolve_export_files(self, info, query=None, sort_by=None, **kwargs):
return models.ExportFile.objects.all()
class CsvMutations(graphene.ObjectType):
export_products = ExportProducts.Field()
| 34.428571 | 88 | 0.748271 | 1,049 | 0.72545 | 0 | 0 | 399 | 0.275934 | 0 | 0 | 138 | 0.095436 |
cc2f488e2f264ad29352a56de79ed0853a4a7203 | 5,584 | py | Python | tests/test_pytest_localftpserver_TLS.py | pythrick/pytest-localftpserver | 63ac9046ef381a15620202fba90925aa263fe9d0 | [
"Apache-2.0"
] | 1 | 2020-02-16T11:46:39.000Z | 2020-02-16T11:46:39.000Z | tests/test_pytest_localftpserver_TLS.py | s-weigand/pytest-localftpserver | a2976dfabb435248512d568c126254e849529bfa | [
"Apache-2.0"
] | null | null | null | tests/test_pytest_localftpserver_TLS.py | s-weigand/pytest-localftpserver | a2976dfabb435248512d568c126254e849529bfa | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pytest_localftpserver
----------------------------------
Tests for `pytest_localftpserver` module.
"""
import os
from ftplib import error_perm
import pytest
from .test_pytest_localftpserver import (ftp_login,
check_files_by_ftpclient,
close_client,
FILE_LIST)
from pytest_localftpserver.servers import (SimpleFTPServer,
WrongFixtureError,
DEFAULT_CERTFILE)
from pytest_localftpserver.helper_functions import InvalidCertificateError
def test_is_TLS(ftpserver_TLS):
    """The ftpserver_TLS fixture must report that it serves FTPS."""
    uses_tls = ftpserver_TLS.uses_TLS
    assert uses_tls is True
@pytest.mark.parametrize("anon",
                         [True, False])
def test_get_login_data(ftpserver_TLS, anon):
    """Dict- and URL-style login data must be consistent for both anonymous
    and authenticated access."""
    # Bug fix: pass the parametrized flag through — the original always
    # requested the authenticated dict, so the anonymous case was untested.
    login_dict = ftpserver_TLS.get_login_data(style="dict", anon=anon)
    assert login_dict["host"] == "localhost"
    assert login_dict["port"] == ftpserver_TLS.server_port
    if not anon:
        assert login_dict["user"] == "fakeusername"
        assert login_dict["passwd"] == "qweqwe"
    login_url = ftpserver_TLS.get_login_data(style="url", anon=anon)
    if anon:
        base_url = "ftpes://localhost:"
    else:
        base_url = "ftpes://fakeusername:qweqwe@localhost:"
    assert login_url == base_url + str(ftpserver_TLS.server_port)
def test_file_upload_user(ftpserver_TLS, tmpdir):
    """An authenticated TLS client can create a directory and upload a file,
    and the file must appear under the server's home directory."""
    # start from a clean server state
    ftpserver_TLS.reset_tmp_dirs()
    client = ftp_login(ftpserver_TLS, use_TLS=True)
    client.cwd("/")
    client.mkd("FOO")
    client.cwd("FOO")
    filename = "testfile.txt"
    local_file = tmpdir.join(filename)
    local_file.write("test")
    with open(str(local_file), "rb") as src:
        client.storbinary("STOR " + filename, src)
    close_client(client)
    server_dir = os.path.join(ftpserver_TLS.server_home, "FOO")
    assert os.path.isdir(server_dir)
    uploaded_path = os.path.join(server_dir, filename)
    assert os.path.isfile(uploaded_path)
    with open(uploaded_path, "r") as uploaded:
        assert uploaded.read() == "test"
def test_file_upload_anon(ftpserver_TLS):
    """Anonymous users have no write privileges: mkd must be rejected."""
    client = ftp_login(ftpserver_TLS, anon=True, use_TLS=True)
    client.cwd("/")
    with pytest.raises(error_perm):
        client.mkd("FOO")
    close_client(client)
@pytest.mark.parametrize("anon",
                         [False, True])
def test_get_file_paths(tmpdir, ftpserver_TLS, anon):
    """get_file_paths must yield one local path per file placed in the
    (anonymous or authenticated) server base directory."""
    # makes sure to start with clean temp dirs
    ftpserver_TLS.reset_tmp_dirs()
    base_path = ftpserver_TLS.get_local_base_path(anon=anon)
    files_on_server = []
    # Populate the server directory with the fixture FILE_LIST layout,
    # creating subdirectories as needed; each file's content is its name.
    for dirs, filename in FILE_LIST:
        dir_path = os.path.abspath(os.path.join(base_path, dirs))
        if dirs != "":
            os.makedirs(dir_path)
        abs_file_path = os.path.join(dir_path, filename)
        file_path = "/".join([dirs, filename]).lstrip("/")
        files_on_server.append(file_path)
        with open(abs_file_path, "a") as f:
            f.write(filename)
    path_iterable = list(ftpserver_TLS.get_file_paths(anon=anon))
    assert len(path_iterable) == len(FILE_LIST)
    # checking the files by rel_path to user home dir
    # and native ftp client
    check_files_by_ftpclient(ftpserver_TLS, tmpdir, files_on_server, path_iterable,
                             anon, use_TLS=True)
@pytest.mark.parametrize("style, read_mode", [
    ("path", "r"),
    ("content", "r"),
    ("content", "rb")
])
def test_ftpserver_TLS_get_cert(ftpserver_TLS, style, read_mode):
    """get_cert returns either the certificate path or its (text/binary) content."""
    result = ftpserver_TLS.get_cert(style=style, read_mode=read_mode)
    if style == "path":
        assert result == DEFAULT_CERTFILE
        return
    with open(DEFAULT_CERTFILE, read_mode) as certfile:
        expected = certfile.read()
    assert result == expected
def test_ftpserver_get_cert_exceptions(ftpserver, ftpserver_TLS):
    """get_cert must reject non-TLS fixtures and invalid argument types/values."""
    # Calling get_cert on the plain (non-TLS) fixture is a usage error.
    with pytest.raises(WrongFixtureError,
                       match=r"The fixture ftpserver isn't using TLS, and thus"
                             r"has no certificate. Use ftpserver_TLS instead."):
        ftpserver.get_cert()
    # type errors
    with pytest.raises(TypeError, match="The Argument `style` needs to be of type "
                                        "``str``, the type given type was "
                                        "``bool``."):
        ftpserver.get_cert(style=True)
    with pytest.raises(TypeError, match="The Argument `read_mode` needs to be of type "
                                        "``str``, the type given type was "
                                        "``bool``."):
        ftpserver.get_cert(read_mode=True)
    # value errors
    with pytest.raises(ValueError, match="The Argument `style` needs to be of value "
                                         "'path' or 'content', the given value was "
                                         "'dict'."):
        list(ftpserver.get_cert(style="dict"))
    with pytest.raises(ValueError, match="The Argument `read_mode` needs to be of value "
                                         "'r' or 'rb', the given value was "
                                         "'invalid_option'."):
        list(ftpserver.get_cert(read_mode="invalid_option"))
def test_wrong_cert_exception():
    """Starting a TLS server with a malformed certificate file must raise
    InvalidCertificateError."""
    cert_path = os.path.join(os.path.dirname(__file__), "not_a_valid_cert.pem")
    wrong_cert = os.path.abspath(cert_path)
    with pytest.raises(InvalidCertificateError):
        SimpleFTPServer(use_TLS=True, certfile=wrong_cert)
| 36.496732 | 89 | 0.609062 | 0 | 0 | 0 | 0 | 2,102 | 0.376433 | 0 | 0 | 1,179 | 0.211139 |
cc2f548c6ca89c71575a09f40851faab6afaa94d | 5,975 | py | Python | data/data_augment.py | ZHANGHeng19931123/MutualGuide | acf317f48fa5af63d3aa41cd8ebe5586d9bdf033 | [
"MIT"
] | 124 | 2020-10-01T13:37:02.000Z | 2022-03-25T13:31:03.000Z | data/data_augment.py | ZHANGHeng19931123/MutualGuide | acf317f48fa5af63d3aa41cd8ebe5586d9bdf033 | [
"MIT"
] | 4 | 2020-10-09T06:20:42.000Z | 2021-12-19T22:28:50.000Z | data/data_augment.py | ZHANGHeng19931123/MutualGuide | acf317f48fa5af63d3aa41cd8ebe5586d9bdf033 | [
"MIT"
] | 18 | 2020-10-03T13:32:48.000Z | 2021-12-21T06:13:42.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import random
import math
import torch
def _crop(image, boxes, labels, p=0.75, min_iou=0.75, max_iou=0.25):
    """With probability *p*, take a random crop of *image* and keep only the
    boxes whose centers fall inside it, clipping them to the crop.

    Crop candidates whose IoU with any box lies in [max_iou, min_iou) are
    rejected (partially cut objects), as are crops containing no box center.
    """
    def matrix_iou(a, b):
        # Pairwise intersection-over-area-of-a for box sets a (N,4) and b (M,4).
        lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
        rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
        area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
        area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
        return area_i / area_a[:, np.newaxis]
    if random.random() > p:
        return (image, boxes, labels)
    (height, width, _) = image.shape
    while True:
        # Sample crop size: area factor `scale` and aspect-ratio factor `ratio`.
        scale = random.uniform(0.5, 1.)
        min_ratio = max(0.5, scale * scale)
        max_ratio = min(2, 1. / scale / scale)
        ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
        w = int(scale * ratio * width)
        h = int(scale / ratio * height)
        l = random.randrange(width - w)
        t = random.randrange(height - h)
        roi = np.array((l, t, l + w, t + h))
        # Reject crops that only partially overlap some ground-truth box.
        iou = matrix_iou(boxes, roi[np.newaxis])
        iou = iou[iou < min_iou]
        iou = iou[iou >= max_iou]
        if len(iou) > 0:
            continue
        image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
        # Keep boxes whose center lies strictly inside the crop.
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
        boxes_t = boxes[mask].copy()
        labels_t = labels[mask].copy()
        if len(boxes_t) == 0:
            continue
        # Clip surviving boxes to the crop and shift into crop coordinates.
        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
        boxes_t[:, :2] -= roi[:2]
        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
        boxes_t[:, 2:] -= roi[:2]
        return (image_t, boxes_t, labels_t)
def _distort(image):
    """Apply random photometric distortions (brightness, contrast, hue,
    saturation) to a BGR image and return the distorted copy."""
    def _convert(image, alpha=1, beta=0):
        # In-place affine transform image = image * alpha + beta, clamped to [0, 255].
        tmp = image.astype(float) * alpha + beta
        tmp[tmp < 0] = 0
        tmp[tmp > 255] = 255
        image[:] = tmp
    image = image.copy()
    if random.randrange(2):
        # brightness shift
        _convert(image, beta=random.uniform(-32, 32))
    if random.randrange(2):
        # contrast scaling
        _convert(image, alpha=random.uniform(0.5, 1.5))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    if random.randrange(2):
        # hue rotation (OpenCV hue range is [0, 180))
        tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
        tmp %= 180
        image[:, :, 0] = tmp
    if random.randrange(2):
        # saturation scaling
        _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
    return image
def _expand(image, boxes, labels, p=0.75):
    """With probability *p*, paste the image at a random position onto a
    larger canvas filled with value 114, shifting boxes accordingly."""
    if random.random() > p:
        return (image, boxes, labels)
    (height, width, depth) = image.shape
    while True:
        # Sample canvas size: area factor `scale` and aspect-ratio factor `ratio`.
        scale = random.uniform(1, 2)
        min_ratio = max(0.5, 1. / scale / scale)
        max_ratio = min(2, scale * scale)
        ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
        ws = scale * ratio
        hs = scale / ratio
        # The canvas must be at least as large as the source image.
        if ws < 1 or hs < 1:
            continue
        w = int(ws * width)
        h = int(hs * height)
        left = random.randint(0, w - width)
        top = random.randint(0, h - height)
        boxes_t = boxes.copy()
        boxes_t[:, :2] += (left, top)
        boxes_t[:, 2:] += (left, top)
        # 114 is the (gray) fill value used for the padded region.
        expand_image = np.ones((h, w, depth)) * 114.0
        expand_image[top:top + height, left:left + width] = image
        image = expand_image
        return (image, boxes_t, labels)
def _mirror(image, boxes):
(_, width, _) = image.shape
if random.randrange(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return (image, boxes)
def preproc_for_test(image, insize, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), swap=(2, 0, 1)):
    """Resize a BGR image to (insize, insize), convert to RGB, scale to
    [0, 1], normalize with *mean*/*std* (skipped when None) and transpose
    per *swap* (default HWC -> CHW), returning a contiguous float32 array."""
    resized = cv2.resize(image, (insize, insize), interpolation=cv2.INTER_LINEAR)
    rgb = resized.astype(np.float32)[:, :, ::-1]
    rgb /= 255.0
    if mean is not None:
        rgb -= mean
    if std is not None:
        rgb /= std
    chw = rgb.transpose(swap)
    return np.ascontiguousarray(chw, dtype=np.float32)
def preproc_for_train(image, targets, insize):
    """Augment (distort, crop, expand, mirror) and normalize an image and its
    targets (N, 5) array of [x1, y1, x2, y2, label] for training.

    Returns (tensor image, targets with boxes normalized to [0, 1]). If the
    augmentation drops every box, falls back to the un-augmented original.
    """
    boxes = targets[:, :-1].copy()
    labels = targets[:, -1].copy()
    if len(boxes) == 0:
        # No annotations: skip augmentation, return a single dummy target row.
        targets = np.zeros((1, 5))
        image = preproc_for_test(image, insize)
        return (torch.from_numpy(image), targets)
    # Keep an un-augmented copy (with normalized boxes) as a fallback.
    image_o = image.copy()
    targets_o = targets.copy()
    (height_o, width_o, _) = image_o.shape
    boxes_o = targets_o[:, :-1]
    labels_o = targets_o[:, -1]
    boxes_o[:, 0::2] /= width_o
    boxes_o[:, 1::2] /= height_o
    labels_o = np.expand_dims(labels_o, 1)
    targets_o = np.hstack((boxes_o, labels_o))
    # Augmentation pipeline (each step is applied with its own probability).
    image_t = _distort(image)
    (image_t, boxes, labels) = _crop(image_t, boxes, labels)
    (image_t, boxes, labels) = _expand(image_t, boxes, labels)
    (image_t, boxes) = _mirror(image_t, boxes)
    (height, width, _) = image_t.shape
    image_t = preproc_for_test(image_t, insize)
    boxes = boxes.copy()
    boxes[:, 0::2] /= width
    boxes[:, 1::2] /= height
    # Drop boxes that became smaller than 8 px (on the resized image).
    b_w = (boxes[:, 2] - boxes[:, 0]) * 1.
    b_h = (boxes[:, 3] - boxes[:, 1]) * 1.
    mask_b = np.minimum(b_w, b_h) > (8. / insize)
    boxes_t = boxes[mask_b]
    labels_t = labels[mask_b].copy()
    if len(boxes_t) == 0:
        # Augmentation removed all usable boxes: use the original sample.
        image = preproc_for_test(image_o, insize)
        return (torch.from_numpy(image), targets_o)
    labels_t = np.expand_dims(labels_t, 1)
    targets_t = np.hstack((boxes_t, labels_t))
    return (torch.from_numpy(image_t), targets_t)
def detection_collate(batch):
    """ Custom collate fn for images and boxes.

    Stacks the image tensors of a batch into one (N, C, H, W) tensor and
    collects the per-image annotation arrays as a list of float tensors
    (annotations have variable length, so they cannot be stacked).
    """
    targets = []
    imgs = []
    for sample in batch:
        for tup in sample:
            if torch.is_tensor(tup):
                imgs.append(tup)
            # np.ndarray is the idiomatic check (the original used
            # isinstance(tup, type(np.empty(0)))).
            elif isinstance(tup, np.ndarray):
                annos = torch.from_numpy(tup).float()
                targets.append(annos)
    return (torch.stack(imgs, 0), targets)
| 29.726368 | 107 | 0.560335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.014393 |
cc2fde9d0a241cbd89c2d79fba402f8dee594778 | 3,575 | py | Python | convnet_utils.py | ramosmy/DiverseBranchBlock | cd627d5089eaa25dedaa258b189fde508586a2f7 | [
"Apache-2.0"
] | null | null | null | convnet_utils.py | ramosmy/DiverseBranchBlock | cd627d5089eaa25dedaa258b189fde508586a2f7 | [
"Apache-2.0"
] | null | null | null | convnet_utils.py | ramosmy/DiverseBranchBlock | cd627d5089eaa25dedaa258b189fde508586a2f7 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from diversebranchblock import DiverseBranchBlock
from acb import ACBlock
from dbb_transforms import transI_fusebn
CONV_BN_IMPL = 'base'
DEPLOY_FLAG = False
class ConvBN(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and a nonlinearity.

    In deploy mode the BN is absent and the convolution carries a bias;
    switch_to_deploy() folds a trained BN into the conv weights in place.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride, padding, dilation, groups, deploy=False, nonlinear=None):
        super().__init__()
        self.nonlinear = nn.Identity() if nonlinear is None else nonlinear
        conv_kwargs = dict(in_channels=in_channels, out_channels=out_channels,
                           kernel_size=kernel_size, stride=stride,
                           padding=padding, dilation=dilation, groups=groups)
        if deploy:
            # Deployed form: a single biased convolution, no BN.
            self.conv = nn.Conv2d(bias=True, **conv_kwargs)
        else:
            self.conv = nn.Conv2d(bias=False, **conv_kwargs)
            self.bn = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        out = self.conv(x)
        # The bn attribute only exists in training form.
        if hasattr(self, 'bn'):
            out = self.bn(out)
        return self.nonlinear(out)

    def switch_to_deploy(self):
        """Fuse the BN into the conv and drop the BN module (inference only)."""
        kernel, bias = transI_fusebn(self.conv.weight, self.bn)
        fused = nn.Conv2d(in_channels=self.conv.in_channels,
                          out_channels=self.conv.out_channels,
                          kernel_size=self.conv.kernel_size,
                          stride=self.conv.stride,
                          padding=self.conv.padding,
                          dilation=self.conv.dilation,
                          groups=self.conv.groups,
                          bias=True)
        fused.weight.data = kernel
        fused.bias.data = bias
        for para in self.parameters():
            para.detach_()
        self.__delattr__('conv')
        self.__delattr__('bn')
        self.conv = fused
def _select_impl(kernel_size):
    """Pick the conv+bn block class for the current CONV_BN_IMPL setting.

    1x1 and large (>= 7) kernels always fall back to plain ConvBN regardless
    of the selected implementation — NOTE(review): presumably because the
    ACB/DBB reparameterized branches only target ordinary small kernels;
    confirm against the block implementations.
    """
    if CONV_BN_IMPL == 'base' or kernel_size == 1 or kernel_size >= 7:
        return ConvBN
    if CONV_BN_IMPL == 'ACB':
        return ACBlock
    return DiverseBranchBlock
def conv_bn(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1):
    """Build a conv+bn block (no activation) using the globally selected implementation."""
    blk_type = _select_impl(kernel_size)
    return blk_type(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
                    padding=padding, dilation=dilation, groups=groups, deploy=DEPLOY_FLAG)
def conv_bn_relu(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1):
    """Build a conv+bn+ReLU block using the globally selected implementation."""
    blk_type = _select_impl(kernel_size)
    return blk_type(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
                    padding=padding, dilation=dilation, groups=groups, deploy=DEPLOY_FLAG, nonlinear=nn.ReLU())
def switch_conv_bn_impl(block_type):
    """Select which conv+bn implementation the conv_bn*/factories build.

    Valid values: 'base' (plain ConvBN), 'DBB', 'ACB'.
    """
    global CONV_BN_IMPL
    assert block_type in ('base', 'DBB', 'ACB')
    CONV_BN_IMPL = block_type
def switch_deploy_flag(deploy):
    """Set the module-wide deploy flag consumed by conv_bn()/conv_bn_relu().

    When True, newly built blocks are constructed with deploy=True
    (e.g. ConvBN becomes a single biased conv with no separate BatchNorm).
    """
    global DEPLOY_FLAG
    DEPLOY_FLAG = deploy
    print('deploy flag: ', DEPLOY_FLAG)
def build_model(arch):
    """Instantiate a backbone by architecture name.

    Supported names: 'ResNet-18', 'ResNet-50', 'MobileNet'.
    Raises ValueError for anything else. Imports are kept lazy so an
    unsupported name never touches the model modules.
    """
    if arch == 'ResNet-18':
        from resnet import create_Res18 as factory
    elif arch == 'ResNet-50':
        from resnet import create_Res50 as factory
    elif arch == 'MobileNet':
        from mobilenet import create_MobileNet as factory
    else:
        raise ValueError('TODO')
    return factory()
cc314cd567ba488be7f25e7b527b175fec18ba02 | 7,961 | py | Python | perde-tests/tests/test_attrs.py | YushiOMOTE/perde | beeb3208ea2d6edcc4df2b5d74834fadd2807fbc | [
"MIT"
] | 19 | 2020-10-29T11:38:19.000Z | 2022-03-13T03:14:21.000Z | perde-tests/tests/test_attrs.py | YushiOMOTE/perde | beeb3208ea2d6edcc4df2b5d74834fadd2807fbc | [
"MIT"
] | 19 | 2020-10-29T08:02:10.000Z | 2020-12-22T06:25:48.000Z | perde-tests/tests/test_attrs.py | YushiOMOTE/perde | beeb3208ea2d6edcc4df2b5d74834fadd2807fbc | [
"MIT"
] | 1 | 2021-05-06T07:38:20.000Z | 2021-05-06T07:38:20.000Z | from dataclasses import dataclass, field
from typing import Dict
import perde
import pytest
from util import FORMATS, FORMATS_EXCEPT
"""rust
#[derive(Serialize, Debug, new)]
struct Plain {
a: String,
b: String,
c: u64,
}
add!(Plain {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_plain(m):
    """Round-trip a plain dataclass through every format (matches Rust `Plain` fixture)."""
    @dataclass
    class Plain:
        a: str
        b: str
        c: int
    m.repack_type(Plain)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAll {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAll {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all(m):
    """Round-trip with rename_all=camelCase applied to all fields (Rust `RenameAll`)."""
    @perde.attr(rename_all="camelCase")
    @dataclass
    class RenameAll:
        pen_pineapple: str
        apple_pen: str
    m.repack_type(RenameAll)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize", rename_all = "PascalCase")]
struct RenameAllSerializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize")]
struct RenameAllSerializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllSerializeInput {"--".into(), "==".into()});
add!(RenameAllSerializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_serialize(m):
    """rename_all_serialize renames only on output: unpack plain input, expect PascalCase output."""
    @perde.attr(rename_all_serialize="PascalCase")
    @dataclass
    class RenameAllSerialize:
        pen_pineapple: str
        apple_pen: str
    d = m.unpack_data("RenameAllSerializeInput", astype=RenameAllSerialize)
    v = m.dumps(d)
    e = m.data("RenameAllSerializeOutput")
    assert v == e
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize")]
struct RenameAllDeserializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize", rename_all = "SCREAMING_SNAKE_CASE")]
struct RenameAllDeserializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllDeserializeInput {"--".into(), "==".into()});
add!(RenameAllDeserializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_deserialize(m):
    """rename_all_deserialize renames only on input: unpack SCREAMING_SNAKE_CASE, emit plain names."""
    @perde.attr(rename_all_deserialize="SCREAMING_SNAKE_CASE")
    @dataclass
    class RenameAllDeserialize:
        pen_pineapple: str
        apple_pen: str
    d = m.unpack_data("RenameAllDeserializeInput", astype=RenameAllDeserialize)
    v = m.dumps(d)
    e = m.data("RenameAllDeserializeOutput")
    assert v == e
"""rust
#[derive(Serialize, Debug, new)]
struct DenyUnknownFields {
x: String,
y: i64,
z: i64,
q: String,
}
add!(DenyUnknownFields {"aaaaa".into(), 1, -2, "unknown".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_deny_unknown_fields(m):
    """deny_unknown_fields: extra field `q` in the fixture is ignored by default, rejected when set."""
    @dataclass
    class NoDenyUnknownFields:
        x: str
        y: int
        z: int
    @perde.attr(deny_unknown_fields=True)
    @dataclass
    class DenyUnknownFields:
        x: str
        y: int
        z: int
    # Default behavior: the unknown field is silently dropped.
    e = m.unpack_data("DenyUnknownFields", astype=NoDenyUnknownFields)
    assert e == NoDenyUnknownFields("aaaaa", 1, -2)
    # With the attribute set, the same payload must raise.
    with pytest.raises(Exception) as e:
        m.unpack_data("DenyUnknownFields", astype=DenyUnknownFields)
    print(f"{e}")
"""rust
#[derive(Serialize, Debug, new)]
struct Rename {
a: String,
#[serde(rename = "x")]
b: String,
c: u64,
}
add!(Rename {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename(m):
    """Per-field rename via field metadata (`b` serialized as `x`, matching Rust `Rename`)."""
    @dataclass
    class Rename:
        a: str
        b: str = field(metadata={"perde_rename": "x"})
        c: int
    m.repack_type(Rename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAllRename {
pen_pineapple: String,
#[serde(rename = "pen_pen")]
apple_pen: String,
}
add!(RenameAllRename {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_in_rename_all(m):
    """A per-field rename overrides the class-level rename_all rule for that field."""
    @perde.attr(rename_all="camelCase")
    @dataclass
    class RenameAllRename:
        pen_pineapple: str
        apple_pen: str = field(metadata={"perde_rename": "pen_pen"})
    m.repack_type(RenameAllRename)
"""rust
#[derive(Serialize, Debug, new)]
struct NestedRenameChild {
a: String,
#[serde(rename = "d")]
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRename {
x: String,
#[serde(rename = "w")]
y: NestedRenameChild,
z: i64,
}
add!(NestedRename
{"xxx".into(),
NestedRenameChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename(m):
    """Per-field renames apply inside nested dataclasses too (toml excluded per fixture)."""
    @dataclass
    class NestedRenameChild:
        a: str
        b: str = field(metadata={"perde_rename": "d"})
    @dataclass
    class NestedRename:
        x: str
        y: NestedRenameChild = field(metadata={"perde_rename": "w"})
        z: int
    m.repack_type(NestedRename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "UPPERCASE")]
struct NestedRenameAllChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRenameAll {
x: String,
y: NestedRenameAllChild,
z: i64,
}
add!(NestedRenameAll
{"xxx".into(),
NestedRenameAllChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename_all(m):
    """rename_all on a nested child class only affects that child's fields (toml excluded)."""
    @perde.attr(rename_all="UPPERCASE")
    @dataclass
    class NestedRenameAllChild:
        a: str
        b: str
    @dataclass
    class NestedRenameAll:
        x: str
        y: NestedRenameAllChild
        z: int
    m.repack_type(NestedRenameAll)
"""rust
#[derive(Serialize, Debug, new)]
struct FlattenChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct Flatten {
x: String,
#[serde(flatten)]
y: FlattenChild,
z: i64,
}
add!(Flatten
{"xxx".into(),
FlattenChild::new("ppp".into(), "qqq".into()),
1111}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_flatten(m):
    """perde_flatten inlines a nested dataclass's fields into the parent (msgpack excluded)."""
    @dataclass
    class FlattenChild:
        a: str
        b: str
    @dataclass
    class Flatten:
        x: str
        y: FlattenChild = field(metadata={"perde_flatten": True})
        z: int
    m.repack_type(Flatten)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten {
x: String,
y: i64,
#[serde(flatten)]
z: IndexMap<String, String>,
}
add!(DictFlatten {"hey".into(), -103223,
{
let mut m = IndexMap::new();
m.insert("pp".into(), "q1".into());
m.insert("ppp".into(), "q2".into());
m.insert("pppp".into(), "q3".into());
m
}}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten(m):
    """Flattening a Dict field captures all remaining keys (msgpack excluded)."""
    @dataclass
    class DictFlatten:
        x: str
        y: int
        z: Dict[str, str] = field(metadata={"perde_flatten": True})
    m.repack_type(DictFlatten)
"""rust
#[derive(Serialize, Debug, new)]
struct Flatten2 {
x: String,
a: i64,
b: i64,
}
add!(Flatten2 { "haa".into(), 11, 33 });
"""
@pytest.mark.parametrize("m", FORMATS)
def test_flatten2(m):
    """Flattened child matches the flat Rust struct `Flatten2` across all formats."""
    @dataclass
    class Flatten2Child:
        a: int
        b: int
    @dataclass
    class Flatten2:
        x: str
        y: Flatten2Child = field(metadata={"perde_flatten": True})
    m.repack_type(Flatten2)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten2 {
x: String,
y: i64,
pp: String,
ppp: String,
pppp: String,
}
add!(DictFlatten2 {
"hey".into(), -103223,
"q1".into(), "q2".into(), "q3".into()
});
"""
# Hopefully support msgpack.
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten2(m):
    """Flattened Dict round-trips against the flat Rust struct `DictFlatten2`."""
    @dataclass
    class DictFlatten2:
        x: str
        y: int
        z: Dict[str, str] = field(metadata={"perde_flatten": True})
    m.repack_type(DictFlatten2)
| 19.464548 | 79 | 0.633589 | 1,523 | 0.191308 | 0 | 0 | 3,977 | 0.49956 | 0 | 0 | 4,209 | 0.528702 |
cc3218c16346f01e52545769fbe6392d9edf32f1 | 3,045 | py | Python | python/example_code/update_connectNW.py | nifcloud/nifcloud-sample | 336773d8f0a2fb842fc20bb33da9ba83c5ed4457 | [
"Apache-2.0"
] | null | null | null | python/example_code/update_connectNW.py | nifcloud/nifcloud-sample | 336773d8f0a2fb842fc20bb33da9ba83c5ed4457 | [
"Apache-2.0"
] | 3 | 2022-01-21T14:07:01.000Z | 2022-01-24T02:11:05.000Z | python/example_code/update_connectNW.py | nifcloud/nifcloud-sample | 336773d8f0a2fb842fc20bb33da9ba83c5ed4457 | [
"Apache-2.0"
] | null | null | null | from nifcloud import session
import sys
# --- define --------
# -- Server -------
SERVER_NAME = "testsv"
# --------------------
# -- PRIVATE NW -------
PRIVATE_NW_NAME = 'test'
PRIVATE_NW_IP = 'static'
# --------------------
# -------------------
# ------ update attribute --------------------
def wait_for_instance_running(client, instance_name):
    """Block until `instance_name` reports the 'running' state.

    Polls via the SDK waiter every 30 s, up to 40 attempts. Returns the
    waiter result, or None when the wait failed (errors are printed, not
    raised — preserves the original best-effort behavior).

    Fixes: `wait_result` was unbound in the `finally` when get_waiter()
    raised (NameError); the `return` inside `finally` also silently
    swallowed BaseException such as KeyboardInterrupt.
    """
    print("wait : ", sys._getframe().f_code.co_name)
    wait_result = None
    try:
        waiter = client.get_waiter('instance_running')
        wait_result = waiter.wait(
            InstanceId=[instance_name, ],
            Tenancy=['all', ],
            WaiterConfig={
                'Delay': 30,
                'MaxAttempts': 40
            }
        )
    except Exception as e:
        print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
    return wait_result
def wait_for_instance_warning(client):
    """Block until the configured SERVER_NAME instance reports 'warning' state.

    Polls every 30 s, up to 40 attempts. Returns the waiter result, or None
    when the wait failed (errors are printed, not raised).

    Fixes: `wait_result` was unbound in the `finally` when get_waiter()
    raised (NameError); the `return` inside `finally` also silently
    swallowed BaseException.
    """
    print("wait : ", sys._getframe().f_code.co_name)
    wait_result = None
    try:
        waiter = client.get_waiter('instance_warning')
        wait_result = waiter.wait(
            InstanceId=[SERVER_NAME, ],
            Tenancy=['all', ],
            WaiterConfig={
                'Delay': 30,
                'MaxAttempts': 40
            }
        )
    except Exception as e:
        print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
    return wait_result
# ---- change the private Network to which the private NIC is connected
def update_private_network(client, server_name):
try:
"""
client.nifty_update_instance_network_interfaces(
# Target Instance Name
InstanceId='string',
# After Network Config
NetworkInterface=[
{
#Select Setting Network.Exclusive NetworkName
'NetworkId' : 'string',
#Select Setting Network.Exclusive NetwokId
'NetworkName' : 'string',
#See also NetworkInterface.n.IpAddress in
#https://pfs.nifcloud.com/api/rest/NiftyUpdateInstanceNetworkInterfaces.htm
'IpAddress' : 'string',
},
],
# Reboot Option
# force:Force reboot
# true:Normal ACPI Reboot(default)
# false:Not Reboot
NiftyReboot='true',
)
"""
client.nifty_update_instance_network_interfaces(
InstanceId=server_name,
# After Network Config
NetworkInterface=[
{
'NetworkName': PRIVATE_NW_NAME,
'IpAddress': PRIVATE_NW_IP,
},
],
NiftyReboot='true',
)
print("Private Network Change")
except Exception as e:
print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
sys.exit(1)
# -------------- main ----------------
client = session.get_session().create_client(
    "computing",
    region_name="jp-east-2",
)
# Fix: update_private_network() requires the target server name — it was
# called with only `client`, which raised TypeError before any request.
update_private_network(client, SERVER_NAME)
| 28.457944 | 95 | 0.515928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,438 | 0.47225 |
cc33c48eea92c7502c4f53b7bb13b5093634f0ef | 15,646 | py | Python | src/slack_scim/v1/groups.py | broadinstitute/python-slack-scim | d8f73df78b959b320016723cd015c71d235865fb | [
"MIT"
] | null | null | null | src/slack_scim/v1/groups.py | broadinstitute/python-slack-scim | d8f73df78b959b320016723cd015c71d235865fb | [
"MIT"
] | null | null | null | src/slack_scim/v1/groups.py | broadinstitute/python-slack-scim | d8f73df78b959b320016723cd015c71d235865fb | [
"MIT"
] | null | null | null | # To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = groups_from_dict(json.loads(json_string))
from typing import Optional, Any, List, TypeVar, Type, cast, Callable
T = TypeVar("T")
def from_str(x: Any) -> str:
    """Identity converter that asserts its argument is a str (quicktype helper)."""
    if not isinstance(x, str):
        raise AssertionError
    return x
def from_none(x: Any) -> Any:
    """Identity converter that asserts its argument is None (used as a from_union branch)."""
    assert x is None
    return x
def from_union(fs, x):
    """Apply the first converter in `fs` that accepts `x`; assert if none do.

    Fixes: the bare `except:` is narrowed to `except Exception` so that
    KeyboardInterrupt/SystemExit are no longer swallowed while probing
    converters.
    """
    for f in fs:
        try:
            return f(x)
        except Exception:
            pass
    assert False
def to_class(c: Type[T], x: Any) -> dict:
    """Serialize `x` (asserted to be an instance of `c`) via its to_dict() method."""
    assert isinstance(x, c)
    return cast(Any, x).to_dict()
def from_bool(x: Any) -> bool:
    """Identity converter that asserts its argument is a bool (quicktype helper)."""
    if not isinstance(x, bool):
        raise AssertionError
    return x
def from_list(f: Callable[[Any], T], x: Any) -> List[T]:
    """Apply converter `f` to every element of `x`, asserting `x` is a list."""
    assert isinstance(x, list)
    return list(map(f, x))
def from_int(x: Any) -> int:
    """Identity converter asserting `x` is an int; bool is excluded since it subclasses int."""
    assert isinstance(x, int) and not isinstance(x, bool)
    return x
class GroupsMember:
    """A member entry of a SCIM group: display name plus member id ('value')."""
    display: Optional[str]
    value: Optional[str]

    def __init__(self, display: Optional[str], value: Optional[str]) -> None:
        self.display = display
        self.value = value

    @staticmethod
    def from_dict(obj: Any) -> 'GroupsMember':
        """Build from a parsed-JSON dict; both keys are optional strings."""
        assert isinstance(obj, dict)
        opt_str = [from_str, from_none]
        return GroupsMember(
            from_union(opt_str, obj.get("display")),
            from_union(opt_str, obj.get("value")),
        )

    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (same key order as input)."""
        opt_str = [from_str, from_none]
        return {
            "display": from_union(opt_str, self.display),
            "value": from_union(opt_str, self.value),
        }
class Meta:
    """SCIM resource metadata: creation timestamp string and resource location URL."""
    created: Optional[str]
    location: Optional[str]

    def __init__(self, created: Optional[str], location: Optional[str]) -> None:
        self.created = created
        self.location = location

    @staticmethod
    def from_dict(obj: Any) -> 'Meta':
        """Build from a parsed-JSON dict; both keys are optional strings."""
        assert isinstance(obj, dict)
        opt_str = [from_str, from_none]
        return Meta(
            from_union(opt_str, obj.get("created")),
            from_union(opt_str, obj.get("location")),
        )

    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (same key order as input)."""
        opt_str = [from_str, from_none]
        return {
            "created": from_union(opt_str, self.created),
            "location": from_union(opt_str, self.location),
        }
class Name:
    """SCIM user name components (JSON keys familyName/givenName/honorificPrefix)."""
    family_name: Optional[str]
    given_name: Optional[str]
    honorific_prefix: Optional[str]

    def __init__(self, family_name: Optional[str], given_name: Optional[str], honorific_prefix: Optional[str]) -> None:
        self.family_name = family_name
        self.given_name = given_name
        self.honorific_prefix = honorific_prefix

    @staticmethod
    def from_dict(obj: Any) -> 'Name':
        """Build from a parsed-JSON dict; all keys are optional strings."""
        assert isinstance(obj, dict)
        opt_str = [from_str, from_none]
        return Name(
            from_union(opt_str, obj.get("familyName")),
            from_union(opt_str, obj.get("givenName")),
            from_union(opt_str, obj.get("honorificPrefix")),
        )

    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (same key order as input)."""
        opt_str = [from_str, from_none]
        return {
            "familyName": from_union(opt_str, self.family_name),
            "givenName": from_union(opt_str, self.given_name),
            "honorificPrefix": from_union(opt_str, self.honorific_prefix),
        }
class Manager:
    """SCIM enterprise-extension manager reference (JSON key 'managerId')."""
    manager_id: Optional[str]

    def __init__(self, manager_id: Optional[str]) -> None:
        self.manager_id = manager_id

    @staticmethod
    def from_dict(obj: Any) -> 'Manager':
        """Build from a parsed-JSON dict; 'managerId' is an optional string."""
        assert isinstance(obj, dict)
        return Manager(from_union([from_str, from_none], obj.get("managerId")))

    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict."""
        return {"managerId": from_union([from_str, from_none], self.manager_id)}
class UrnScimSchemasExtensionEnterprise10:
    """SCIM enterprise user extension ('urn:scim:schemas:extension:enterprise:1.0').

    All fields are optional; JSON keys are camelCase versions of the
    snake_case attribute names (e.g. cost_center <-> "costCenter").
    """
    cost_center: Optional[str]
    department: Optional[str]
    division: Optional[str]
    employee_number: Optional[str]
    manager: Optional[Manager]
    organization: Optional[str]
    def __init__(self, cost_center: Optional[str], department: Optional[str], division: Optional[str], employee_number: Optional[str], manager: Optional[Manager], organization: Optional[str]) -> None:
        self.cost_center = cost_center
        self.department = department
        self.division = division
        self.employee_number = employee_number
        self.manager = manager
        self.organization = organization
    @staticmethod
    def from_dict(obj: Any) -> 'UrnScimSchemasExtensionEnterprise10':
        """Build from a parsed-JSON dict; every field may be absent/null."""
        assert isinstance(obj, dict)
        cost_center = from_union([from_str, from_none], obj.get("costCenter"))
        department = from_union([from_str, from_none], obj.get("department"))
        division = from_union([from_str, from_none], obj.get("division"))
        employee_number = from_union([from_str, from_none], obj.get("employeeNumber"))
        manager = from_union([Manager.from_dict, from_none], obj.get("manager"))
        organization = from_union([from_str, from_none], obj.get("organization"))
        return UrnScimSchemasExtensionEnterprise10(cost_center, department, division, employee_number, manager, organization)
    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (camelCase keys)."""
        result: dict = {}
        result["costCenter"] = from_union([from_str, from_none], self.cost_center)
        result["department"] = from_union([from_str, from_none], self.department)
        result["division"] = from_union([from_str, from_none], self.division)
        result["employeeNumber"] = from_union([from_str, from_none], self.employee_number)
        result["manager"] = from_union([lambda x: to_class(Manager, x), from_none], self.manager)
        result["organization"] = from_union([from_str, from_none], self.organization)
        return result
class ResourceMember:
    """Full SCIM user record appearing as a group member in the Resources list.

    All fields are optional; JSON keys are camelCase versions of the
    snake_case attribute names. The enterprise extension is stored under the
    literal key "urn:scim:schemas:extension:enterprise:1.0".
    """
    active: Optional[bool]
    display_name: Optional[str]
    external_id: Optional[str]
    id: Optional[str]
    locale: Optional[str]
    meta: Optional[Meta]
    name: Optional[Name]
    nick_name: Optional[str]
    password: Optional[str]
    preferred_language: Optional[str]
    profile_url: Optional[str]
    schemas: Optional[List[str]]
    timezone: Optional[str]
    title: Optional[str]
    urn_scim_schemas_extension_enterprise_10: Optional[UrnScimSchemasExtensionEnterprise10]
    user_name: Optional[str]
    user_type: Optional[str]
    def __init__(self, active: Optional[bool], display_name: Optional[str], external_id: Optional[str], id: Optional[str], locale: Optional[str], meta: Optional[Meta], name: Optional[Name], nick_name: Optional[str], password: Optional[str], preferred_language: Optional[str], profile_url: Optional[str], schemas: Optional[List[str]], timezone: Optional[str], title: Optional[str], urn_scim_schemas_extension_enterprise_10: Optional[UrnScimSchemasExtensionEnterprise10], user_name: Optional[str], user_type: Optional[str]) -> None:
        self.active = active
        self.display_name = display_name
        self.external_id = external_id
        self.id = id
        self.locale = locale
        self.meta = meta
        self.name = name
        self.nick_name = nick_name
        self.password = password
        self.preferred_language = preferred_language
        self.profile_url = profile_url
        self.schemas = schemas
        self.timezone = timezone
        self.title = title
        self.urn_scim_schemas_extension_enterprise_10 = urn_scim_schemas_extension_enterprise_10
        self.user_name = user_name
        self.user_type = user_type
    @staticmethod
    def from_dict(obj: Any) -> 'ResourceMember':
        """Build from a parsed-JSON dict; every field may be absent/null."""
        assert isinstance(obj, dict)
        active = from_union([from_bool, from_none], obj.get("active"))
        display_name = from_union([from_str, from_none], obj.get("displayName"))
        external_id = from_union([from_str, from_none], obj.get("externalId"))
        id = from_union([from_str, from_none], obj.get("id"))
        locale = from_union([from_str, from_none], obj.get("locale"))
        meta = from_union([Meta.from_dict, from_none], obj.get("meta"))
        name = from_union([Name.from_dict, from_none], obj.get("name"))
        nick_name = from_union([from_str, from_none], obj.get("nickName"))
        password = from_union([from_str, from_none], obj.get("password"))
        preferred_language = from_union([from_str, from_none], obj.get("preferredLanguage"))
        profile_url = from_union([from_str, from_none], obj.get("profileUrl"))
        schemas = from_union([lambda x: from_list(from_str, x), from_none], obj.get("schemas"))
        timezone = from_union([from_str, from_none], obj.get("timezone"))
        title = from_union([from_str, from_none], obj.get("title"))
        urn_scim_schemas_extension_enterprise_10 = from_union([UrnScimSchemasExtensionEnterprise10.from_dict, from_none], obj.get("urn:scim:schemas:extension:enterprise:1.0"))
        user_name = from_union([from_str, from_none], obj.get("userName"))
        user_type = from_union([from_str, from_none], obj.get("userType"))
        return ResourceMember(active, display_name, external_id, id, locale, meta, name, nick_name, password, preferred_language, profile_url, schemas, timezone, title, urn_scim_schemas_extension_enterprise_10, user_name, user_type)
    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (camelCase keys, same order)."""
        result: dict = {}
        result["active"] = from_union([from_bool, from_none], self.active)
        result["displayName"] = from_union([from_str, from_none], self.display_name)
        result["externalId"] = from_union([from_str, from_none], self.external_id)
        result["id"] = from_union([from_str, from_none], self.id)
        result["locale"] = from_union([from_str, from_none], self.locale)
        result["meta"] = from_union([lambda x: to_class(Meta, x), from_none], self.meta)
        result["name"] = from_union([lambda x: to_class(Name, x), from_none], self.name)
        result["nickName"] = from_union([from_str, from_none], self.nick_name)
        result["password"] = from_union([from_str, from_none], self.password)
        result["preferredLanguage"] = from_union([from_str, from_none], self.preferred_language)
        result["profileUrl"] = from_union([from_str, from_none], self.profile_url)
        result["schemas"] = from_union([lambda x: from_list(from_str, x), from_none], self.schemas)
        result["timezone"] = from_union([from_str, from_none], self.timezone)
        result["title"] = from_union([from_str, from_none], self.title)
        result["urn:scim:schemas:extension:enterprise:1.0"] = from_union([lambda x: to_class(UrnScimSchemasExtensionEnterprise10, x), from_none], self.urn_scim_schemas_extension_enterprise_10)
        result["userName"] = from_union([from_str, from_none], self.user_name)
        result["userType"] = from_union([from_str, from_none], self.user_type)
        return result
class Resource:
    """A single SCIM group resource: id, display name, member list, metadata, schemas."""
    display_name: Optional[str]
    id: Optional[str]
    members: Optional[List[ResourceMember]]
    meta: Optional[Meta]
    schemas: Optional[List[str]]
    def __init__(self, display_name: Optional[str], id: Optional[str], members: Optional[List[ResourceMember]], meta: Optional[Meta], schemas: Optional[List[str]]) -> None:
        self.display_name = display_name
        self.id = id
        self.members = members
        self.meta = meta
        self.schemas = schemas
    @staticmethod
    def from_dict(obj: Any) -> 'Resource':
        """Build from a parsed-JSON dict; every field may be absent/null."""
        assert isinstance(obj, dict)
        display_name = from_union([from_str, from_none], obj.get("displayName"))
        id = from_union([from_str, from_none], obj.get("id"))
        members = from_union([lambda x: from_list(ResourceMember.from_dict, x), from_none], obj.get("members"))
        meta = from_union([Meta.from_dict, from_none], obj.get("meta"))
        schemas = from_union([lambda x: from_list(from_str, x), from_none], obj.get("schemas"))
        return Resource(display_name, id, members, meta, schemas)
    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (camelCase keys)."""
        result: dict = {}
        result["displayName"] = from_union([from_str, from_none], self.display_name)
        result["id"] = from_union([from_str, from_none], self.id)
        result["members"] = from_union([lambda x: from_list(lambda x: to_class(ResourceMember, x), x), from_none], self.members)
        result["meta"] = from_union([lambda x: to_class(Meta, x), from_none], self.meta)
        result["schemas"] = from_union([lambda x: from_list(from_str, x), from_none], self.schemas)
        return result
class Groups:
    """Top-level SCIM Groups payload.

    Covers both a single-group response (displayName/id/members/meta) and a
    paginated list response (Resources/itemsPerPage/startIndex/totalResults)
    — all fields are optional, so either shape parses.
    """
    display_name: Optional[str]
    id: Optional[str]
    items_per_page: Optional[int]
    members: Optional[List[GroupsMember]]
    meta: Optional[Meta]
    resources: Optional[List[Resource]]
    schemas: Optional[List[str]]
    start_index: Optional[int]
    total_results: Optional[int]
    def __init__(self, display_name: Optional[str], id: Optional[str], items_per_page: Optional[int], members: Optional[List[GroupsMember]], meta: Optional[Meta], resources: Optional[List[Resource]], schemas: Optional[List[str]], start_index: Optional[int], total_results: Optional[int]) -> None:
        self.display_name = display_name
        self.id = id
        self.items_per_page = items_per_page
        self.members = members
        self.meta = meta
        self.resources = resources
        self.schemas = schemas
        self.start_index = start_index
        self.total_results = total_results
    @staticmethod
    def from_dict(obj: Any) -> 'Groups':
        """Build from a parsed-JSON dict; every field may be absent/null."""
        assert isinstance(obj, dict)
        display_name = from_union([from_str, from_none], obj.get("displayName"))
        id = from_union([from_str, from_none], obj.get("id"))
        items_per_page = from_union([from_int, from_none], obj.get("itemsPerPage"))
        members = from_union([lambda x: from_list(GroupsMember.from_dict, x), from_none], obj.get("members"))
        meta = from_union([Meta.from_dict, from_none], obj.get("meta"))
        resources = from_union([lambda x: from_list(Resource.from_dict, x), from_none], obj.get("Resources"))
        schemas = from_union([lambda x: from_list(from_str, x), from_none], obj.get("schemas"))
        start_index = from_union([from_int, from_none], obj.get("startIndex"))
        total_results = from_union([from_int, from_none], obj.get("totalResults"))
        return Groups(display_name, id, items_per_page, members, meta, resources, schemas, start_index, total_results)
    def to_dict(self) -> dict:
        """Serialize back to a JSON-compatible dict (note 'Resources' is capitalized)."""
        result: dict = {}
        result["displayName"] = from_union([from_str, from_none], self.display_name)
        result["id"] = from_union([from_str, from_none], self.id)
        result["itemsPerPage"] = from_union([from_int, from_none], self.items_per_page)
        result["members"] = from_union([lambda x: from_list(lambda x: to_class(GroupsMember, x), x), from_none], self.members)
        result["meta"] = from_union([lambda x: to_class(Meta, x), from_none], self.meta)
        result["Resources"] = from_union([lambda x: from_list(lambda x: to_class(Resource, x), x), from_none], self.resources)
        result["schemas"] = from_union([lambda x: from_list(from_str, x), from_none], self.schemas)
        result["startIndex"] = from_union([from_int, from_none], self.start_index)
        result["totalResults"] = from_union([from_int, from_none], self.total_results)
        return result
def groups_from_dict(s: Any) -> Groups:
    """Deserialize a parsed-JSON dict (e.g. json.loads output) into a Groups object."""
    return Groups.from_dict(s)
def groups_to_dict(x: Groups) -> Any:
    """Serialize a Groups object back into a JSON-compatible dict."""
    return to_class(Groups, x)
| 44.448864 | 530 | 0.676786 | 14,554 | 0.930206 | 0 | 0 | 5,176 | 0.330819 | 0 | 0 | 1,241 | 0.079317 |
cc346dfe91e9034dc6d61a929ef9d68fe6e92496 | 12,585 | py | Python | scripts/mercedes_api_connector_bootstrap.py | eccenca/DataspaceConnector | 82492cbb5b6a6919c774a011fb036b67fe08d6c8 | [
"Apache-2.0"
] | null | null | null | scripts/mercedes_api_connector_bootstrap.py | eccenca/DataspaceConnector | 82492cbb5b6a6919c774a011fb036b67fe08d6c8 | [
"Apache-2.0"
] | null | null | null | scripts/mercedes_api_connector_bootstrap.py | eccenca/DataspaceConnector | 82492cbb5b6a6919c774a011fb036b67fe08d6c8 | [
"Apache-2.0"
] | null | null | null |
import requests
import pprint
import json
# Suppress ssl verification warning
requests.packages.urllib3.disable_warnings()
s = requests.Session()
s.auth = ("user", "password")
s.verify = False
host = "localhost"
apis = ["https://api.mercedes-benz.com/vehicledata/v2/vehicles", "https://api.mercedes-benz.com/vehicledata/v2/vehicles" , "https://api.mercedes-benz.com/hazard_warnings/v2", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles"]
licenses = [["Fuel Status", "https://developer.mercedes-benz.com/products/hazard_warnings/details" ],
["Electric Vehicle Status", "https://developer.mercedes-benz.com/products/electric_vehicle_status/details" ],
["Hazard Warnings", "https://developer.mercedes-benz.com/products/hazard_warnings/details" ],
["Fuel Status Tryout", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles" ],
["Electric Vehicle Status Tryout", "https://api.mercedes-benz.com/vehicledata_tryout/v2/vehicles" ]]
offers = [
{
"title": "Fuel Status",
"description": "The Fuel Status data set provides fuel level and the remaining vehicle range of connected vehicles. Applications from fuel suppliers could give Mercedes-Benz drivers individual offers at the right time.",
"keywords": [
"Fuel Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/fuel_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/fuel_status",
"Mantainer": "http://eccenca.com",
"Contact": "edgard.marx@eccenca.com"
},
{
"title": "Electric Vehicle Status",
"description": "The Electric Vehicle Status data set provides charge and remaining range of a specific electric vehicle. Knowing these current values, the next charging stop can be predicted.",
"keywords": [
"Electric Vehicle Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/electric_vehicle_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/electric_vehicle_status",
"Mantainer": "http://eccenca.com",
"Contact": "edgard.marx@eccenca.com"
},
{
"title": "Hazard Warnings",
"description": "Benefit from aggregated event data from our connected vehicle fleet to alert your drivers ahead of any dangerous situation. The data set consists of different types of safety-related events, ranging from dangerous traffic events to weather conditions.",
"keywords": [
"Hazard Warnings"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/hazard_warnings/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/hazard_warnings",
"Mantainer": "http://eccenca.com",
"Contact": "edgard.marx@eccenca.com"
},
{
"title": "Fuel Status Tryout",
"description": "This is a sandbox for Fuel Status data set provides fuel level and the remaining vehicle range of connected vehicles. Applications from fuel suppliers could give Mercedes-Benz drivers individual offers at the right time.",
"keywords": [
"Fuel Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/fuel_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/fuel_status",
"Mantainer": "http://eccenca.com",
"Contact": "edgard.marx@eccenca.com"
},
{
"title": "Electric Vehicle Status Tryout",
"description": "This is a sandbox for Electric Vehicle Status data set provides charge and remaining range of a specific electric vehicle. Knowing these current values, the next charging stop can be predicted.",
"keywords": [
"Electric Vehicle Status"
],
"publisher": "https://mercedes-benz.com",
"language": "EN",
"license": "https://developer.mercedes-benz.com/products/electric_vehicle_status/details",
"sovereign": "https://mercedes-benz.com",
"endpointDocumentation": "https://developer.mercedes-benz.com/products/electric_vehicle_status",
"Mantainer": "http://eccenca.com",
"Contact": "edgard.marx@eccenca.com"
}
]
representations = [{
"title": "Fuel Status",
"description": "Data representation of Fuel Status data.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/fuel-status.json"
},
{
"title": "Electric Vehicle Status",
"description": "Data representation of Electric Vehicle Status.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/electric-vehicle-status.json"
},
{
"title": "Hazard Warnings",
"description": "Data representation of Hazard Warnings data.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/harzard-warnings.json"
},
{
"title": "Fuel Status Tyout",
"description": "Data representation of Fuel Status data.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/fuel-status.json"
},
{
"title": "Electric Vehicle Status Tryout",
"description": "Data representation of Electric Vehicle Status.",
"mediaType": "JSON",
"language": "EN",
"example": "https://github.com/eccenca/DaimlerDataspaceSharedData/blob/main/electric-vehicle-status.json"
}
]
def create_policy(title, desc):
    """Register an IDS USE-permission rule on the connector.

    Fixes over the previous version:
    - the `title` and `desc` parameters were silently ignored (a hard-coded
      payload was posted instead); they are now embedded in the rule,
    - the hard-coded payload had its title and description values swapped,
    - a second, unused payload string (`value`) was built and parsed with
      json.loads into a variable that was never read; that dead code is gone.

    Parameters
    ----------
    title : str
        Human-readable title of the usage policy.
    desc : str
        Description of the usage policy.

    Returns
    -------
    str
        URL of the created rule (the response's Location header).
    """
    policy = {
        "@context": {
            "ids": "https://w3id.org/idsa/core/",
            "idsc": "https://w3id.org/idsa/code/"
        },
        "@type": "ids:Permission",
        "@id": "https://w3id.org/idsa/autogen/permission/154df1cf-557b-4f44-b839-4b68056606a2",
        "ids:description": [
            {
                "@value": desc,
                "@type": "http://www.w3.org/2001/XMLSchema#string"
            }
        ],
        "ids:title": [
            {
                "@value": title,
                "@type": "http://www.w3.org/2001/XMLSchema#string"
            }
        ],
        "ids:action": [
            {
                "@id": "idsc:USE"
            }
        ]
    }
    # The connector's /api/rules endpoint expects the IDS rule as a JSON
    # *string* wrapped in a {"value": ...} envelope.
    return s.post(
        "https://" + host + "/api/rules",
        json={"value": json.dumps(policy)}
    ).headers["Location"]
def get_objects(object):
    """Fetch the first page (up to 30 entries) of all objects of the given kind.

    `object` is the singular REST endpoint name ("catalog", "offer", ...);
    the plural endpoint is derived by appending "s".
    """
    listing_url = f"https://{host}/api/{object}s?page=0&size=30"
    return s.get(listing_url)
def create_remote_artifact(endpoint):
    """Register a remote artifact whose data lives at `endpoint`; return its URL."""
    response = s.post(
        f"https://{host}/api/artifacts",
        json={"accessUrl": endpoint}
    )
    return response.headers["Location"]
# NOTE(review): an identical definition of create_offered_resource appears
# again later in this file; that later copy shadows this one at load time.
# Harmless (the bodies match), but one copy should eventually be removed.
def create_offered_resource(resource):
    # POST the offer description and return the new resource's URL
    # (taken from the response's Location header).
    return s.post("https://" + host + "/api/offers", json=resource).headers["Location"]
def add_resource_to_catalog(catalog, resource):
    """Link an offered resource into a catalog (both given as URLs)."""
    s.post(f"{catalog}/offers", json=[resource])
def add_catalog_to_resource(resource, catalog):
    """Link a catalog to a resource (inverse direction of add_resource_to_catalog)."""
    s.post(f"{resource}/catalogs", json=[catalog])
def add_representation_to_resource(resource, representation):
    """Attach a representation to a resource (both given as URLs)."""
    s.post(f"{resource}/representations", json=[representation])
def add_artifact_to_representation(representation, artifact):
    """Attach an artifact to a representation (both given as URLs)."""
    s.post(f"{representation}/artifacts", json=[artifact])
def add_contract_to_resource(resource, contract):
    """Attach a contract to a resource (both given as URLs)."""
    s.post(f"{resource}/contracts", json=[contract])
def add_rule_to_contract(contract, rule):
    """Attach a usage-policy rule to a contract (both given as URLs)."""
    s.post(f"{contract}/rules", json=[rule])
def create_offered_resource(resource):
    """Create an offered resource from its metadata dict; return the new resource's URL."""
    response = s.post(f"https://{host}/api/offers", json=resource)
    return response.headers["Location"]
def create_representation(representation):
    """Create a representation from its metadata dict; return the new representation's URL."""
    response = s.post(f"https://{host}/api/representations", json=representation)
    return response.headers["Location"]
def create_contract():
    """Create an empty contract container; return its URL."""
    response = s.post(f"https://{host}/api/contracts", json={})
    return response.headers["Location"]
def create_catalog():
    """Create an empty catalog; return its URL."""
    response = s.post(f"https://{host}/api/catalogs", json={})
    return response.headers["Location"]
# NOTE(review): `remove` is redefined below with a different, two-argument
# signature. That later definition shadows this one, so after that point any
# one-argument call remove(href) resolves to the two-argument version and
# raises TypeError. One of the two functions should be renamed.
def remove(object_href):
    # DELETE a single REST object identified by its absolute URL.
    return s.delete(object_href)
def remove_uuid(offer_href, uuid):
    """DELETE the object at `offer_href`, passing its id in the JSON request body."""
    payload = {"id": uuid}
    return s.delete(offer_href, json=payload)
def remove(object, objects):
    """Delete every object of the given kind found in a collection listing.

    Parameters
    ----------
    object : str
        Singular object kind name ("catalog", "artifact", ...); the embedded
        collection key is derived by appending "s".
    objects : response
        Result of get_objects(); its JSON body lists the objects under
        _embedded.<object>s, each carrying a _links.self.href URL.

    BUG FIX: the loop previously called remove(object_href) — but this very
    definition shadows the earlier one-argument remove(), so that call
    recursed into this two-argument function with one argument and raised
    TypeError. We now delete via s.delete() directly.
    """
    listing = json.loads(objects.text)
    for entry in listing["_embedded"][object + 's']:
        object_href = entry["_links"]["self"]["href"]
        print("Removing " + object + " " + object_href)
        s.delete(object_href)
def remove_object_uuid(object, objects):
    """Delete every object of the given kind, sending its UUID in the request body.

    Parameters
    ----------
    object : str
        Singular embedded-collection name ("catalog", "resource", ...); the
        key inside the listing's _embedded section is derived by appending "s".
    objects : response
        Result of get_objects(); its JSON body lists the objects under
        _embedded.<object>s, each carrying a _links.self.href URL.

    BUG FIX: `uuid` was previously assigned object_href.rindex("/", 1) — the
    integer *position* of the last "/" — so an index number, not the UUID, was
    sent as the id. We now slice out the trailing path segment instead.
    """
    current_objects = json.loads(objects.text)
    for current_object in current_objects["_embedded"][object + 's']:
        object_href = current_object["_links"]["self"]["href"]
        print("Removing " + object + " " + object_href)
        uuid = object_href[object_href.rindex("/") + 1:]
        remove_uuid(object_href, uuid)
# Cleaning dataset: purge all previously registered objects so the catalog
# can be rebuilt from scratch. Each pair maps the REST endpoint name to the
# key used inside the listing's "_embedded" section — note that the /api/offers
# listing apparently embeds its items under "resources", hence the mismatch.
for endpoint_kind, embedded_kind in (
    ("catalog", "catalog"),
    ("offer", "resource"),
    ("artifact", "artifact"),
    ("representation", "representation"),
    ("contract", "contract"),
):
    object_response = get_objects(endpoint_kind)
    remove_object_uuid(embedded_kind, object_response)
# Register each API endpoint as an offered resource in a fresh catalog.
# One contract and one usage policy (built from the first license entry)
# are shared by all offers; offers/representations/licenses are consumed
# index-aligned with the apis list via the manual counter `i`.
catalog = create_catalog()
i = 0
policy = create_policy(licenses[i][0] + " Usage Policy", "For more details visit " + licenses[i][1])
contract = create_contract()
print(f"Adding APIS to IDS Catalog:{catalog}")
for api in apis:
    # Create the three building blocks for this API...
    offer = create_offered_resource(offers[i])
    representation = create_representation(representations[i])
    artifact = create_remote_artifact(api)
    # ...then wire them together: catalog -> offer -> representation -> artifact,
    # plus the shared contract and its usage-policy rule.
    add_resource_to_catalog(catalog, offer)
    add_representation_to_resource(offer, representation)
    add_artifact_to_representation(representation, artifact)
    add_contract_to_resource(offer, contract)
    add_rule_to_contract(contract, policy)
    print(f"Registering {licenses[i][0]} in {artifact}")
    i += 1