hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
10e2b00c6ccdaad9da612055fa45d2c4930f0890 | 3,498 | py | Python | Judge/judge_core.py | peeesspee/BitOJ | 0d67a87b71d0c8c8d3df719f1b9e176ec91cfb32 | [
"MIT"
] | 30 | 2019-07-28T18:05:33.000Z | 2021-12-27T10:19:31.000Z | Judge/judge_core.py | peeesspee/BitOJ | 0d67a87b71d0c8c8d3df719f1b9e176ec91cfb32 | [
"MIT"
] | 2 | 2019-09-03T19:53:03.000Z | 2019-10-18T11:00:44.000Z | Judge/judge_core.py | peeesspee/BitOJ | 0d67a87b71d0c8c8d3df719f1b9e176ec91cfb32 | [
"MIT"
] | 4 | 2019-10-02T04:54:50.000Z | 2020-08-10T13:28:58.000Z | # This process handles all the requests in the queue task_queue and updates database
import json, pika, sys, time
from database_management import manage_database, submission_management
class core():
    """Judge core process.

    Drains JSON messages from the shared ``task_queue`` and persists
    submission verdicts through ``submission_management``.  State lives on
    the class itself because exactly one core runs per process; it is
    populated by :meth:`init_core`.
    """

    # Shared process state, filled in by init_core().
    data_changed_flags = ''   # shared flag array; see init_core docstring
    task_queue = ''           # shared queue of JSON-encoded messages
    channel = ''              # messaging channel, assigned elsewhere
    file_password = ''
    # Exchange names used by the messaging layer.
    unicast_exchange = 'connection_manager'
    broadcast_exchange = 'broadcast_manager'
    judge_unicast_exchange = 'judge_manager'
    judge_broadcast_exchange = 'judge_broadcast_manager'

    def init_core(data_changed_flags, task_queue):
        """Store shared state and poll ``task_queue`` until shutdown.

        data_changed_flags -- shared flag array; index 5 requests shutdown,
            index 6 acknowledges shutdown, index 4 marks freshly written data.
        task_queue -- queue of JSON-encoded 'JUDGE'/'UPDATE' messages.

        Never returns normally: always terminates via ``sys.exit()``.
        """
        core.data_changed_flags = data_changed_flags
        core.task_queue = task_queue
        conn, cur = manage_database.initialize_database()
        print('[ JUDGE ][ CORE PROCESS ] Process started')
        # Infinite loop to poll the task_queue.
        try:
            while True:
                if core.poll(task_queue) == 1:
                    break
                # Throttle polling so an idle loop does not spin the CPU.
                time.sleep(2)
            # Reaching this point means the Server Shutdown was initiated.
            print('[ CORE ] Shutdown')
            core.data_changed_flags[6] = 1
        except KeyboardInterrupt:
            core.data_changed_flags[6] = 1
            print('[ CORE ] Force Shutdown')
        finally:
            manage_database.close_db()
            sys.exit()

    def poll(task_queue):
        """Drain every pending message once.

        Returns 1 when a shutdown was requested (flag index 5), else 0.
        Database errors are reported but never propagated.
        """
        # If sys exit is called elsewhere, this flag will be 1.
        if core.data_changed_flags[5] == 1:
            return 1
        try:
            while task_queue.empty() == False:
                # Each queue entry is a JSON object with a 'Code' field.
                data = json.loads(task_queue.get())
                code = data['Code']
                if code in ('JUDGE', 'UPDATE'):
                    # Both message types carry the same payload fields;
                    # extract them once instead of per branch.
                    run_id = data['Run ID']
                    client_id = data['Client ID']
                    verdict = data['Verdict']
                    language = data['Language']
                    problem_code = data['Problem Code']
                    time_stamp = data['Timestamp']
                    file_with_ext = data['Filename']
                if code == 'JUDGE':
                    count = submission_management.get_count(run_id)
                    if count == 0:
                        # First verdict for this run: insert a new record.
                        print('[ CORE ] Insert Record: Run', run_id)
                        status = submission_management.insert_record(
                            run_id,
                            client_id,
                            verdict,
                            language,
                            problem_code,
                            time_stamp,
                            file_with_ext
                        )
                        if status == 0:
                            print('[ CORE ] Submission Processed')
                        else:
                            print('[ CORE ] Submission Not Processed')
                        core.data_changed_flags[4] = 1
                    else:
                        # Re-judged run: overwrite the existing record.
                        print('[ CORE ] Update Record: Run', run_id)
                        submission_management.update_record(
                            run_id,
                            client_id,
                            verdict,
                            language,
                            problem_code,
                            time_stamp,
                            file_with_ext
                        )
                        print('[ CORE ] Update successful ')
                        core.data_changed_flags[4] = 1
                elif code == 'UPDATE':
                    print('[ CORE ] Update: ', run_id)
                    submission_management.update_record(
                        run_id,
                        client_id,
                        verdict,
                        language,
                        problem_code,
                        time_stamp,
                        file_with_ext
                    )
                    print('[ CORE ] Update successful ')
                    core.data_changed_flags[4] = 1
        except Exception as error:
            # BUG FIX: the original looked up the file name via os.path here,
            # but 'os' was never imported, so the handler itself raised a
            # NameError that the trailing 'finally: return 0' silently
            # swallowed -- this error message was never printed.  Report the
            # line number straight from the traceback instead.
            exc_tb = sys.exc_info()[2]
            print('[ CORE ][ ERROR ] Data Processing error : ' + str(error) +
                  ' on line ' + str(exc_tb.tb_lineno))
        return 0
| 28.209677 | 106 | 0.64837 | 3,308 | 0.945683 | 0 | 0 | 0 | 0 | 0 | 0 | 1,009 | 0.288451 |
10e350fdeb74a09ba92056d3e0417d107ac47a47 | 50 | py | Python | {{cookiecutter.project_name}}/src/{{cookiecutter.package_name}}/__init__.py | cav71/cav71-python-package-cookiecutter | 697a830560ee5e3072e28a0021e227a7d0ef5b66 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_name}}/src/{{cookiecutter.package_name}}/__init__.py | cav71/cav71-python-package-cookiecutter | 697a830560ee5e3072e28a0021e227a7d0ef5b66 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_name}}/src/{{cookiecutter.package_name}}/__init__.py | cav71/cav71-python-package-cookiecutter | 697a830560ee5e3072e28a0021e227a7d0ef5b66 | [
"BSD-3-Clause"
] | null | null | null | __version__ = "0.0.0"
__hash__ = "<invalid-hash>"
| 16.666667 | 27 | 0.66 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.46 |
10e3ae93c25d180de54302b985756fd17e3e9d4e | 5,954 | py | Python | generator/restaurant.py | mouse-reeve/unfamiliar-tourism | 18daa6f130ceeaf75b5b74d119b77580063dc838 | [
"MIT"
] | 3 | 2019-02-27T20:35:22.000Z | 2020-11-16T16:56:36.000Z | generator/restaurant.py | mouse-reeve/unfamiliar-tourism | 18daa6f130ceeaf75b5b74d119b77580063dc838 | [
"MIT"
] | 10 | 2017-10-05T19:50:47.000Z | 2018-10-17T15:17:52.000Z | generator/restaurant.py | mouse-reeve/unfamiliar-tourism | 18daa6f130ceeaf75b5b74d119b77580063dc838 | [
"MIT"
] | 1 | 2020-11-16T16:56:39.000Z | 2020-11-16T16:56:39.000Z | ''' buildings and built environment '''
from datetime import datetime
import random
import tracery
from utilities import format_text, get_latin
def eatery(name, dish, category, data):
    '''Generate a review-style blurb for a restaurant.

    name -- establishment name (rendered emphasised in the output)
    dish -- dict with 'name' and 'description' for the signature dish
    category -- establishment type word (e.g. "restaurant", "cafe")
    data -- city dict; keys read here: 'founded', 'city_name',
        'geography' (needs 'neighborhoods'), 'get_person', 'country',
        'stories', 'primary_material', 'secondary_material'

    Returns the flattened tracery grammar output passed through
    format_text().
    '''
    # Founding year: bounded below by 1700 and at least four years ago.
    earliest = data['founded'] if data['founded'] > 1700 else 1700
    founding = random.randint(earliest - 4, datetime.now().year - 4)

    # Fancier display names keyed by the city's building material; used by
    # the '#secondary_material_fancy#' grammar symbol below.
    materials = {
        'brick': ['pottery', 'ceramic'],
        'straw': ['woven straw', 'straw'],
        'wood': ['wood'],
        'stone': ['marble', 'stonework'],
        'cloth': ['textile', 'tapestry'],
        'glass': ['glass', 'stained glass'],
        'metal': ['metal'],
        'tile': ['mosaic', 'tile'],
    }

    # Tracery grammar: '#symbol#' tokens are expanded recursively, lists
    # mean "pick one at random".
    rules = {
        # structures
        'start': [
            '''With a gourmet, #cuisine# menu and #vibe_part#, #name# is a
            #platitude#. It will have you craving perennial favorites
            like #dish#. The setting, in a #space#, is stunning, a perfect
            #city# experience.''',

            '''Owner #chef# has given #cuisine# cuisine a modern edge while
            still staying true to the regional style. The venue is stunning,
            a #space# and #vibe_part#. Be sure to try the #dish#.''',

            '''In this #vibe# #type#, you can settle down in a #space#. The menu
            features staples of #cuisine# cuisine, and is best known for
            traditional-style #dish#.''',

            '''#name# is a #cuisine# restaurant in #city# that's been
            going strong since #founding#. With a #vibe_part# and attentive
            service, it offers #cuisine# cuisine in a #space#.''',

            '''#name# is a #vibe# #type# in a welcoming environment. It offers
            excellent #cuisine# food. The #dish# is hard to beat.''',

            '''This #space# gets rave reviews for
            #positive# and affordable #cuisine# food and ambiance. The
            #vibe_part# makes it a #platitude#.''',

            '''#name# is one of #city#'s best #cuisine# restaurants. It's a
            #platitude# where you can enjoy this #space#. There are a
            #positive# range of dishes on offer, including #dish#.''',

            '''This #platitude# opened in #founding# and has set the tone for
            #city# cuisine ever since. Regulars like to order #dish#, sit
            back, and enjoy the #vibe_part#.''',

            '''Something of a social hub in #city#, this #vibe# #type#
            doesn't exactly advertise itself, but the #dish# is #positive#.
            Overall a #platitude#.''',

            '''A popular #vibe# cafe in the heart of #city# serving
            #dish# and drinks.''',

            '''Founded in early #founding#, #name# serves arguably the best
            know #dish# in town and it deserves that distinction. It has a
            #secondary_material_fancy#-decked interior and a #vibe_part#.''',

            '''This simple place, popular with the city workers, covers the
            bases for a #positive# lunch of #dish#.''',

            '''#name# is a rather dark and seedy place to say the least, but
            within its #material# walls you'll get a #positive# range of
            local dishes.''',

            '''This simple seven-table place offers #positive# breakfasts and
            gets packed by lunchtime -- and rightly so. The #dish# is a
            killer (not literally!).''',
        ],

        # info
        'name': '<em>%s</em>' % name,
        'type': category,
        'city': '<em>%s</em>' % get_latin(data['city_name'], capitalize=True),
        'neighborhood': 'the <em>%s</em> district' % get_latin(
            random.choice(data['geography']['neighborhoods']), capitalize=True),
        'founding': str(founding),
        'chef': data['get_person']('chef')['name'],

        # descriptive componenets
        'cuisine': '<em>%s</em>ian-style' % get_latin(
            data['country'],
            capitalize=True),
        'dish': '"<em>%s</em>" (a %s)' % (get_latin(dish['name']),
                                          dish['description']),
        'platitude': [
            'enduring favorite',
            'first-rate establishment',
            'local go-to',
            'local favorite',
            'popular place',
            'much loved #type#',
            'prestigious',
            'foodie oasis',
        ],
        'vibe_part': '#vibe# #atmosphere#',
        'space': [
            '#stories# with #color#-painted #material# walls and #accent#',
            'stylish #material# and #secondary_material# #stories#',
        ],
        # data['stories'] selects one of the 'single'/'multi'/'many' symbols.
        'stories': '#%s#' % data['stories'],
        'single': ['building', '#type#'],
        'multi': 'spacious #building#',
        'many': '%s-floor #building#' % random.choice(
            ['first', 'second', 'third', 'fourth', 'fifth', 'top']),
        'accent': '#secondary_material# #accent_object#',
        'accent_object': ['wall-hangings', 'doorways', 'lamps'],
        'material': data['primary_material'],
        'secondary_material': data['secondary_material'],
        'secondary_material_fancy': materials[data['secondary_material']],
        'building': ['suite', 'hall', 'room', '#type#'],

        # wordlists
        'atmosphere': ['atmosphere', 'charm'],
        'positive': [
            'top notch', 'good', 'great', 'fantastic',
            'excellent', 'high caliber', 'wonderful', 'abundant'],
        'vibe': [
            'bustling',
            'busy',
            'relaxing',
            'sophisticated',
            'quaint',
            'cozy',
            'elegant',
            'world-renowned',
            'laid-back',
        ],
        'color': ['red', 'orange', 'yellow', 'green',
                  'purple', 'white', 'pink'],
    }

    grammar = tracery.Grammar(rules)
    sentence = grammar.flatten('#start#')
    return format_text(sentence)
| 44.432836 | 80 | 0.533255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,101 | 0.688781 |
10e603705f200570ddc8a317e07439c1c6aa3453 | 1,235 | py | Python | Urutu/cu/blocks.py | adityaatluri/Urutu | a01cfc5e4101e0479ae420807c8a380fcdfb96ff | [
"Apache-2.0"
] | null | null | null | Urutu/cu/blocks.py | adityaatluri/Urutu | a01cfc5e4101e0479ae420807c8a380fcdfb96ff | [
"Apache-2.0"
] | null | null | null | Urutu/cu/blocks.py | adityaatluri/Urutu | a01cfc5e4101e0479ae420807c8a380fcdfb96ff | [
"Apache-2.0"
] | null | null | null | ## CUDA blocks are initialized here!
## Created by: Aditya Atluri
## Date: Mar 03 2014
def bx(blocks_dec, kernel):
    """Append the CUDA blockIdx.x declaration to *kernel* once.

    Returns the (possibly extended) kernel source and the updated
    declaration flag.
    """
    if blocks_dec != False:  # noqa: E712 -- mirror the original loose check
        return kernel, blocks_dec
    return kernel + "int bx = blockIdx.x;\n", True
def by(blocks_dec, kernel):
    """Append the CUDA blockIdx.y declaration to *kernel* once.

    Returns the (possibly extended) kernel source and the updated
    declaration flag.
    """
    if blocks_dec != False:  # noqa: E712 -- mirror the original loose check
        return kernel, blocks_dec
    return kernel + "int by = blockIdx.y;\n", True
def bz(blocks_dec, kernel):
    """Append the CUDA blockIdx.z declaration to *kernel* once.

    Returns the (possibly extended) kernel source and the updated
    declaration flag.
    """
    if blocks_dec != False:  # noqa: E712 -- mirror the original loose check
        return kernel, blocks_dec
    return kernel + "int bz = blockIdx.z;\n", True
def blocks_decl(stmt, var_nam, var_val, blocks, type_vars):
    """Emit CUDA gridDim declarations for Bx/By/Bz tokens in *stmt*.

    stmt -- tokenised assignment statement (list of strings)
    var_nam / var_val / type_vars -- running lists of known variables,
        mutated in place (a declared name and its "int" type are appended)
    blocks -- passed through unchanged

    Returns (var_nam, var_val, blocks, kernel, type_vars) where *kernel*
    is the CUDA source for the newly required declarations.
    """
    # Validate that the statement really is an assignment; .index raises
    # ValueError when '=' is absent (same behaviour as the original, where
    # the result was computed but never used).
    stmt.index('=')
    kernel = ""
    # The three grid dimensions follow an identical declaration pattern,
    # so handle them in one loop instead of three copy-pasted branches.
    for name, axis in (('Bx', 'x'), ('By', 'y'), ('Bz', 'z')):
        # Declare each dimension at most once, and only when referenced.
        if var_nam.count(name) < 1 and stmt.count(name) > 0:
            pos = stmt.index(name)
            var_nam.append(stmt[pos])
            kernel += "int %s = gridDim.%s;\n" % (name, axis)
            type_vars.append("int")
    return var_nam, var_val, blocks, kernel, type_vars
| 27.444444 | 59 | 0.663158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 281 | 0.22753 |
10e7175402559319c02afe0d648f19ee8be6ef3c | 953 | py | Python | tests/test_attr.py | tatsuya4649/dcolour | fc55768ca5f9208771b034c85c72cbc72ff98adc | [
"MIT"
] | null | null | null | tests/test_attr.py | tatsuya4649/dcolour | fc55768ca5f9208771b034c85c72cbc72ff98adc | [
"MIT"
] | null | null | null | tests/test_attr.py | tatsuya4649/dcolour | fc55768ca5f9208771b034c85c72cbc72ff98adc | [
"MIT"
] | null | null | null | import pytest
from dcolor.attr import *
def attr_init():
    # NOTE(review): this looks like a pytest fixture that is missing its
    # @pytest.fixture decorator, and none of the tests below take it as an
    # argument -- confirm whether it should be decorated or removed.
    attr = Attr(
        kind="bold"
    )
    yield attr
def test_init():
    """Constructing Attr with a plain string kind must not raise."""
    Attr(kind="bold")
def test_init2():
    """Constructing Attr with an AttrList member kind must not raise."""
    Attr(kind=AttrList.bold)
@pytest.mark.parametrize(
    "kind", [
        b"kind",
        10,
        10.0,
        ["bold"],
        {"kind": "bold"},
        True
    ])
def test_kind_type_err(kind):
    """Any non-string, non-AttrList kind must raise TypeError."""
    # The original bound the raises context ('as raiseinfo') and the
    # constructed object ('attr =') to locals it never read; both removed.
    with pytest.raises(TypeError):
        Attr(kind=kind)
def test_kind_value_err():
    """A kind string that is not a known attribute must raise ValueError."""
    # Unused 'raiseinfo' binding and 'attr =' assignment from the original
    # removed; the constructor raises before either would be used.
    with pytest.raises(ValueError):
        Attr(kind="kind")
def test_call():
    """Calling a bold Attr yields the ANSI bold escape sequence."""
    attr = Attr(kind="bold")
    rendered = attr.__call__()
    assert isinstance(rendered, str)
    assert rendered == "\033[1m"
def test_end():
    """Attr.end() yields the ANSI reset escape sequence."""
    attr = Attr(kind="bold")
    reset = attr.end()
    assert isinstance(reset, str)
    assert reset == "\033[0m"
| 15.883333 | 34 | 0.52361 | 0 | 0 | 74 | 0.07765 | 264 | 0.27702 | 0 | 0 | 79 | 0.082896 |
10e78d87cc82a459f5caa7a1a6341f84faedc2e2 | 7,358 | py | Python | tests/test_setuptools_build_subpackage.py | ashb/setuptools-build-subpackage | 6169baaea0020aaecf71e0441e1c44120c88b4ff | [
"Apache-2.0"
] | 2 | 2020-11-30T12:41:13.000Z | 2021-07-14T14:43:42.000Z | tests/test_setuptools_build_subpackage.py | ashb/setuptools-build-subpackage | 6169baaea0020aaecf71e0441e1c44120c88b4ff | [
"Apache-2.0"
] | null | null | null | tests/test_setuptools_build_subpackage.py | ashb/setuptools-build-subpackage | 6169baaea0020aaecf71e0441e1c44120c88b4ff | [
"Apache-2.0"
] | null | null | null | import os
import tarfile
import textwrap
from pathlib import Path
import setuptools
from wheel.wheelfile import WheelFile
from setuptools_build_subpackage import Distribution
ROOT = Path(__file__).parent.parent
def build_dist(folder, command, output, *args):
    """Run a setuptools *command* for one subpackage of ./example.

    folder -- subpackage folder passed via --subpackage-folder
    command -- setuptools command name (e.g. 'sdist', 'bdist_wheel')
    output -- directory that receives the built distribution
    args -- extra command-line arguments appended verbatim
    """
    script_args = [
        '--subpackage-folder', folder,
        'clean', '--all',
        command, '--dist-dir', output,
    ]
    script_args.extend(args)
    previous_dir = os.getcwd()
    os.chdir('example')
    try:
        setuptools.setup(
            distclass=Distribution,
            script_args=script_args,
        )
    finally:
        # Always restore the working directory for subsequent tests.
        os.chdir(previous_dir)
def test_bdist_wheel(tmp_path):
    """bdist_wheel for each subpackage produces a wheel containing only
    that subpackage's files, with no cross-contamination."""
    build_dist('example/sub_module_a', 'bdist_wheel', tmp_path)
    build_dist('example/sub_module_b', 'bdist_wheel', tmp_path)

    # NOTE(review): the "moudle" spelling matches the example project's
    # package names -- presumably intentional, do not "fix" it here.
    wheel_a_path = tmp_path / 'example_sub_moudle_a-0.0.0-py2.py3-none-any.whl'
    wheel_b_path = tmp_path / 'example_sub_moudle_b-0.0.0-py2.py3-none-any.whl'

    assert wheel_a_path.exists(), "sub_module_a wheel file exists"
    assert wheel_b_path.exists(), "sub_module_b wheel file exists"

    with WheelFile(wheel_a_path) as wheel_a:
        # Exact manifest: only sub_module_a's code and metadata.
        assert set(wheel_a.namelist()) == {
            'example/sub_module_a/__init__.py',
            'example/sub_module_a/where.py',
            'example_sub_moudle_a-0.0.0.dist-info/AUTHORS.rst',
            'example_sub_moudle_a-0.0.0.dist-info/LICENSE',
            'example_sub_moudle_a-0.0.0.dist-info/METADATA',
            'example_sub_moudle_a-0.0.0.dist-info/WHEEL',
            'example_sub_moudle_a-0.0.0.dist-info/top_level.txt',
            'example_sub_moudle_a-0.0.0.dist-info/RECORD',
        }

        # Packaged file content must come from the right subpackage.
        where = wheel_a.open('example/sub_module_a/where.py').read()
        assert where == b'a = "module_a"\n'

    with WheelFile(wheel_b_path) as wheel_b:
        # Exact manifest: only sub_module_b's code and metadata.
        assert set(wheel_b.namelist()) == {
            'example/sub_module_b/__init__.py',
            'example/sub_module_b/where.py',
            'example_sub_moudle_b-0.0.0.dist-info/AUTHORS.rst',
            'example_sub_moudle_b-0.0.0.dist-info/LICENSE',
            'example_sub_moudle_b-0.0.0.dist-info/METADATA',
            'example_sub_moudle_b-0.0.0.dist-info/WHEEL',
            'example_sub_moudle_b-0.0.0.dist-info/top_level.txt',
            'example_sub_moudle_b-0.0.0.dist-info/RECORD',
        }

        where = wheel_b.open('example/sub_module_b/where.py').read()
        assert where == b'a = "module_b"\n'
def test_sdist(tmp_path):
    """sdist for each subpackage produces a tarball containing only that
    subpackage's sources, metadata and its own setup.cfg."""
    # Build both dists in the same test, so we can check there is no cross-polution
    build_dist('example/sub_module_a', 'sdist', tmp_path)
    build_dist('example/sub_module_b', 'sdist', tmp_path)

    sdist_a_path = tmp_path / 'example_sub_moudle_a-0.0.0.tar.gz'
    sdist_b_path = tmp_path / 'example_sub_moudle_b-0.0.0.tar.gz'

    assert sdist_a_path.exists(), "sub_module_a sdist file exists"
    assert sdist_b_path.exists(), "sub_module_b sdist file exists"

    with tarfile.open(sdist_a_path) as sdist_a:
        # Exact archive contents: sources, egg-info and packaging files.
        assert set(sdist_a.getnames()) == {
            'example_sub_moudle_a-0.0.0',
            'example_sub_moudle_a-0.0.0/AUTHORS.rst',
            'example_sub_moudle_a-0.0.0/LICENSE',
            'example_sub_moudle_a-0.0.0/PKG-INFO',
            'example_sub_moudle_a-0.0.0/example',
            'example_sub_moudle_a-0.0.0/example/sub_module_a',
            'example_sub_moudle_a-0.0.0/example/sub_module_a/__init__.py',
            'example_sub_moudle_a-0.0.0/example/sub_module_a/where.py',
            'example_sub_moudle_a-0.0.0/example_sub_moudle_a.egg-info',
            'example_sub_moudle_a-0.0.0/example_sub_moudle_a.egg-info/PKG-INFO',
            'example_sub_moudle_a-0.0.0/example_sub_moudle_a.egg-info/SOURCES.txt',
            'example_sub_moudle_a-0.0.0/example_sub_moudle_a.egg-info/dependency_links.txt',
            'example_sub_moudle_a-0.0.0/example_sub_moudle_a.egg-info/not-zip-safe',
            'example_sub_moudle_a-0.0.0/example_sub_moudle_a.egg-info/top_level.txt',
            'example_sub_moudle_a-0.0.0/setup.cfg',
            'example_sub_moudle_a-0.0.0/setup.py',
        }

        # Sources must come from the right subpackage...
        where = sdist_a.extractfile('example_sub_moudle_a-0.0.0/example/sub_module_a/where.py').read()
        assert where == b'a = "module_a"\n'

        # ...and the subpackage's own setup.cfg is copied in verbatim.
        setup_cfg = sdist_a.extractfile('example_sub_moudle_a-0.0.0/setup.cfg').read().decode('ascii')
        assert setup_cfg == (ROOT / 'example' / 'example' / 'sub_module_a' / 'setup.cfg').open(encoding='ascii').read()

    with tarfile.open(sdist_b_path) as sdist_b:
        assert set(sdist_b.getnames()) == {
            'example_sub_moudle_b-0.0.0',
            'example_sub_moudle_b-0.0.0/AUTHORS.rst',
            'example_sub_moudle_b-0.0.0/LICENSE',
            'example_sub_moudle_b-0.0.0/PKG-INFO',
            'example_sub_moudle_b-0.0.0/example',
            'example_sub_moudle_b-0.0.0/example/sub_module_b',
            'example_sub_moudle_b-0.0.0/example/sub_module_b/__init__.py',
            'example_sub_moudle_b-0.0.0/example/sub_module_b/where.py',
            'example_sub_moudle_b-0.0.0/example_sub_moudle_b.egg-info',
            'example_sub_moudle_b-0.0.0/example_sub_moudle_b.egg-info/PKG-INFO',
            'example_sub_moudle_b-0.0.0/example_sub_moudle_b.egg-info/SOURCES.txt',
            'example_sub_moudle_b-0.0.0/example_sub_moudle_b.egg-info/dependency_links.txt',
            'example_sub_moudle_b-0.0.0/example_sub_moudle_b.egg-info/not-zip-safe',
            'example_sub_moudle_b-0.0.0/example_sub_moudle_b.egg-info/top_level.txt',
            'example_sub_moudle_b-0.0.0/setup.cfg',
            'example_sub_moudle_b-0.0.0/setup.py',
        }

        where = sdist_b.extractfile('example_sub_moudle_b-0.0.0/example/sub_module_b/where.py').read()
        assert where == b'a = "module_b"\n'

        setup_cfg = sdist_b.extractfile('example_sub_moudle_b-0.0.0/setup.cfg').read().decode('ascii')
        assert setup_cfg == (ROOT / 'example' / 'example' / 'sub_module_b' / 'setup.cfg').open(encoding='ascii').read()
def test_license_template(tmp_path):
    """--license-template prepends the rendered license header to the
    generated setup.py inside the sdist."""
    build_dist('example/sub_module_a', 'sdist', tmp_path, '--license-template', ROOT / 'LICENSE')

    sdist_a_path = tmp_path / 'example_sub_moudle_a-0.0.0.tar.gz'
    assert sdist_a_path.exists(), "sub_module_a sdist file exists"

    with tarfile.open(sdist_a_path) as sdist_a:
        setup_py = sdist_a.extractfile('example_sub_moudle_a-0.0.0/setup.py').read().decode('ascii')
        # Expected: the Apache-2.0 header rendered as comments followed by
        # the minimal setup() invocation.
        assert setup_py == textwrap.dedent(
            """\
            # Apache Software License 2.0
            #
            # Copyright (c) 2020, Ash Berlin-Taylor.
            #
            # Licensed under the Apache License, Version 2.0 (the "License");
            # you may not use this file except in compliance with the License.
            # You may obtain a copy of the License at
            #
            # https://www.apache.org/licenses/LICENSE-2.0
            #
            # Unless required by applicable law or agreed to in writing, software
            # distributed under the License is distributed on an "AS IS" BASIS,
            # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
            # See the License for the specific language governing permissions and
            # limitations under the License.
            __import__("setuptools").setup()
            """
        )
| 41.806818 | 119 | 0.647323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,406 | 0.598804 |
10eabbde5c8c1ecebe18272de8da83e00ed981a5 | 2,711 | py | Python | satchless/checkout/tests/__init__.py | styleseat/satchless | 884d0256c6af9b1de596d3875ee12dc02ecfaf8a | [
"BSD-4-Clause"
] | 1 | 2017-11-26T18:53:40.000Z | 2017-11-26T18:53:40.000Z | satchless/checkout/tests/__init__.py | styleseat/satchless | 884d0256c6af9b1de596d3875ee12dc02ecfaf8a | [
"BSD-4-Clause"
] | 13 | 2015-01-22T23:47:52.000Z | 2022-01-13T20:22:34.000Z | satchless/checkout/tests/__init__.py | styleseat/satchless | 884d0256c6af9b1de596d3875ee12dc02ecfaf8a | [
"BSD-4-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf import settings
from django.http import HttpResponse
from django.test import (
Client,
TestCase,
)
from satchless.cart.tests import TestCart
from ...cart.models import CART_SESSION_KEY
from ...order.app import order_app
from ...pricing import handler as pricing_handler
from ...product.tests import DeadParrot
from ...product.tests.pricing import FiveZlotyPriceHandler
from ...order.tests import TestOrder
from ..app import CheckoutApp
class BaseCheckoutAppTests(TestCase):
    """Shared helpers for checkout-app test cases: create carts and orders
    bound to a test client's session and inspect the resulting orders."""

    def _create_cart(self, client):
        """Return the client's cart with one macaw_blue item in it."""
        cart = self._get_or_create_cart_for_client(client)
        cart.replace_item(self.macaw_blue, 1)
        return cart

    def _get_or_create_cart_for_client(self, client=None, typ='cart'):
        """Return the cart referenced by the client's session, creating
        and registering a new one when the session has no cart key."""
        try:
            # BUG FIX: the original indexed the result with [0] -- a
            # leftover from get_or_create(), which returns an
            # (obj, created) tuple.  objects.get() returns the instance
            # itself, and subscripting it raised a TypeError that the
            # 'except KeyError' below did not catch.
            return TestCart.objects.get(
                pk=client.session[CART_SESSION_KEY % typ])
        except KeyError:
            # No cart in the session yet: create one and remember its pk.
            cart = TestCart.objects.create(typ=typ)
            client.session[CART_SESSION_KEY % typ] = cart.pk
            return cart

    def _get_or_create_order_for_client(self, client):
        # Despite the name this only fetches: with no 'satchless_order'
        # in the session, objects.get(pk=None) raises rather than creates.
        order_pk = client.session.get('satchless_order', None)
        return self.checkout_app.order_model.objects.get(pk=order_pk)

    def _create_order(self, client):
        # Creating the cart is expected to put an order in the session as
        # a side effect -- NOTE(review): the mechanism is not visible
        # here; confirm against the checkout app under test.
        self._create_cart(client)
        return self._get_order_from_session(client.session)

    def _get_order_from_session(self, session):
        """Return the order referenced by the session, or None."""
        order_pk = session.get('satchless_order', None)
        if order_pk:
            return self.checkout_app.order_model.objects.get(pk=order_pk)
        return None

    def _get_order_items(self, order):
        """Collect (product_variant, quantity) pairs across all order groups."""
        order_items = set()
        for group in order.groups.all():
            order_items.update(group.items.values_list('product_variant',
                                                       'quantity'))
        return order_items
class MockCheckoutApp(CheckoutApp):
    # Checkout app wired to the in-memory test models; the checkout view
    # is stubbed out because these tests only need a successful response.
    cart_model = TestCart
    order_model = TestOrder

    def checkout(self, *args, **kwargs):
        # Stub: no checkout logic, just an empty 200 response.
        return HttpResponse()
class App(BaseCheckoutAppTests):
    """Checkout-app test case wired to the mock app, with deterministic
    pricing installed for the duration of each test."""
    checkout_app = MockCheckoutApp()

    def setUp(self):
        # Anonymous client plus one product variant to put in carts.
        self.anon_client = Client()
        self.macaw = DeadParrot.objects.create(slug='macaw',
                                               species="Hyacinth Macaw")
        self.macaw_blue = self.macaw.variants.create(color='blue',
                                                     looks_alive=False)
        # Remember the configured handlers so tearDown can restore the
        # module-global pricing queue this test replaces.
        self.original_handlers = settings.SATCHLESS_PRICING_HANDLERS
        pricing_handler.pricing_queue = pricing_handler.PricingQueue(FiveZlotyPriceHandler)

    def tearDown(self):
        # Restore the global pricing queue mutated in setUp.
        pricing_handler.pricing_queue = pricing_handler.PricingQueue(*self.original_handlers)
| 34.316456 | 93 | 0.677241 | 2,178 | 0.803394 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.043895 |
10eadf799059b6e61c4efcf6b39cc22bd1709f1f | 4,309 | py | Python | app/source/tests_previos/test6/run.py | SMDynamicsLab/Haptic | 2c109cf4098c3e9b968bfd0d6ddd48e9a80f270e | [
"MIT"
] | null | null | null | app/source/tests_previos/test6/run.py | SMDynamicsLab/Haptic | 2c109cf4098c3e9b968bfd0d6ddd48e9a80f270e | [
"MIT"
] | null | null | null | app/source/tests_previos/test6/run.py | SMDynamicsLab/Haptic | 2c109cf4098c3e9b968bfd0d6ddd48e9a80f270e | [
"MIT"
] | null | null | null | import os
import subprocess
import pathlib
import time
import sys
from numpy.core.shape_base import block
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from random import randint
import random
from datetime import datetime
def run_make():
    """Invoke make in the current directory; raise if the build fails."""
    status, output = subprocess.getstatusoutput('make')
    if status == 0:
        return
    # Surface the build log before aborting.
    print(output)
    raise Exception("Make did not run succesfully")
def start_simulation(bin_file, input_file, output_file):
    """Launch the haptic test binary as a background subprocess.

    bin_file -- path to the compiled executable
    input_file / output_file -- CSV paths handed to the binary as argv
    """
    if not os.path.isfile(bin_file):
        # Warn but still attempt the launch, matching existing behaviour.
        print(f'Bin file does not exist at {bin_file}')
    # (pass stdout=subprocess.DEVNULL here to silence the child's output)
    subprocess.Popen([bin_file, input_file, output_file])
def plot_trials(output_file):
    """Re-draw the per-trial trajectory plot from the simulation output.

    output_file -- headerless CSV written by the simulation; columns are
        trial, x, y, z followed by the trial variables
        (angle, visual_feedback, force).
    """
    plt.close('all')
    plt.ion()
    plt.show()
    # structure: trial, x, y, z
    names = ['trial', 'x', 'y', 'z']
    var_names = ['angle', 'visual_feedback', 'force']
    names += var_names
    df = pd.read_csv(output_file, names=names, index_col=False)
    # Flip the x sign -- presumably to match the device's axis
    # orientation; NOTE(review): confirm against the recording convention.
    df['x'] = -df['x']
    fig, axs = plt.subplots(2)
    x = np.linspace(df['x'].min(), df['x'].max(), 100)
    y = []
    # One trajectory line per trial on the top axes.
    for trial, group in df.groupby('trial'):
        group.plot(x='y', y='x', ax=axs[0], label=f'trial {trial}')
        # Disabled mean/std overlay for the bottom axes (kept for reference):
        # y.append(np.interp(x, df[df['trial']==trial]['x'], df[df['trial']==trial]['y']))
    # midy = [np.mean([y[j][i] for j in range(len(df['trial'].unique()))]) for i in range(100)]
    # stdy = [np.std([y[j][i] for j in range(len(df['trial'].unique()))]) for i in range(100)]
    # axs[1].plot(x, midy, '--', c='black')
    # axs[1].plot(x, [midy[i]+stdy[i] for i in range(100)], '--', c='red')
    # axs[1].plot(x, [midy[i]-stdy[i] for i in range(100)], '--', c='red')
    plt.draw()
    plt.pause(1)
    return
def change_variables(input_file, variables_for_trial):
    """Write the next trial's variables as one space-separated line.

    input_file -- path of the file the simulation reads for the next trial
    variables_for_trial -- iterable of values (e.g. angle, visual_feedback,
        force); each is str()-converted.
    """
    variables_str = " ".join(str(i) for i in variables_for_trial)
    # Context manager guarantees the handle is closed (and the line
    # flushed) even if the write fails -- the original bare open/close
    # leaked the handle on error.
    with open(input_file, "w") as f:
        print(variables_str, file=f)
    return
def get_variables(variables_array=None):
    """Assemble the full trial list: four one-repetition blocks with
    (visual_feedback, force) = (1,0), (0,0), (0,1), (0,0).

    variables_array -- optional list to extend in place; a fresh list is
        used when omitted.

    BUG FIX: the original used a mutable default argument ([]), so every
    no-argument call kept appending to the same shared list.  A None
    default preserves the call signature while giving each call its own
    list.
    """
    if variables_array is None:
        variables_array = []
    variables_array += get_variables_block(N=1, visual_feedback=1, force=0)
    variables_array += get_variables_block(N=1, visual_feedback=0, force=0)
    variables_array += get_variables_block(N=1, visual_feedback=0, force=1)
    variables_array += get_variables_block(N=1, visual_feedback=0, force=0)
    print(len(variables_array))
    return variables_array
def get_variables_block(N, force, visual_feedback):
    """Build N shuffled repetitions of the six angle conditions.

    Each entry is [angle_degrees, visual_feedback, force] with angles
    drawn from {0, 60, ..., 300}.
    """
    condition_ids = [0, 1, 2, 3, 4, 5] * N
    random.shuffle(condition_ids)
    return [[cid * 60, visual_feedback, force] for cid in condition_ids]
def start_controller(input_file, output_file, variables):
    """Poll output_file for changes; after each completed trial, plot the
    results and write the next trial's variables to input_file.

    Blocks forever -- the experiment is ended externally (Ctrl-C).
    NOTE(review): variables[trial] will raise IndexError once all trials
    are consumed; confirm whether that is the intended stop condition.
    """
    fname = pathlib.Path(output_file)
    last_mod_time = None  # epoch float
    output_exists = os.path.isfile(output_file)
    trial = 1
    # Memory/CPU usage check: htop -p "$(pgrep -d , "python|test")"
    while True:
        # Sleep tuning (measured CPU load of this loop):
        # no sleep 99% CPU
        # time.sleep(0.001) #~9% CPU
        # time.sleep(0.01) # ~2% CPU
        time.sleep(0.1)
        if output_exists:
            mod_time = fname.stat().st_mtime
            # A new mtime means the simulation finished writing a trial.
            if last_mod_time != mod_time:
                print('file changed')
                last_mod_time = mod_time
                plot_trials(output_file)
                change_variables(input_file, variables[trial])
                print('len vars = ', len(variables), 'trial # = ', trial)
                trial += 1
        else:
            # Output not created yet: keep checking for it.
            output_exists = os.path.isfile(output_file)
if __name__ == "__main__":
    try:
        # Build the simulation binary first; raises if make fails.
        run_make()
        # All session artefacts live under ./data with a shared timestamp.
        data_path = os.path.join(sys.path[0], 'data')
        os.makedirs(data_path, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        input_file = os.path.join(data_path, f'in_{timestamp}.csv')
        output_file = os.path.join(data_path, f'out_{timestamp}.csv')
        bin_file = os.path.join( sys.path[0], '../../bin/lin-x86_64/test6')
        # Prepare the shuffled trial schedule and seed the first trial
        # before launching the simulation and the polling controller.
        variables = get_variables()
        change_variables(input_file, variables[0]) # first trial
        start_simulation(bin_file, input_file, output_file)
        start_controller(input_file, output_file, variables)
    except KeyboardInterrupt:
        print('\nStopping due to KeyboardInterrupt')
    except Exception as e:
        # Broad catch: report any failure instead of a traceback.
        print(f"Python error: {str(e)}")
| 34.472 | 95 | 0.624043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,076 | 0.24971 |
10ebbac7e6bf038ef16d63153c2620db6f0bfe22 | 7,167 | py | Python | Libraries_Benchmark/Real_dataset_experiments/code/Plotting/plots_code_real_dataset_var.py | gonzalo-munillag/Benchmarking_Differential_Privacy_Analytics_Libraries | eb0aaa38686812112d421085b0b61fa9880c4f87 | [
"MIT"
] | null | null | null | Libraries_Benchmark/Real_dataset_experiments/code/Plotting/plots_code_real_dataset_var.py | gonzalo-munillag/Benchmarking_Differential_Privacy_Analytics_Libraries | eb0aaa38686812112d421085b0b61fa9880c4f87 | [
"MIT"
] | null | null | null | Libraries_Benchmark/Real_dataset_experiments/code/Plotting/plots_code_real_dataset_var.py | gonzalo-munillag/Benchmarking_Differential_Privacy_Analytics_Libraries | eb0aaa38686812112d421085b0b61fa9880c4f87 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import numpy as np
import pandas as pd
import csv
from matplotlib.lines import Line2D
epsilon = pd.read_pickle('epsilon.pkl')
def plots_with_sizes(result_folder, query, attribute):
if attribute == 'age':
d = 1
if attribute == 'hrs':
d = 2
if attribute == 'absences':
d = 3
if attribute == 'grade':
d = 4
################# Std of scaled error ######################
diffprivlib_std = pd.read_csv(result_folder + "\\diffprivlib\\{q}\\results_dataset_{d}\\std_scaled_error\\DP_std_scaled_error.csv".format(q=query,d=d), header=None)
smartnoise_std = pd.read_csv(result_folder + "\\smartnoise\\{q}\\results_dataset_{d}\\std_scaled_error\\DP_std_scaled_error.csv".format(q=query,d=d), header=None)
pydp_std = pd.read_csv(result_folder + "\\pydp\\{q}\\results_dataset_{d}\\std_scaled_error\\DP_std_scaled_error.csv".format(q=query,d=d), header=None)
diffpriv_std = pd.read_csv(result_folder + "\\diffpriv_simple\\{q}\\results_dataset_{d}\\std_scaled_error\\std_scaled_error.csv".format(q=query,d=d), header=None)
#chorus_std = pd.read_csv(result_folder + "\\chorus_real_dataset_results\\{q}\\results_dataset_{d}\\std_scaled_error\\DP_std_scaled_error.csv".format(q=query,d=d), header=None)
################# Mean relative error ######################
diffprivlib_relative = pd.read_csv(result_folder + "\\diffprivlib\\{q}\\results_dataset_{d}\\mean_relative_error\\DP_mean_relative_error.csv".format(q=query,d=d), header=None)
smartnoise_relative = pd.read_csv(result_folder + "\\smartnoise\\{q}\\results_dataset_{d}\\mean_relative_error\\DP_mean_relative_error.csv".format(q=query,d=d), header=None)
pydp_relative = pd.read_csv(result_folder + "\\pydp\\{q}\\results_dataset_{d}\\mean_relative_error\\DP_mean_relative_error.csv".format(q=query,d=d), header=None)
diffpriv_relative = pd.read_csv(result_folder + "\\diffpriv_simple\\{q}\\results_dataset_{d}\\mean_relative_error\\mean_relative_error.csv".format(q=query,d=d), header=None)
#chorus_relative = pd.read_csv(result_folder + "\\chorus_real_dataset_results\\{q}\\results_dataset_{d}\\mean_relative_error\\DP_mean_relative_error.csv".format(q=query,d=d), header=None)
################ labels ######################
x1 = [0.01,0,0,0,0,0,0,0,0, 0.1 ,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0, 0,0, 0,0, 1, 0,0,0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,100]
x2 = [0.01,0,0,0,0,0,0,0,0, 0.1 ,0,0,0,0,0, 0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0, 0,0, 0,0, 1, 0,0,0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,100]
################ Plotting ######################
gs1 = gs.GridSpec(nrows=1, ncols=2)
gs1.update(wspace=0.3, hspace=0.05) # set the spacing between axes.
figure = plt.gcf() # get current figure
figure.clf()
###### Size plot #######
ax1 = plt.subplot(gs1[0,0])
ax1.plot(x1, diffprivlib_std, "o", markeredgecolor='k', mfc='none')
ax1.plot(epsilon, diffprivlib_std, color = 'xkcd:orangish red')
ax1.plot(x1, smartnoise_std[1:], "o", markeredgecolor='k', mfc='none')
ax1.plot(epsilon, smartnoise_std[1:], color = 'xkcd:moss green')
ax1.plot(x1, pydp_std, "o", markeredgecolor='k', mfc='none')
ax1.plot(epsilon, pydp_std, color = 'xkcd:soft blue')
ax1.plot(x1, diffpriv_std, "o", markeredgecolor='k', mfc='none')
ax1.plot(epsilon, diffpriv_std, color = 'xkcd:aquamarine')
#ax1.plot(x1, chorus_std, "o", markeredgecolor='k', mfc='none')
#ax1.plot(epsilon, chorus_std, color = 'xkcd:purple')
ax1.set_xlabel('ε', fontsize = 12)
ax1.set_ylabel('Sample Std of the \n Absolute Scaled Error', fontsize = 16)
################# MEAN RELATIVE ERROR ############################
ax2 = plt.subplot(gs1[0,1])
ax2.plot(x2, abs(diffprivlib_relative)*100, "o", markeredgecolor='k', mfc='none')
ax2.plot(epsilon, abs(diffprivlib_relative)*100, color = 'xkcd:orangish red', label="diffprivlib, IBM (Python)")
ax2.plot(x2, abs(smartnoise_relative[1:])*100, "o", markeredgecolor='k', mfc='none')
ax2.plot(epsilon, abs(smartnoise_relative[1:])*100, color = 'xkcd:moss green', label="SmartNoise, Microsoft (Python wrapper over Rust)")
ax2.plot(x2, abs(pydp_relative)*100, "o", markeredgecolor='k', mfc='none')
ax2.plot(epsilon, abs(pydp_relative)*100, color = 'xkcd:soft blue', label="PyDP (Python wrapper over Google DP C++)")
ax2.plot(x2, abs(diffpriv_relative)*100, "o", markeredgecolor='k', mfc='none')
ax2.plot(epsilon, abs(diffpriv_relative)*100, color = 'xkcd:aquamarine', label="diffpriv, B. Rubinstein, et al. (R)")
#ax2.plot(x2, abs(chorus_relative)*100, "o", markeredgecolor='k', mfc='none')
#ax2.plot(epsilon, abs(chorus_relative)*100, color = 'xkcd:purple', label="Chorus, J. Near et al (Scala)")
ax2.set_xlabel('ε', fontsize = 12)
ax2.set_ylabel('Sample Mean of the \n Relative Error [%]', fontsize = 16)
#ax1.legend(prop={'size': 19}, loc="lower center", bbox_to_anchor=(1.00, -0.02), frameon=False, ncol=4, handletextpad=0.2, handlelength=1, columnspacing=0.5)
#ax2.legend(prop={'size': 18}, loc="lower center", bbox_to_anchor=(-0.13, -0.30), frameon=False, ncol=2, handletextpad=0.2, handlelength=1, columnspacing=0.5)
figure.subplots_adjust(bottom=0.30)
#legend_elements_1 = [Line2D([1], [1], color='xkcd:orangish red', label='diffprivlib, IBM (Python)'), Line2D([1], [1], color='xkcd:soft blue', label='PyDP (Python wrapper over Google DP C++)'), Line2D([1], [1], color='xkcd:moss green', label='SmartNoise, Microsoft (Python wrapper over Rust)')]
#figure.legend(prop={'size': 18.5},handles=legend_elements_1, loc="lower center", bbox_to_anchor=(0.33, -0.02), frameon=False, ncol=1, handletextpad=0.2, handlelength=1)
#legend_elements_2 = [ Line2D([1], [1], color='xkcd:aquamarine', label='diffpriv, B. Rubinstein, et al. (R)'), Line2D([1], [1], color='xkcd:purple', label='Chorus, J. Near et al (Scala)')]
#legend_elements_2 = [ Line2D([1], [1], color='xkcd:aquamarine', label='diffpriv, B. Rubinstein, et al. (R)')]
#figure.legend(prop={'size': 18.5},handles=legend_elements_2, loc="lower center", bbox_to_anchor=(0.77, 0.1), frameon=False, ncol=1, handletextpad=0.2, handlelength=1)
if query == 'count':
ax1.set_ylim(10**-8, 10**3)
figure.suptitle('Count Query', fontsize=19)
if query == 'sum':
ax1.set_ylim(10**-8, 10**8)
figure.suptitle('Sum Query', fontsize=19)
if query == 'mean':
ax1.set_ylim(10**-12, 10**2)
figure.suptitle('Mean Query', fontsize=19)
if query == 'var':
ax1.set_ylim(10**-8, 10**4)
figure.suptitle('Variance Query', fontsize=19)
ax1.tick_params(axis='both', which='major', labelsize=16)
ax2.tick_params(axis='both', which='major', labelsize=16)
ax1.loglog()
ax2.set_xscale('log')
plt.show()
plots_with_sizes(result_folder="E:\\MS_Thesis\\publication_stuff\\results_Jan_2021\\real_dataset_micro\\22April2021", query="var", attribute='grade') | 62.321739 | 298 | 0.654528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,685 | 0.514019 |
10eec5ef18a14eb50d0dd2a7fc5c856e9cf818ae | 5,443 | py | Python | tests/anim.py | jerry-belaston/gopro-lib-node.gl | 623031489ddc82ed980c15bad349391c5b6bab5c | [
"Apache-2.0"
] | 1 | 2020-09-02T01:30:21.000Z | 2020-09-02T01:30:21.000Z | tests/anim.py | jerry-belaston/gopro-lib-node.gl | 623031489ddc82ed980c15bad349391c5b6bab5c | [
"Apache-2.0"
] | 1 | 2020-09-09T16:14:38.000Z | 2020-09-09T16:14:38.000Z | tests/anim.py | jerry-belaston/gopro-lib-node.gl | 623031489ddc82ed980c15bad349391c5b6bab5c | [
"Apache-2.0"
] | 1 | 2020-09-09T14:51:05.000Z | 2020-09-09T14:51:05.000Z | #!/usr/bin/env python
#
# Copyright 2020 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import itertools
import random
import pynodegl as ngl
from pynodegl_utils.tests.cmp_floats import test_floats
def _easing_split(easing):
name_split = easing.split(':')
easing_name = name_split[0]
args = [float(x) for x in name_split[1:]] if len(name_split) > 1 else None
return easing_name, args
def _easing_join(easing, args):
return easing if not args else easing + ':' + ':'.join('%g' % x for x in args)
# (easing base name, flags) pairs.  The flags value is a bitmask read by
# _get_easing_list(): bit 0 (1) adds the '_in'/'_out' variants, bit 1 (2)
# adds the '_in_out'/'_out_in' variants, and 0 means the bare name only.
# A name may embed arguments after ':' (e.g. 'power:7.3').
_easing_specs = (
    ('linear', 0),
    ('quadratic', 3),
    ('cubic', 3),
    ('quartic', 3),
    ('quintic', 3),
    ('power:7.3', 3),
    ('sinus', 3),
    ('exp', 3),
    ('circular', 3),
    ('bounce', 1),
    ('elastic', 1),
    ('back', 3),
)
def _get_easing_list():
    """Expand _easing_specs into the flat list of easing names.

    The flags bitmask selects which directional variants are emitted:
    bit 0 -> '_in'/'_out', bit 1 -> '_in_out'/'_out_in', 0 -> bare name.
    Any ':'-separated arguments are preserved on every variant.
    """
    easings = []
    # Fix: the original iterated with enumerate() and never used the index.
    for easing, flags in _easing_specs:
        versions = []
        if flags & 1:
            versions += ['_in', '_out']
        if flags & 2:
            versions += ['_in_out', '_out_in']
        if not flags:
            versions = ['']
        for version in versions:
            base_name, args = _easing_split(easing)
            easings.append(_easing_join(base_name + version, args))
    return easings
# Easing offset pairs exercised by the tests: no offsets, then combinations
# of a clipped start and/or clipped end of the easing range.
_offsets = (None, (0.0, 0.7), (0.3, 1.0), (0.3, 0.7))
# Fully expanded easing-name list (with directional variants), built once
# at import time.
_easing_list = _get_easing_list()
@test_floats()
def anim_forward_api(nb_points=7):
    """Evaluate every easing at evenly spaced times, for each offset pair.

    Returns one row per (easing, offsets) combination:
    [easing_name, value_at_t0, ..., value_at_t_nb_points].
    """
    step = 1. / float(nb_points)
    sample_times = [step * idx for idx in range(nb_points + 1)]
    results = []
    for easing in _easing_list:
        name, args = _easing_split(easing)
        for offsets in _offsets:
            row = [name]
            for t in sample_times:
                row.append(ngl.easing_evaluate(name, t, args, offsets))
            results.append(row)
    return results
@test_floats()
def anim_resolution_api(nb_points=7):
    """Solve (inverse-evaluate) every easing at evenly spaced times.

    Easings for which ngl.easing_solve() raises are skipped, so only the
    solvable ones contribute a row of [easing_name, *solved_values].
    """
    scale = 1. / float(nb_points)
    ret = []
    times = [i * scale for i in range(nb_points + 1)]
    for easing in _easing_list:
        easing_name, easing_args = _easing_split(easing)
        for offsets in _offsets:
            try:
                values = [ngl.easing_solve(easing_name, t, easing_args, offsets)
                          for t in times]
            except Exception:
                # Not every easing can be solved; silently skip unsupported
                # ones (the original bound the exception to an unused var).
                continue
            ret.append([easing_name] + values)
    return ret
def _get_anim_func(size, animated_type, kf_func):
    """Build a test function for one animated node type.

    Args:
        size: number of components per keyframe value (1 for float,
            2/3/4 for vectors and quaternions).
        animated_type: the Animated* node class under test.
        kf_func: factory building one keyframe node from (time, value, **kw).
    """
    @test_floats()
    def test_func():
        # Offset pairs: full easing range plus clipped start and/or end.
        offsets = ((None, None), (None, 0.7), (0.3, None), (0.3, 0.7))
        nb_kf = len(_easing_specs) + 1
        nb_queries = nb_kf - 1
        scale = 1. / float(nb_kf)
        # Fixed seed so the generated keyframe values are reproducible.
        random.seed(0)
        kfvalues = [[random.uniform(0, 1) for r in range(size)] for i in range(nb_kf + 1)]
        ret = []
        for i, (easing_start_offset, easing_end_offset) in enumerate(offsets):
            anim_kf = [kf_func(0, kfvalues[0])]
            for j in range(nb_kf):
                t = (j + 1) * scale
                v = kfvalues[j + 1]
                # Only the first nb_kf entries of _easing_list are exercised.
                easing_name, easing_args = _easing_split(_easing_list[j])
                anim_kf.append(kf_func(t, v,
                                       easing=easing_name,
                                       easing_args=easing_args,
                                       easing_start_offset=easing_start_offset,
                                       easing_end_offset=easing_end_offset))
            anim = animated_type(anim_kf)
            # Query between times
            values = [anim.evaluate((t_id + 1) * scale) for t_id in range(nb_queries)]
            # Query boundaries and out of them (to trigger a copy instead of a mix)
            values += [anim.evaluate(0)]
            values += [anim.evaluate(1)]
            values += [anim.evaluate(5)]
            # Vector-valued results are flattened so every row is a flat
            # list of floats prefixed by the offsets label.
            if hasattr(values[0], '__iter__'):
                values = list(itertools.chain(*values))
            ret.append(['off%d' % i] + values)
        return ret
    return test_func
# Keyframe factories, one per animated type (PEP 8: plain functions instead
# of the previous lambda assignments).
def _float_kf_func(t, v, **kw):
    # Scalar keyframes take only the first component of the value vector.
    return ngl.AnimKeyFrameFloat(t, v[0], **kw)


def _vec2_kf_func(t, v, **kw):
    return ngl.AnimKeyFrameVec2(t, v, **kw)


def _vec3_kf_func(t, v, **kw):
    return ngl.AnimKeyFrameVec3(t, v, **kw)


def _vec4_kf_func(t, v, **kw):
    return ngl.AnimKeyFrameVec4(t, v, **kw)


def _quat_kf_func(t, v, **kw):
    return ngl.AnimKeyFrameQuat(t, v, **kw)


# One exported test per animated node type.
anim_forward_float = _get_anim_func(1, ngl.AnimatedFloat, _float_kf_func)
anim_forward_vec2 = _get_anim_func(2, ngl.AnimatedVec2, _vec2_kf_func)
anim_forward_vec3 = _get_anim_func(3, ngl.AnimatedVec3, _vec3_kf_func)
anim_forward_vec4 = _get_anim_func(4, ngl.AnimatedVec4, _vec4_kf_func)
anim_forward_quat = _get_anim_func(4, ngl.AnimatedQuat, _quat_kf_func)
| 34.232704 | 96 | 0.60959 | 0 | 0 | 0 | 0 | 2,477 | 0.45508 | 0 | 0 | 1,073 | 0.197134 |
10eff1d39f6acd5ae6fc306444aa467930b6a9d1 | 1,624 | py | Python | ozpcenter/models/import_task_result.py | emosher/ozp-backend | d31d00bb8a28a8d0c999813f616b398f41516244 | [
"Apache-2.0"
] | 1 | 2018-10-05T17:03:01.000Z | 2018-10-05T17:03:01.000Z | ozpcenter/models/import_task_result.py | emosher/ozp-backend | d31d00bb8a28a8d0c999813f616b398f41516244 | [
"Apache-2.0"
] | 1 | 2017-01-06T19:20:32.000Z | 2017-01-06T19:20:32.000Z | ozpcenter/models/import_task_result.py | emosher/ozp-backend | d31d00bb8a28a8d0c999813f616b398f41516244 | [
"Apache-2.0"
] | 7 | 2016-12-16T15:42:05.000Z | 2020-09-05T01:11:27.000Z | from django.db import models
from ozpcenter.utils import get_now_utc
from .import_task import ImportTask
class ImportTaskResultManager(models.Manager):
    """Query helpers for ImportTaskResult rows."""

    def get_queryset(self):
        return super().get_queryset()

    def find_all(self):
        """Return every stored result."""
        return self.all()

    def find_by_id(self, id):
        """Return the single result with primary key ``id``."""
        return self.get(id=id)

    def find_all_by_import_task(self, import_task_pk):
        """Return all results recorded for the given ImportTask."""
        return self.filter(import_task=import_task_pk)

    def create_result(self, import_task_id, result, message):
        """Persist a new result and stamp it as the task's last run."""
        task_result = self.create(import_task_id=import_task_id, result=result, message=message)
        ImportTask.objects.filter(id=import_task_id).update(last_run_result=task_result.id)
        return task_result
class ImportTaskResult(models.Model):
    """
    Import Task Result

    Represents the results of an import task that has been run previously
    """
    class Meta:
        db_table = 'import_task_result'

    objects = ImportTaskResultManager()

    RESULT_PASS = 'Pass'
    RESULT_FAIL = 'Fail'
    RESULT_CHOICES = (
        (RESULT_PASS, 'Pass'),
        (RESULT_FAIL, 'Fail'),
    )

    # The task this result belongs to; a task accumulates many results.
    import_task = models.ForeignKey(ImportTask, related_name="results")
    # When the task was run (defaults to "now" in UTC at row creation).
    run_date = models.DateTimeField(default=get_now_utc)
    # Pass/Fail outcome, constrained to RESULT_CHOICES.
    result = models.CharField(max_length=4, choices=RESULT_CHOICES)
    # Free-form detail about the run.
    message = models.CharField(max_length=4000, null=False)

    def __repr__(self):
        return '{0!s} | Date: {1!s} | Result: {2!s}'.format(self.import_task, self.run_date, self.result)

    def __str__(self):
        # Identical output to __repr__; delegate instead of duplicating the
        # format string (the original repeated it verbatim).
        return repr(self)
| 28.491228 | 105 | 0.690887 | 1,512 | 0.931034 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.14532 |
10f03c8b4ec1d6bf728c70944a7bf9b8db50e71f | 3,422 | py | Python | latmats/tasks/loader.py | ardunn/latmats | 9eabbd404041cd706dac443dda18bf4809835d3b | [
"MIT"
] | null | null | null | latmats/tasks/loader.py | ardunn/latmats | 9eabbd404041cd706dac443dda18bf4809835d3b | [
"MIT"
] | null | null | null | latmats/tasks/loader.py | ardunn/latmats | 9eabbd404041cd706dac443dda18bf4809835d3b | [
"MIT"
] | null | null | null | """
Loading utilities for computational experiments.
"""
import os
import pandas as pd
DATA_DIR = os.path.dirname(os.path.abspath(__file__))
def load_zT(all_data=False):
    """
    Thermoelectric figures of merit (zT) for 165 experimentally measured,
    polycrystalline compounds at 300K.  Sourced from the Citrination
    database (Citrine, Inc.), which collected the measurements from the
    review https://doi.org/10.1021/cm400893e.

    Args:
        all_data (bool): If True, return every column (composition, zT,
            power factor, thermal conductivity, Seebeck coefficient, log
            resistivity).  If False, return only composition and zT.

    Returns:
        (pd.DataFrame): The dataframe containing the zT data.
    """
    csv_path = os.path.join(DATA_DIR, "zT-citrination-165.csv")
    frame = pd.read_csv(csv_path, index_col=None)
    return frame if all_data else frame[["composition", "zT"]]
def load_e_form():
    """
    85,014 DFT-GGA computed formation energies from the Materials Project
    (retrieved via MAPI on Nov 6, 2019; adapted from the TestStabilityML
    repository).  Exactly one formation energy per composition: the ground
    state among all structures with that composition.

    Returns:
        (pd.DataFrame): Formation energies and compositions, indexed by mpid.
    """
    csv_path = os.path.join(DATA_DIR, "eform-materialsproject-85014.csv")
    return pd.read_csv(csv_path, index_col="mpid")
def load_expt_gaps():
    """
    4,604 experimental band gaps, one per composition (Matbench v0.1).

    Retrieved from the Zhuo et al. supplementary information
    (https:doi.org/10.1021/acs.jpclett.8b00124).  Deduplicated by
    composition: compositions whose reported gaps spanned more than 0.1 eV
    were dropped, and the rest were assigned the reported value closest to
    the mean of all reports for that composition.

    Returns:
        (pd.DataFrame): Experimental band gaps and compositions as strings.
    """
    csv_path = os.path.join(DATA_DIR, "bandgap-zhuo-4604.csv")
    return pd.read_csv(csv_path, index_col=False)
def load_steels():
    """
    312 experimentally measured steel yield strengths (GPa), one per
    composition (Matbench v0.1).  Retrieved from Citrine informatics
    (https://citrination.com/datasets/153092/) and deduplicated.

    Returns:
        (pd.DataFrame): Yield strength per composition.
    """
    csv_path = os.path.join(DATA_DIR, "yieldstrength-citrination-312.csv")
    return pd.read_csv(csv_path, index_col=False)
if __name__ == "__main__":
    # Quick manual smoke check: load and print the steels dataset.
    df = load_steels()
    print(df)
10f0b5be627adb241ec165f681ffdd4b7c724bf9 | 1,913 | py | Python | triviaqa_cp/triviaqa_cp_loader.py | chrisc36/debias | 98033fd569499879ba8d0ef917b37913660f3701 | [
"Apache-2.0"
] | 51 | 2019-09-12T03:40:08.000Z | 2022-03-12T07:47:33.000Z | triviaqa_cp/triviaqa_cp_loader.py | kiminh/debias | 98033fd569499879ba8d0ef917b37913660f3701 | [
"Apache-2.0"
] | 2 | 2020-05-22T14:32:50.000Z | 2021-03-26T08:36:47.000Z | triviaqa_cp/triviaqa_cp_loader.py | kiminh/debias | 98033fd569499879ba8d0ef917b37913660f3701 | [
"Apache-2.0"
] | 8 | 2019-10-25T06:08:29.000Z | 2021-06-23T22:15:58.000Z | import json
def get_qtypes(dataset_name, part):
    """Return list of question-types for a particular TriviaQA-CP dataset.

    The "train"/"dev" parts are the biased splits: they contain the question
    types *complementary* to the dataset name; the "test" part contains only
    the dataset's own question type.
    """
    if dataset_name not in {"location", "person"}:
        raise ValueError("Unknown dataset %s" % dataset_name)
    if part not in {"train", "dev", "test"}:
        raise ValueError("Unknown part %s" % part)
    is_biased = part in {"train", "dev"}
    # The original enumerated all four combinations in an if/elif chain with
    # an unreachable `else: raise RuntimeError()`; two conditionals cover it.
    if dataset_name == "location":
        return ["person", "other"] if is_biased else ["location"]
    return ["location", "other"] if is_biased else ["person"]
def load_triviaqa_cp(filename, dataset_name, part, expected_version=None):
    """Load a TriviaQA-CP dataset.

    :param filename: path to the TriviaQA-CP train json (when part=="train")
        or dev json (otherwise)
    :param dataset_name: dataset to load, "person" or "location"
    :param part: which part, "train", "dev" or "test"
    :param expected_version: optionally require the data to match this version
    :return: list of question dicts whose QuestionType matches the split
    """
    target_qtypes = get_qtypes(dataset_name, part)
    with open(filename, "r") as f:
        data = json.load(f)

    if expected_version is not None and expected_version != data["Version"]:
        raise ValueError("Expected version %s, but data was version %s" % (
            expected_version, data["Version"]))

    if part == "train":
        if data["Split"] != "Train":
            raise ValueError("Expected train file, but split is %s" % data["Split"])
    elif data["Split"] != "Dev":
        raise ValueError("Expected dev file, but split is %s" % data["Split"])

    return [question for question in data["Data"]
            if question["QuestionType"] in target_qtypes]
| 32.423729 | 84 | 0.671197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 868 | 0.453738 |
10f33906f4fef402a2de99509529076ca712a7c2 | 25 | py | Python | org/sfu/billing/controller/__init__.py | MehdiLebdi/Real-Time-Charging_system | 9eb59c12a36b3e10d9b3bf99bf2cd09a91376a10 | [
"Apache-2.0"
] | 1 | 2020-08-15T08:34:36.000Z | 2020-08-15T08:34:36.000Z | org/sfu/billing/controller/__init__.py | MehdiLebdi/Real-Time-Charging_system | 9eb59c12a36b3e10d9b3bf99bf2cd09a91376a10 | [
"Apache-2.0"
] | null | null | null | org/sfu/billing/controller/__init__.py | MehdiLebdi/Real-Time-Charging_system | 9eb59c12a36b3e10d9b3bf99bf2cd09a91376a10 | [
"Apache-2.0"
] | null | null | null |
# Names exported by ``from org.sfu.billing.controller import *``.
__all__= ['controller']
| 8.333333 | 23 | 0.68 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.48 |
10f540c5034ca1b5afdb44405af84acd35b8db36 | 385 | py | Python | tests/unicode/unicode_id.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 692 | 2016-12-19T23:25:35.000Z | 2022-03-31T14:20:48.000Z | tests/unicode/unicode_id.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 509 | 2017-03-28T19:37:18.000Z | 2022-03-31T20:31:43.000Z | tests/unicode/unicode_id.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 228 | 2016-12-19T05:03:30.000Z | 2022-03-22T18:13:00.000Z | # test unicode in identifiers
# comment
# αβγδϵφζ
# global identifiers
α = 1
αβγ = 2
bβ = 3
βb = 4
print(α, αβγ, bβ, βb)
# function, argument, local identifiers
# (note: this def rebinds the module-level name α from the int above)
def α(β, γ):
    δ = β + γ
    print(β, γ, δ)
α(1, 2)
# class, method identifiers
class φ:
    def __init__(self):
        pass
    def δ(self, ϵ):
        print(ϵ)
# attribute access via hasattr/keyword arguments also works with
# non-ASCII names
zζzζz = φ()
if hasattr(zζzζz, "δ"):
    zζzζz.δ(ϵ=123)
| 13.75 | 39 | 0.584416 | 86 | 0.200935 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.336449 |
10f5834075ee59a03333434f3790eb69637b29a2 | 552 | py | Python | examples/hsets.py | gfmartins/cssdbpy | f2369dd46caeb6bd84f2b1deacb8fb9416b26afc | [
"BSD-2-Clause"
] | 85 | 2016-09-05T19:41:37.000Z | 2021-11-08T11:26:54.000Z | examples/hsets.py | gfmartins/cssdbpy | f2369dd46caeb6bd84f2b1deacb8fb9416b26afc | [
"BSD-2-Clause"
] | 10 | 2016-09-22T06:42:08.000Z | 2018-12-12T13:55:16.000Z | examples/hsets.py | deslum/ssdbpy | 4cecc6f421bbf1782334b294569801c5808aaaa1 | [
"BSD-2-Clause"
] | 9 | 2016-09-06T08:41:32.000Z | 2020-09-08T04:04:23.000Z | from cssdbpy import Connection
from time import time
import md5
if __name__ == '__main__':
    # Exercise the SSDB hash commands (hset/hget/hexists/hdel/hscan/hclear)
    # against a local server.  NOTE(review): this is Python 2 code (print
    # statements, xrange, and the removed `md5` module).
    conn = Connection('127.0.0.1', 8888)
    for i in xrange(0, 10000):
        # Field name is the md5 hex digest of "word<i>"; value is the
        # current epoch time.
        md5word = md5.new('word{}'.format(i)).hexdigest()
        create = conn.execute('hset','words', md5word, int(time()))
        value = conn.execute('hget','words', md5word)
        exists = conn.execute('hexists','words', md5word)
        delete = conn.execute('hdel','words', md5word)
        print md5word, value, create, exists, delete
    # Scan up to 100 remaining fields, then drop the whole hash.
    print conn.execute('hscan', 'words', '', '', 100)
    conn.execute('hclear','words')
| 32.470588 | 61 | 0.677536 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.211957 |
10f6dbf11be847714f973a856f08657cfe64dcc7 | 4,538 | py | Python | BuildTiramisuData.py | abfarahani/Image-segmentation | d58d25a593384199f9d504eab3afbf3bdd108c17 | [
"MIT"
] | null | null | null | BuildTiramisuData.py | abfarahani/Image-segmentation | d58d25a593384199f9d504eab3afbf3bdd108c17 | [
"MIT"
] | null | null | null | BuildTiramisuData.py | abfarahani/Image-segmentation | d58d25a593384199f9d504eab3afbf3bdd108c17 | [
"MIT"
] | null | null | null | import os
import argparse
import random
import shutil
from distutils.dir_util import copy_tree
def main(args):
    """
    Simple function that looks at the arguments passed, checks to make sure
    everything expected exists, and then defines a validation data set for
    later processing by TrainTiramisu.py and TestTiramisu.py
    """
    args.rootDir = os.path.normpath(args.rootDir)
    args.outputDir = os.path.normpath(args.outputDir)

    # Ensure all expected input directories and files exist.
    for required_dir in (args.rootDir,
                         args.rootDir + "/data",
                         args.rootDir + "/masks"):
        if not os.path.exists(required_dir):
            raise Exception("ERROR: The dir '" + required_dir + "' doesn't exist")
    for required_file in (args.rootDir + "/test.txt",
                          args.rootDir + "/train.txt"):
        if not os.path.exists(required_file):
            # Fixed wording: these are files, not dirs, as the original said.
            raise Exception("ERROR: The file '" + required_file + "' doesn't exist")

    # Make all output directories if needed (parents listed before children).
    for out_dir in (args.outputDir,
                    args.outputDir + "/test",
                    args.outputDir + "/test/data",
                    args.outputDir + "/validate",
                    args.outputDir + "/validate/data",
                    args.outputDir + "/validate/masks",
                    args.outputDir + "/train",
                    args.outputDir + "/train/data",
                    args.outputDir + "/train/masks"):
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)

    # Read in test and train example names, one per line (the original
    # leaked the file handles by never closing them).
    with open(args.rootDir + "/test.txt") as f:
        testList = [line.rstrip('\n') for line in f]
    with open(args.rootDir + "/train.txt") as f:
        trainList = [line.rstrip('\n') for line in f]

    # Deterministically shuffle the train list so the validation split is
    # reproducible for a given seed.
    random.seed(args.randSeed)
    random.shuffle(trainList)

    # Copy over all test data
    for name in testList:
        print("test: " + name)
        copy_tree(args.rootDir + "/data/" + name,
                  args.outputDir + "/test/data/" + name)

    # The first validNum shuffled names become the validation set.
    validNum = min(args.validNum, len(trainList))
    for name in trainList[:validNum]:
        print("validate: " + name)
        copy_tree(args.rootDir + "/data/" + name,
                  args.outputDir + "/validate/data/" + name)
        # BUG FIX: os.copy() does not exist; shutil.copy is the single-file
        # counterpart of distutils' copy_tree.
        shutil.copy(args.rootDir + "/masks/" + name + ".png",
                    args.outputDir + "/validate/masks/" + name + ".png")

    # Copy remaining data to train directory
    for name in trainList[validNum:]:
        print("train: " + name)
        copy_tree(args.rootDir + "/data/" + name,
                  args.outputDir + "/train/data/" + name)
        shutil.copy(args.rootDir + "/masks/" + name + ".png",
                    args.outputDir + "/train/masks/" + name + ".png")
    # Done!
if __name__ == '__main__':
    # Command-line entry point: parse arguments and build the
    # test/validate/train split.
    parser = argparse.ArgumentParser(description='This ' + \
        'is part of the UGA CSCI 8360 Project 2 - . Please visit our ' + \
        'GitHub project at https://github.com/dsp-uga/team-linden-p2 ' + \
        'for more information regarding data organization ' + \
        'expectations and examples on how to execute our scripts.')
    parser.add_argument('-r','--rootDir', required=True,
                        help='The base directory storing files and ' + \
                             'directories conforming with organization ' + \
                             'expectations, please visit out GitHub website')
    parser.add_argument('-v', '--validNum', required=True, type=int,
                        help='Size of validate set')
    parser.add_argument('-s', '--randSeed', required=True, type=int,
                        help='Random seed for defining validate set')
    parser.add_argument('-o', '--outputDir', required=True,
                        help='Root directory where new files and folders ' + \
                             'will be placed')
    args = parser.parse_args()
    main(args)
| 43.219048 | 79 | 0.608859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,686 | 0.371529 |
10f6f1699cc6b4bfebbd0b39b437bd160c9162ff | 880 | py | Python | samples/histrequester_demo.py | suuuch/tws_async | 35f4ae77734d0e8dad5b1d7f9aac8102c42c4c0c | [
"Unlicense"
] | 102 | 2017-02-15T08:13:34.000Z | 2022-03-11T02:00:57.000Z | samples/histrequester_demo.py | suuuch/tws_async | 35f4ae77734d0e8dad5b1d7f9aac8102c42c4c0c | [
"Unlicense"
] | 8 | 2017-05-03T17:28:57.000Z | 2018-09-10T11:42:48.000Z | samples/histrequester_demo.py | suuuch/tws_async | 35f4ae77734d0e8dad5b1d7f9aac8102c42c4c0c | [
"Unlicense"
] | 40 | 2017-02-18T08:17:21.000Z | 2022-02-25T22:23:26.000Z | import datetime
import pytz
from tws_async import *
# Contracts to fetch history for.
stocks = [
    Stock('TSLA'),
    Stock('AAPL'),
    Stock('GOOG'),
    Stock('INTC', primaryExchange='NASDAQ')
]
forexs = [
    Forex('EURUSD'),
    Forex('GBPUSD'),
    Forex('USDJPY')
]
# Request the past week of data.
endDate = datetime.date.today()
startDate = endDate - datetime.timedelta(days=7)
# One request per stock per day; forex pairs use 30-day daily midpoint bars.
histReqs = []
for date in util.dateRange(startDate, endDate):
    histReqs += [HistRequest(stock, date) for stock in stocks]
    histReqs += [HistRequest(forex, date, whatToShow='MIDPOINT',
            durationStr='30 D', barSizeSetting='1 day') for forex in forexs]
timezone = datetime.timezone.utc
# timezone = pytz.timezone('Europe/Amsterdam')
# timezone = pytz.timezone('US/Eastern')
util.logToConsole()
# Connect to a locally running TWS/IB Gateway and download all requests
# under ./data, then drive the event loop until the task completes.
tws = HistRequester()
tws.connect('127.0.0.1', 7497, clientId=1)
task = tws.download(histReqs, rootDir='data', timezone=timezone)
tws.run(task)
| 24.444444 | 76 | 0.685227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.206818 |
10f7e736dab607c21a9da9bd75162b5602eb90c7 | 961 | py | Python | problems/test_0630_easy_to_understand.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | 1 | 2017-06-17T23:47:17.000Z | 2017-06-17T23:47:17.000Z | problems/test_0630_easy_to_understand.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | problems/test_0630_easy_to_understand.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
] | null | null | null | import heapq
import unittest
from typing import List
import utils
# O(nlog(n)) time. O(n) space. Interval, sorting by end, greedy.
class Solution:
    def scheduleCourse(self, courses: List[List[int]]) -> int:
        """Greedily take courses in deadline order; when one doesn't fit,
        swap it in for the longest course taken so far if that frees time.
        Returns the maximum number of courses that can be completed."""
        courses.sort(key=lambda course: course[1])
        elapsed = 0
        taken = []  # max-heap (negated durations) of courses taken so far
        for duration, deadline in courses:
            if elapsed + duration <= deadline:
                heapq.heappush(taken, -duration)
                elapsed += duration
            elif taken and duration < -taken[0]:
                # Replace the longest taken course with this shorter one;
                # heappop returns the negated longest duration.
                elapsed += duration + heapq.heappop(taken)
                heapq.heappush(taken, -duration)
        return len(taken)
class Test(unittest.TestCase):
    """Data-driven test: cases come from the JSON file matching this module
    (loaded via the project-local ``utils`` helper)."""

    def test(self):
        cases = utils.load_test_json(__file__).test_cases
        for case in cases:
            # Keep the raw args in the failure message for easier debugging.
            args = str(case.args)
            actual = Solution().scheduleCourse(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=args)


if __name__ == '__main__':
    unittest.main()
| 24.025 | 68 | 0.557752 | 774 | 0.805411 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.077003 |
10f9b2fbba3d5b4a7de7179cc117d380392f5116 | 1,761 | py | Python | Tests/ttLib/tables/C_F_F__2_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 2,705 | 2016-09-27T10:02:12.000Z | 2022-03-31T09:37:46.000Z | Tests/ttLib/tables/C_F_F__2_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 1,599 | 2016-09-27T09:07:36.000Z | 2022-03-31T23:04:51.000Z | Tests/ttLib/tables/C_F_F__2_test.py | odidev/fonttools | 27b5f568f562971d7fbf64eeb027ea61e4939db4 | [
"Apache-2.0",
"MIT"
] | 352 | 2016-10-07T04:18:15.000Z | 2022-03-30T07:35:01.000Z | """cff2Lib_test.py -- unit test for Adobe CFF fonts."""
from fontTools.ttLib import TTFont
from io import StringIO
import re
import os
import unittest
CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURR_DIR, 'data')
CFF_TTX = os.path.join(DATA_DIR, "C_F_F__2.ttx")
CFF_BIN = os.path.join(DATA_DIR, "C_F_F__2.bin")
def strip_VariableItems(string):
    """Remove dump text that legitimately varies between runs, so XML
    comparisons only see stable content."""
    volatile_patterns = (
        # ttLibVersion changes with the fontTools version
        ' ttLibVersion=".*"',
        # head table checksum and mod date change with each save
        '<checkSumAdjustment value="[^"]+"/>',
        '<modified value="[^"]+"/>',
    )
    for pattern in volatile_patterns:
        string = re.sub(pattern, '', string)
    return string
class CFFTableTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        """Compile reference CFF2 binary data and load the expected XML dump.

        Note: the binary path is handed straight to TTFont; the previous
        ``with open(CFF_BIN, 'rb') as f`` wrapper opened the file without
        ever using ``f``.
        """
        font = TTFont(file=CFF_BIN)
        cls.cff2Data = font['CFF2'].compile(font)
        with open(CFF_TTX, 'r') as f:
            cff2XML = strip_VariableItems(f.read())
        cls.cff2XML = cff2XML.splitlines()

    def test_toXML(self):
        """Dumping the binary font to XML must match the reference TTX."""
        font = TTFont(file=CFF_BIN)
        out = StringIO()
        font.saveXML(out)
        cff2XML = strip_VariableItems(out.getvalue()).splitlines()
        self.assertEqual(cff2XML, self.cff2XML)

    def test_fromXML(self):
        """Importing the TTX and compiling must reproduce the binary data."""
        font = TTFont(sfntVersion='OTTO')
        font.importXML(CFF_TTX)
        cff2Data = font['CFF2'].compile(font)
        self.assertEqual(cff2Data, self.cff2Data)


if __name__ == "__main__":
    unittest.main()
| 29.847458 | 71 | 0.635434 | 994 | 0.564452 | 0 | 0 | 369 | 0.20954 | 0 | 0 | 320 | 0.181715 |
10f9d7dfc533d1074e71035424e95b25f68c15f6 | 340 | py | Python | Module_03/mlb.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | Module_03/mlb.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | Module_03/mlb.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | from common.webdriver_factory import get_driver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Launch a Chrome session via the shared factory and load the MLB (Spanish)
# standings page.
driver = get_driver('chrome')
# NOTE(review): `wait` is created but never used below — presumably intended
# for an explicit wait on page elements; confirm before removing.
wait = WebDriverWait(driver, 5)
driver.get('https://www.mlb.com/es/standings')
driver.quit() | 30.909091 | 64 | 0.817647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.123529 |
10fb0f98c0db7ba3d5ed61bdb4bc78ad51efafdc | 13,380 | py | Python | azext_csvmware/_help.py | ctaggart/az-csvmware-cli | 6f6f7cd5cb9ae0e34e4d81b499337c3a5ca9fc74 | [
"MIT"
] | 2 | 2020-05-20T13:33:33.000Z | 2020-09-12T03:48:15.000Z | azext_csvmware/_help.py | ctaggart/az-csvmware-cli | 6f6f7cd5cb9ae0e34e4d81b499337c3a5ca9fc74 | [
"MIT"
] | null | null | null | azext_csvmware/_help.py | ctaggart/az-csvmware-cli | 6f6f7cd5cb9ae0e34e4d81b499337c3a5ca9fc74 | [
"MIT"
] | 2 | 2020-05-11T17:10:27.000Z | 2021-01-02T16:15:35.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
This file contains the help strings (summaries and examples) for all commands and command groups.
"""
from knack.help_files import helps # pylint: disable=unused-import
helps['csvmware'] = """
type: group
short-summary: Manage Azure VMware Solution by CloudSimple.
"""
helps['csvmware vm'] = """
type: group
short-summary: Manage VMware virtual machines.
"""
helps['csvmware vm create'] = """
type: command
short-summary: Create a VMware virtual machine.
parameters:
- name: --nic
short-summary: Add or modify NICs.
long-summary: |
By default, the nics will be added according to the vSphere VM template.
You can add more nics, or modify some properties of a nic specified in the VM template.
Multiple nics can be specified by using more than one `--nic` argument.
If a nic name already exists in the VM template, that nic would be modified according to the user input.
If a nic name does not exist in the VM template, a new nic would be created and a new name will be assigned to it.
Usage: --nic name=MyNicName virtual-network=MyNetwork adapter=MyAdapter power-on-boot=True/False
- name: --disk
short-summary: Add or modify disks.
long-summary: |
By default, the disks will be added according to the vSphere VM template.
You can add more disks, or modify some properties of a disk specified in the VM template.
Multiple disks can be specified by using more than one `--disk` argument.
If a disk name already exists in the VM template, that disk would be modified according to the user input.
If a disk name does not exist in the VM template, a new disk would be created and a new name will be assigned to it.
Usage: --disk name=MyDiskName controller=SCSIControllerID mode=IndependenceMode size=DiskSizeInKB
examples:
- name: Creating a VM with default parameters from the vm template.
text: >
az csvmware vm create -n MyVm -g MyResourceGroup -p MyPrivateCloud -r MyResourcePool --template MyVmTemplate
- name: Creating a VM and adding an extra nic to the VM with virtual network MyVirtualNetwork, adapter VMXNET3, that power ups on boot.
The name entered in the nic is for identification purposes only, to see if such a nic name exists in the vm template, else a nic is created and a new name is assigned.
Lets say the vm template contains a nic with name "Network adapter 1".
text: >
az csvmware vm create -n MyVm -g MyResourceGroup -p MyPrivateCloud -r MyResourcePool --template MyVmTemplate --nic name=NicNameWouldBeAssigned virtual-network=MyVirtualNetwork adapter=VMXNET3 power-on-boot=True
- name: Customizing specific properties of a VM. Changing the number of cores to 2 and adapter of "Network adapter 1" nic to E1000E, from that specified in the template. All other properties would be defaulted from the template.
text: >
az csvmware vm create -n MyVm -g MyResourceGroup -p MyPrivateCloud -r MyResourcePool --template MyVmTemplate --cores 2 --nic name="Network adapter 1" adapter=E1000E
- name: Customizing specific properties of a VM. Changing the adapter of "Network adapter 1" nic to E1000E, from that specified in the template, and also adding another nic with virtual network MyVirtualNetwork, adapter VMXNET3, that power ups on boot.
text: >
az csvmware vm create -n MyVm -g MyResourceGroup -p MyPrivateCloud -r MyResourcePool --template MyVmTemplate --nic name="Network adapter 1" adapter=E1000E --nic name=NicNameWouldBeAssigned virtual-network=MyVirtualNetwork adapter=VMXNET3 power-on-boot=True
- name: Creating a VM and adding an extra disk to the VM with SCSI controller 0, persistent mode, and 41943040 KB size.
The name entered in the disk is for identification purposes only, to see if such a disk name exists in the vm template, else a disk is created and a new name is assigned.
Lets say the vm template contains a disk with name "Hard disk 1".
text: >
az csvmware vm create -n MyVm -g MyResourceGroup -p MyPrivateCloud -r MyResourcePool --template MyVmTemplate --disk name=DiskNameWouldBeAssigned controller=1000 mode=persistent size=41943040
- name: Customizing specific properties of a VM. Changing the size of "Hard disk 1" disk to 21943040 KB, from that specified in the template, and also adding another disk with SCSI controller 0, persistent mode, and 41943040 KB size.
text: >
az csvmware vm create -n MyVm -g MyResourceGroup -p MyPrivateCloud -r MyResourcePool --template MyVmTemplate --disk name="Hard disk 1" size=21943040 --disk name=DiskNameWouldBeAssigned controller=1000 mode=persistent size=41943040
"""
helps['csvmware vm list'] = """
type: command
short-summary: List details of VMware virtual machines in the current subscription. If resource group is specified, only the details of virtual machines in that resource group would be listed.
examples:
- name: List details of VMware VMs in the current subscription.
text: >
az csvmware vm list
- name: List details of VMware VMs in a particular resource group.
text: >
az csvmware vm list -g MyResourceGroup
"""
helps['csvmware vm delete'] = """
type: command
short-summary: Delete a VMware virtual machine.
examples:
- name: Delete a VMware VM.
text: >
az csvmware vm delete -n MyVm -g MyResourceGroup
"""
helps['csvmware vm show'] = """
type: command
short-summary: Get the details of a VMware virtual machine.
examples:
- name: Get the details of a VMware VM.
text: >
az csvmware vm show -n MyVm -g MyResourceGroup
"""
helps['csvmware vm start'] = """
type: command
short-summary: Start a VMware virtual machine.
examples:
- name: Start a VMware VM.
text: >
az csvmware vm start -n MyVm -g MyResourceGroup
"""
helps['csvmware vm stop'] = """
type: command
short-summary: Stop/Reboot/Suspend a VMware virtual machine.
examples:
- name: Power off a VMware VM.
text: >
az csvmware vm stop -n MyVm -g MyResourceGroup --mode poweroff
- name: Restart a VMware VM.
text: >
az csvmware vm stop -n MyVm -g MyResourceGroup --mode reboot
"""
helps['csvmware vm update'] = """
type: command
short-summary: Update the tags field of a VMware virtual machine.
examples:
- name: Add or update a tag.
text: >
az csvmware vm update -n MyVm -g MyResourceGroup --set tags.tagName=tagValue
- name: Remove a tag.
text: >
az csvmware vm update -n MyVm -g MyResourceGroup --remove tags.tagName
"""
helps['csvmware vm nic'] = """
type: group
short-summary: Manage VMware virtual machine's Network Interface Cards.
"""
helps['csvmware vm nic add'] = """
type: command
short-summary: Add NIC to a VMware virtual machine.
examples:
- name: Add a NIC with default parameters in a VM.
text: >
az csvmware vm nic add --vm-name MyVm -g MyResourceGroup --virtual-network MyVirtualNetwork
- name: Add a NIC with E1000E adapter that powers on boot in a VM.
text: >
az csvmware vm nic add --vm-name MyVm -g MyResourceGroup --virtual-network MyVirtualNetwork --adapter E1000E --power-on-boot true
"""
helps['csvmware vm nic list'] = """
type: command
short-summary: List details of NICs available on a VMware virtual machine.
examples:
- name: List details of NICs in a VM.
text: >
az csvmware vm nic list --vm-name MyVm -g MyResourceGroup
"""
helps['csvmware vm nic show'] = """
type: command
short-summary: Get the details of a VMware virtual machine's NIC.
examples:
- name: Get the details of a NIC in a VM.
text: >
az csvmware vm nic show --vm-name MyVm -g MyResourceGroup -n "My NIC Name"
"""
helps['csvmware vm nic delete'] = """
type: command
short-summary: Delete NICs from a VM.
examples:
- name: Delete two NICs from a VM.
text: >
az csvmware vm nic delete --vm-name MyVm -g MyResourceGroup --nics "My NIC Name 1" "My NIC Name 2"
"""
helps['csvmware vm disk'] = """
type: group
short-summary: Manage VMware virtual machine's disks.
"""
helps['csvmware vm disk add'] = """
type: command
short-summary: Add disk to a VMware virtual machine.
examples:
- name: Add a disk with default parameters in a VM.
text: >
az csvmware vm disk add --vm-name MyVm -g MyResourceGroup
- name: Add a disk with SATA controller 0 and 64 GB memory in a VM.
text: >
az csvmware vm disk add --vm-name MyVm -g MyResourceGroup --controller 15000 --size 67108864
"""
helps['csvmware vm disk list'] = """
type: command
short-summary: List details of disks available on a VMware virtual machine.
examples:
- name: List details of disks in a VM.
text: >
az csvmware vm disk list --vm-name MyVm -g MyResourceGroup
"""
helps['csvmware vm disk show'] = """
type: command
short-summary: Get the details of a VMware virtual machine's disk.
examples:
- name: Get the details of a disk in a VM.
text: >
az csvmware vm disk show --vm-name MyVm -g MyResourceGroup -n "My Disk Name"
"""
helps['csvmware vm disk delete'] = """
type: command
short-summary: Delete disks from a VM.
examples:
- name: Delete two disks from a VM.
text: >
az csvmware vm disk delete --vm-name MyVm -g MyResourceGroup --disks "My Disk Name 1" "My Disk Name 2"
"""
helps['csvmware vm-template'] = """
type: group
short-summary: Manage VMware virtual machine templates.
"""
helps['csvmware vm-template list'] = """
type: command
short-summary: List details of VMware virtual machines templates in a private cloud.
examples:
- name: List details of VM templates.
text: >
az csvmware vm-template list -p MyPrivateCloud -r MyResourcePool --location eastus
"""
helps['csvmware vm-template show'] = """
type: command
short-summary: Get the details of a VMware virtual machines template in a private cloud.
examples:
- name: Get the details of a VM template.
text: >
az csvmware vm-template show -n MyVmTemplate -p MyPrivateCloud --location eastus
"""
helps['csvmware virtual-network'] = """
type: group
short-summary: Manage virtual networks.
"""
helps['csvmware virtual-network list'] = """
type: command
short-summary: List details of available virtual networks in a private cloud.
examples:
- name: List details of virtual networks.
text: >
az csvmware virtual-network list -p MyPrivateCloud -r MyResourcePool --location eastus
"""
helps['csvmware virtual-network show'] = """
type: command
short-summary: Get the details of a virtual network in a private cloud.
examples:
- name: Get the details of a virtual network.
text: >
az csvmware virtual-network show -n MyVirtualNetwork -p MyPrivateCloud --location eastus
"""
helps['csvmware private-cloud'] = """
type: group
short-summary: Manage VMware private clouds.
"""
helps['csvmware private-cloud list'] = """
type: command
short-summary: List details of private clouds in a region.
examples:
- name: List details of private clouds in East US.
text: >
az csvmware private-cloud list --location eastus
"""
helps['csvmware private-cloud show'] = """
type: command
short-summary: Get the details of a private cloud in a region.
examples:
- name: Get the details of a private cloud which is in East US.
text: >
az csvmware private-cloud show -n MyPrivateCloud --location eastus
"""
helps['csvmware resource-pool'] = """
type: group
short-summary: Manage VMware resource pools.
"""
helps['csvmware resource-pool list'] = """
type: command
short-summary: List details of resource pools in a private cloud.
examples:
- name: List details of resource pools.
text: >
az csvmware resource-pool list -p MyPrivateCloud --location eastus
"""
helps['csvmware resource-pool show'] = """
type: command
short-summary: Get the details of a resource pool in a private cloud.
examples:
- name: Get the details of a resource pool.
text: >
az csvmware resource-pool show -n MyResourcePool -p MyPrivateCloud --location eastus
"""
| 41.169231 | 268 | 0.652242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,964 | 0.968909 |
10fbff50584bd8fe647d8f729ee0c1afb693afd7 | 15,033 | py | Python | pajbot/managers/songrequest.py | sgaweda/troybot | 7153c0ad387e31de57c71172893fd92c85259d1b | [
"MIT"
] | null | null | null | pajbot/managers/songrequest.py | sgaweda/troybot | 7153c0ad387e31de57c71172893fd92c85259d1b | [
"MIT"
] | 2 | 2020-02-18T03:30:30.000Z | 2020-02-18T03:31:44.000Z | pajbot/managers/songrequest.py | sgaweda/troybot | 7153c0ad387e31de57c71172893fd92c85259d1b | [
"MIT"
] | null | null | null | import logging
import threading
import time
from pajbot.managers.db import DBManager
from pajbot.managers.schedule import ScheduleManager
from pajbot.models.songrequest import SongrequestQueue, SongrequestHistory, SongRequestSongInfo
from pajbot.models.user import User
log = logging.getLogger("pajbot")
WIDGET_ID = 4
class SongrequestManager:
    """Tracks song-request playback state for the bot and notifies the
    frontend via two channels: ``songrequest_websocket_manager`` (the
    management UI) and ``websocket_manager`` (the on-stream widget,
    addressed by ``WIDGET_ID``).

    NOTE(review): actual playback happens in the frontend widget; this
    class only mirrors state and emits events — confirm against the
    widget implementation.
    """

    def __init__(self, bot):
        """Bind to *bot*; all state starts unset until enable() is called."""
        self.bot = bot
        self.enabled = False
        self.current_song_id = None
        self.showVideo = None
        self.isVideoShowing = None
        self.youtube = None
        self.settings = None
        self.previously_playing_spotify = None
        self.paused = None
        self.module_opened = None
        self.previous_queue = None
        self.true_volume = None

    def enable(self, settings, youtube):
        """Activate the manager and start the 1 Hz background ticker thread."""
        self.enabled = True
        self.showVideo = False
        self.isVideoShowing = True
        self.youtube = youtube
        self.settings = settings
        self.current_song_id = None
        self.previously_playing_spotify = False
        self.paused = False
        self.module_opened = False
        self.previous_queue = 0
        self.true_volume = int(self.settings["volume"])
        # Daemon thread: dies with the process, ticks current_song_time once per second.
        thread = threading.Thread(target=self.inc_current_song, daemon=True)
        thread.start()

    def volume_val(self):
        """Convert the internal (multiplier-scaled) volume back to the user-facing scale."""
        return int(self.true_volume * (100 / int(self.settings["volume_multiplier"])))

    def to_true_volume(self, multiplied_volume):
        """Convert a user-facing volume value to the internal scaled volume."""
        return int(multiplied_volume * int(self.settings["volume_multiplier"]) / 100)

    def disable(self):
        """Deactivate the manager; the ticker thread exits on its next iteration."""
        self.enabled = False
        self.paused = False
        self.settings = None
        self.youtube = None
        self.current_song_id = None
        self.module_opened = False

    def open_module_function(self):
        """Open the module and start playback if nothing is playing. Returns True on a state change."""
        if not self.enabled:
            return False
        if not self.module_opened:
            self.module_opened = True
            self.paused = False
            if not self.current_song_id:
                self.load_song()
            return True
        return False

    def close_module_function(self):
        """Close the module (playback of the current song is not interrupted here)."""
        if not self.enabled:
            return False
        if self.module_opened:
            self.module_opened = False
            self.paused = False
            return True
        return False

    def skip_function(self, skipped_by):
        """Skip the current song, attributing the skip to *skipped_by* (user input string)."""
        with DBManager.create_session_scope() as db_session:
            skipped_by = User.find_by_user_input(db_session, skipped_by)
            if not skipped_by:
                return
            skipped_by_id = skipped_by.id
        # NOTE(review): this guard only rejects when disabled AND a song is
        # loaded — an `or` may have been intended; load_song() re-checks
        # self.enabled, so behavior is safe either way.
        if not self.enabled and self.current_song_id:
            return False
        self.load_song(skipped_by_id)
        return True

    def previous_function(self, requested_by):
        """Re-queue the Nth-most-recent history entry (tracked by previous_queue) and play it."""
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            requested_by = User.find_by_user_input(db_session, requested_by)
            if not requested_by:
                return
            requested_by_id = requested_by.id
            SongrequestHistory._insert_previous(db_session, requested_by_id, self.previous_queue)
            db_session.commit()
        # Each consecutive "previous" steps one entry further back in history.
        self.previous_queue += 1
        self.load_song(requested_by_id)
        return True

    def pause_function(self):
        """Pause playback. Returns True if the state actually changed."""
        if not self.enabled or not self.current_song_id:
            return False
        if not self.paused:
            self.paused = True
            self._pause()
            return True
        return False

    def resume_function(self):
        """Resume playback; if nothing is loaded and the module is open, load the next song."""
        if not self.enabled or not self.current_song_id:
            return False
        if self.paused:
            self.paused = False
            self._resume()
            if not self.current_song_id and self.module_opened:
                self.load_song()
            return True
        return False

    def seek_function(self, _time):
        """Seek the current song to *_time* (seconds) and persist the new position."""
        if not self.enabled:
            return False
        if self.current_song_id:
            with DBManager.create_session_scope() as db_session:
                current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
                current_song.current_song_time = _time
            self._seek(_time)
            return True
        return False

    def volume_function(self, volume):
        """Set the playback volume from a user-facing value and broadcast it."""
        if not self.enabled:
            return False
        self.true_volume = self.to_true_volume(volume)
        self._volume()
        return True

    def play_function(self, database_id, skipped_by):
        """Move queue entry *database_id* to the front and play it immediately."""
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            skipped_by = User.find_by_user_input(db_session, skipped_by)
            if not skipped_by:
                return
            skipped_by_id = skipped_by.id
            song = SongrequestQueue._from_id(db_session, database_id)
            song._move_song(db_session, 1)
            db_session.commit()
        self.load_song(skipped_by_id)
        SongrequestQueue._update_queue()
        return True

    def move_function(self, database_id, to_id):
        """Move queue entry *database_id* to position *to_id* and refresh clients."""
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            song = SongrequestQueue._from_id(db_session, database_id)
            song._move_song(db_session, to_id)
            db_session.commit()
        self._playlist()
        SongrequestQueue._update_queue()
        return True

    def request_function(self, video_id, requested_by, queue=None):
        """Add YouTube video *video_id* to the queue for *requested_by*.

        Songs longer than settings["max_song_length"] are truncated via
        skip_after. If *queue* is given the song is moved to that position.
        """
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            requested_by = User.find_by_user_input(db_session, requested_by)
            if not requested_by:
                return False
            requested_by_id = requested_by.id
            song_info = SongRequestSongInfo._create_or_get(db_session, video_id, self.youtube)
            if not song_info:
                log.error("There was an error!")
                return False
            skip_after = (
                self.settings["max_song_length"] if song_info.duration > self.settings["max_song_length"] else None
            )
            song = SongrequestQueue._create(db_session, video_id, skip_after, requested_by_id)
            if queue:
                song._move_song(db_session, queue)
            db_session.commit()
        SongrequestQueue._update_queue()
        return True

    def replay_function(self, requested_by):
        """Re-queue the currently playing song at position 1 and restart playback."""
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            requested_by = User.find_by_user_input(db_session, requested_by)
            if not requested_by:
                return False
            requested_by_id = requested_by.id
            current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
            # Re-request on behalf of the song's original requester.
            self.request_function(current_song.video_id, current_song.requested_by_id, 1)
            db_session.commit()
        self.load_song(requested_by_id)
        SongrequestQueue._update_queue()
        return True

    def requeue_function(self, database_id, requested_by):
        """Put history entry *database_id* back into the queue for *requested_by*."""
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            requested_by = User.find_by_user_input(db_session, requested_by)
            if not requested_by:
                return False
            requested_by_id = requested_by.id
            SongrequestHistory._from_id(db_session, database_id).requeue(db_session, requested_by_id)
            db_session.commit()
        SongrequestQueue._update_queue()
        self._playlist()
        return True

    def show_function(self):
        """Enable on-stream video display (takes effect immediately unless paused)."""
        if not self.enabled:
            return False
        if not self.showVideo:
            self.showVideo = True
            if not self.paused:
                self._show()
            return True
        return False

    def hide_function(self):
        """Disable on-stream video display."""
        if not self.enabled:
            return False
        if self.showVideo:
            self.showVideo = False
            self._hide()
            return True
        return False

    def remove_function(self, database_id):
        """Remove queue entry *database_id* and refresh clients."""
        if not self.enabled:
            return False
        with DBManager.create_session_scope() as db_session:
            song = SongrequestQueue._from_id(db_session, database_id)
            song._remove(db_session)
            db_session.commit()
        SongrequestQueue._update_queue()
        self._playlist()
        return True

    def inc_current_song(self):
        """Background ticker (runs in the daemon thread started by enable()).

        Once per second: advance the current song's play time, skip when
        skip_after is about to be reached, and preempt a backup-list song
        as soon as a user-requested song appears in the queue.
        """
        while True:
            if not self.enabled:
                break
            if self.current_song_id:
                if not self.paused:
                    try:
                        with DBManager.create_session_scope() as db_session:
                            current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
                            next_song = SongrequestQueue._get_next_song(db_session)
                            # Skip 10 s early so the frontend has time to transition.
                            if not current_song or (
                                current_song.skip_after
                                and current_song.skip_after < current_song.current_song_time + 10
                            ):
                                self.load_song()
                            else:
                                # A backup-list song yields to a real user request.
                                if (not current_song.requested_by) and next_song and next_song.requested_by:
                                    self.load_song()
                                current_song.current_song_time += 1
                    except Exception as e:
                        log.error(e)
            elif self.module_opened:
                self.load_song()
            time.sleep(1)

    def load_song(self, skipped_by_id=None):
        """Finish the current song (archive or drop it) and start the next one.

        Returns True when a new song started playing, False otherwise.
        Falls back to resuming Spotify (if configured) when the queue is empty.
        """
        if not self.enabled:
            return False
        if self.current_song_id:
            with DBManager.create_session_scope() as db_session:
                current_song = SongrequestQueue._from_id(db_session, self.current_song_id)
                if current_song:
                    # Songs that played >5 s go to history; shorter ones are dropped.
                    if current_song.current_song_time > 5:
                        self.previous_queue = 0
                        # (sic: "histroy" mirrors the model API name _to_histroy)
                        histroy = current_song._to_histroy(db_session, skipped_by_id)
                        if not histroy:
                            log.info("History not added because stream is offline!")
                    else:
                        current_song._remove(db_session)
                self._stop_video()
                self._hide()
                db_session.commit()
            self._playlist_history()
        SongrequestQueue._update_queue()
        self.current_song_id = None
        if not self.module_opened:
            return False
        with DBManager.create_session_scope() as db_session:
            current_song = SongrequestQueue._get_current_song(db_session)
            if not current_song:
                current_song = SongrequestQueue._get_next_song(db_session)
            if current_song:
                current_song.playing = True
                current_song.queue = 0
                current_song.current_song_time = 0
                self.current_song_id = current_song.id
                song_info = current_song.song_info
                self._play(
                    current_song.video_id,
                    song_info.title,
                    current_song.requested_by.username_raw if current_song.requested_by else "Backup list",
                )
                # Pause Spotify while a requested song plays, remembering that we did.
                if self.settings["use_spotify"]:
                    is_playing, song_name, artistsArr = self.bot.spotify_api.state(self.bot.spotify_token_manager)
                    if is_playing:
                        self.bot.spotify_api.pause(self.bot.spotify_token_manager)
                        self.previously_playing_spotify = True
                # Backup-list songs re-insert themselves at the end of the queue.
                if not current_song.requested_by_id:
                    SongrequestQueue._create(
                        db_session,
                        current_song.video_id,
                        current_song.skip_after,
                        None,
                        SongrequestQueue._get_next_queue(db_session),
                    )
                db_session.commit()
                self._playlist()
                SongrequestQueue._update_queue()
                return True
        # Queue exhausted: hand playback back to Spotify if we paused it.
        if self.settings["use_spotify"]:
            if self.previously_playing_spotify:
                self.bot.spotify_api.play(self.bot.spotify_token_manager)
                self.previously_playing_spotify = False
        if self.isVideoShowing:
            self._hide()
        return False

    def _play(self, video_id, video_title, requested_by_name):
        """Emit play events to both frontends; playback stays paused until ready()."""
        self.bot.songrequest_websocket_manager.emit(
            "play", {"video_id": video_id, "video_title": video_title, "requested_by": requested_by_name}
        )
        self.bot.websocket_manager.emit("songrequest_play", WIDGET_ID, {"video_id": video_id})
        self.paused = True
        if self.showVideo:
            self._show()
        self._playlist()

    def ready(self):
        """Frontend signalled it has loaded the video: start playback, then push the volume."""
        self.resume_function()
        ScheduleManager.execute_delayed(2, self._volume)

    def _pause(self):
        """Broadcast pause and hide the widget."""
        self.bot.songrequest_websocket_manager.emit("pause", {})
        self.bot.websocket_manager.emit("songrequest_pause", WIDGET_ID, {})
        self._hide()

    def _resume(self):
        """Broadcast resume (widget also receives the current volume)."""
        self.bot.songrequest_websocket_manager.emit("resume", {})
        self.bot.websocket_manager.emit("songrequest_resume", WIDGET_ID, {"volume": self.true_volume})
        self.paused = False
        if self.showVideo:
            self._show()

    def _volume(self):
        """Broadcast the volume: user-facing value to the UI, true value to the widget."""
        self.bot.songrequest_websocket_manager.emit("volume", {"volume": self.volume_val()})
        self.bot.websocket_manager.emit("songrequest_volume", WIDGET_ID, {"volume": self.true_volume})

    def _seek(self, _time):
        """Broadcast a seek; frontend is expected to call ready() again afterwards."""
        self.bot.songrequest_websocket_manager.emit("seek", {"seek_time": _time})
        self.bot.websocket_manager.emit("songrequest_seek", WIDGET_ID, {"seek_time": _time})
        self.paused = True

    def _show(self):
        """Tell the on-stream widget to show the video."""
        self.bot.websocket_manager.emit("songrequest_show", WIDGET_ID, {})
        self.isVideoShowing = True

    def _hide(self):
        """Tell the on-stream widget to hide the video."""
        self.bot.websocket_manager.emit("songrequest_hide", WIDGET_ID, {})
        self.isVideoShowing = False

    def _playlist(self):
        """Push the next 15 queue entries to the management UI."""
        with DBManager.create_session_scope() as db_session:
            playlist = SongrequestQueue._get_playlist(db_session, 15)
        self.bot.songrequest_websocket_manager.emit("playlist", {"playlist": playlist})

    def _playlist_history(self):
        """Push the 15 most recent history entries to the management UI."""
        with DBManager.create_session_scope() as db_session:
            self.bot.songrequest_websocket_manager.emit(
                "history", {"history": SongrequestHistory._get_history(db_session, 15)}
            )

    def _stop_video(self):
        """Tell both frontends to stop the current video."""
        self.bot.songrequest_websocket_manager.emit("stop", {})
        self.bot.websocket_manager.emit("songrequest_stop", WIDGET_ID, {})
| 37.866499 | 115 | 0.600545 | 14,708 | 0.978381 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.034258 |
10fc435cfd2d251b2fecfc35f6aaa156bcaaeea6 | 157 | py | Python | irl/common/utils/mean_or_nan.py | uidilr/deepirl_chainer | 45f6134fe457bdae1484e4847ab0701f39940faa | [
"MIT"
] | 16 | 2019-06-25T11:54:38.000Z | 2022-02-13T15:14:40.000Z | irl/common/utils/mean_or_nan.py | uidilr/deepirl_chainer | 45f6134fe457bdae1484e4847ab0701f39940faa | [
"MIT"
] | 4 | 2019-07-17T15:17:25.000Z | 2020-09-03T12:12:16.000Z | irl/common/utils/mean_or_nan.py | uidilr/deepirl_chainer | 45f6134fe457bdae1484e4847ab0701f39940faa | [
"MIT"
] | 3 | 2019-07-17T16:45:07.000Z | 2020-12-15T16:52:26.000Z | import numpy as np
def mean_or_nan(xs):
    """Return the arithmetic mean of *xs*, or ``numpy.nan`` if it is empty.

    Parameters
    ----------
    xs : sequence of numbers
        Values to average; may be a list, tuple or 1-d numpy array.

    Returns
    -------
    float
        ``np.mean(xs)`` for a non-empty sequence, ``np.nan`` otherwise.
    """
    # len() instead of truthiness: a NumPy array's truth value is ambiguous
    # (raises ValueError), while len() works for lists, tuples and arrays.
    return np.mean(xs) if len(xs) > 0 else np.nan
10fd14ffad39bd5b02627e93c4a2e36424183645 | 3,065 | py | Python | test/test_tae/test_serial_runner.py | dengdifan/SMAC3 | 4739741fe9f6b0b92d419bac8f0a6252858a55dc | [
"BSD-3-Clause"
] | 1 | 2021-05-12T10:11:59.000Z | 2021-05-12T10:11:59.000Z | test/test_tae/test_serial_runner.py | dengdifan/SMAC3 | 4739741fe9f6b0b92d419bac8f0a6252858a55dc | [
"BSD-3-Clause"
] | 1 | 2021-06-17T07:57:05.000Z | 2021-06-17T07:57:05.000Z | test/test_tae/test_serial_runner.py | dengdifan/SMAC3 | 4739741fe9f6b0b92d419bac8f0a6252858a55dc | [
"BSD-3-Clause"
] | null | null | null | import time
import unittest
import unittest.mock
from smac.configspace import ConfigurationSpace
from smac.runhistory.runhistory import RunInfo, RunValue
from smac.scenario.scenario import Scenario
from smac.stats.stats import Stats
from smac.tae import StatusType
from smac.tae.execute_func import ExecuteTAFuncDict
from smac.tae.serial_runner import SerialRunner
def target(x, seed, instance):
    """Toy target algorithm: cost is x squared; seed/instance echoed as run info."""
    cost = x ** 2
    additional_info = {'key': seed, 'instance': instance}
    return cost, additional_info
def target_delayed(x, seed, instance):
    """Same contract as ``target`` but sleeps one second to simulate a slow run."""
    time.sleep(1)
    additional_info = {'key': seed, 'instance': instance}
    return x ** 2, additional_info
class TestSerialRunner(unittest.TestCase):
    def setUp(self):
        # Minimal quality-objective scenario over an empty configuration space.
        self.cs = ConfigurationSpace()
        self.scenario = Scenario(
            {'cs': self.cs, 'run_obj': 'quality', 'output_dir': ''}
        )
        self.stats = Stats(scenario=self.scenario)

    def test_run(self):
        """A single submitted configuration comes back as exactly one
        (RunInfo, RunValue) pair with the expected cost and status."""
        # The funcdict executor doubles as our SerialRunner under test.
        ta = ExecuteTAFuncDict(ta=target, stats=self.stats, run_obj='quality')
        self.assertIsInstance(ta, SerialRunner)

        info = RunInfo(config=2, instance='test', instance_specific="0",
                       seed=0, cutoff=None, capped=False, budget=0.0)
        ta.submit_run(info)
        results = ta.get_finished_runs()

        self.assertIsInstance(results, list)
        self.assertEqual(len(results), 1)
        self.assertIsInstance(results[0][0], RunInfo)
        self.assertIsInstance(results[0][1], RunValue)
        self.assertEqual(results[0][1].cost, 4)
        self.assertEqual(results[0][1].status, StatusType.SUCCESS)

    def test_serial_runs(self):
        """Two submissions must execute one after the other, never overlapping."""
        ta = ExecuteTAFuncDict(ta=target_delayed, stats=self.stats, run_obj='quality')
        self.assertIsInstance(ta, SerialRunner)

        for config in (2, 3):
            ta.submit_run(RunInfo(config=config, instance='test',
                                  instance_specific="0", seed=0,
                                  cutoff=None, capped=False, budget=0.0))
        results = ta.get_finished_runs()
        self.assertEqual(len(results), 2)

        # Serial execution: results come back left to right, and the second
        # run may only start once the first has ended.
        self.assertLessEqual(int(results[1][1].endtime),
                             int(results[0][1].starttime))

        # wait() must return immediately for a serial runner; each run takes
        # about a second, so 0.5 s is a safe bound.
        before = time.time()
        ta.wait()
        self.assertLess(time.time() - before, 0.5)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 36.488095 | 92 | 0.651876 | 2,445 | 0.797716 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.193148 |
10fd538c1e9b6fd2668077d80094be203d83e7ee | 531 | py | Python | backend/admingym/gyms/migrations/0003_auto_20200909_0508.py | ManuelRivera98/AdminGym | caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13 | [
"MIT"
] | 1 | 2020-09-14T04:23:07.000Z | 2020-09-14T04:23:07.000Z | backend/admingym/gyms/migrations/0003_auto_20200909_0508.py | ManuelRivera98/AdminGym | caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13 | [
"MIT"
] | null | null | null | backend/admingym/gyms/migrations/0003_auto_20200909_0508.py | ManuelRivera98/AdminGym | caf2b6f5e9a0ed9e98567a036bec9a34b44ecf13 | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2020-09-09 05:08
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: tightens Gym.slug_name to a unique,
    # 10-character, regex-validated identifier.

    dependencies = [
        ('gyms', '0002_auto_20200903_1750'),
    ]

    operations = [
        migrations.AlterField(
            model_name='gym',
            name='slug_name',
            # NOTE(review): the regex ^[a-zA-Z0-9]$ matches exactly ONE
            # character, so any slug of length >= 2 fails validation —
            # ^[a-zA-Z0-9]+$ was likely intended. Left unchanged here since
            # applied migration history must not be rewritten; fix in a
            # follow-up migration.
            field=models.CharField(max_length=10, unique=True, validators=[django.core.validators.RegexValidator(message='Can not have spaces.', regex='^[a-zA-Z0-9]$')]),
        ),
    ]
| 26.55 | 170 | 0.642185 | 410 | 0.772128 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.242938 |
10fefb6ca4d9966756b86304037f68034335c7e9 | 3,467 | py | Python | tests/test_cli.py | mbbroberg/SEODeploy | 5de0c3f8f3658638128445e78854e6a6e2daa8cf | [
"MIT"
] | 48 | 2020-06-16T21:15:34.000Z | 2022-02-17T14:01:52.000Z | tests/test_cli.py | mbbroberg/SEODeploy | 5de0c3f8f3658638128445e78854e6a6e2daa8cf | [
"MIT"
] | 2 | 2020-07-06T12:22:19.000Z | 2021-03-31T19:52:07.000Z | tests/test_cli.py | mbbroberg/SEODeploy | 5de0c3f8f3658638128445e78854e6a6e2daa8cf | [
"MIT"
] | 8 | 2020-06-18T17:56:18.000Z | 2021-12-10T09:21:37.000Z | #! /usr/bin/env python
# coding: utf-8
#
# Copyright (c) 2020 JR Oakes
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Test Cases for CLI Module"""
import pytest
from click.testing import CliRunner
from seodeploy.lib.cli import cli, CONFIG
from seodeploy.lib.cli import IncorrectParameters
@pytest.fixture
def runner():
    """Fixture for invoking command-line interfaces."""
    # A fresh CliRunner per test keeps invocations isolated from each other.
    return CliRunner()
@pytest.fixture
def mock_get_sample_paths(mocker):
    """Patch ``get_sample_paths`` in the CLI module to return three fake paths."""
    mock = mocker.patch("seodeploy.lib.cli.get_sample_paths")
    mock.return_value = ["/path1/", "/path2/", "/path3/"]
    return mock
class SEOTest:
    """Stand-in for ``SEOTesting`` used by the CLI tests.

    The ``mock_seotesting`` fixture patches ``SEOTesting`` so that calling it
    returns this class object itself; the CLI then calls ``execute`` directly
    on the class.  ``execute`` is therefore a ``staticmethod`` — the original
    version lacked ``self``, which happened to work for class-level calls but
    would silently bind an instance as ``sample_paths`` on instance calls.
    """

    def __init__(self, config):
        # Mirror the real class: remember the configuration it was built with.
        self.config = config

    @staticmethod
    def execute(sample_paths=None):
        """Return 0 (success) when sample paths are provided, else 1 (failure)."""
        return 0 if sample_paths else 1
@pytest.fixture
def mock_seotesting(mocker):
    """Patch ``SEOTesting`` so instantiating it yields the ``SEOTest`` stub class."""
    mock = mocker.patch("seodeploy.lib.cli.SEOTesting")
    # Calling SEOTesting(...) now returns the SEOTest class object itself.
    mock.return_value = SEOTest
    return mock
def test_sample(runner, mock_get_sample_paths):
    """``sample`` must fail without a source flag and exit 0 for valid combinations."""
    # Neither --site_id nor --sitemap_url given -> command raises.
    with pytest.raises(IncorrectParameters):
        result = runner.invoke(cli, ["sample"], catch_exceptions=False)
    # --site_id alone is sufficient and routes through get_sample_paths.
    result = runner.invoke(cli, ["sample", "--site_id", "5-111111"])
    assert mock_get_sample_paths.called
    assert result.exit_code == 0
    # --site_id with an explicit output filename.
    result = runner.invoke(
        cli, ["sample", "--site_id", "5-111111", "--samples_filename", "filename.txt"]
    )
    assert result.exit_code == 0
    # --sitemap_url is the alternative source of sample paths.
    result = runner.invoke(
        cli, ["sample", "--sitemap_url", "https://domain.com/sitemap_index.xml"]
    )
    assert result.exit_code == 0
    # --sitemap_url combined with a --limit on the number of sampled paths.
    result = runner.invoke(
        cli,
        [
            "sample",
            "--sitemap_url",
            "https://domain.com/sitemap_index.xml",
            "--limit",
            "10",
        ],
    )
    assert result.exit_code == 0
def test_execute(runner, mock_get_sample_paths, mock_seotesting):
    """``execute`` needs a samples filename (from CLI or config) and reports
    the SEOTesting outcome through its exit code."""
    # With no configured filename and no CLI option, execution must raise.
    with pytest.raises(IncorrectParameters):
        CONFIG.SAMPLES_FILENAME = None
        result = runner.invoke(cli, ["execute"], catch_exceptions=False)
    result = runner.invoke(cli, ["execute", "--samples_filename", "samples.txt"])
    assert mock_get_sample_paths.called
    assert mock_seotesting.called
    assert result.exit_code == 0
    # When no sample paths are produced, the command exits non-zero.
    mock_get_sample_paths.return_value = None
    result = runner.invoke(cli, ["sample", "--samples_filename", "samples.txt"])
    assert mock_get_sample_paths.called
    assert mock_seotesting.called
    assert result.exit_code == 1
| 30.412281 | 86 | 0.694837 | 193 | 0.055668 | 0 | 0 | 442 | 0.127488 | 0 | 0 | 1,609 | 0.46409 |
8001270d6cfc8547c4dfb75dfc1628301ed4ccf1 | 358 | bzl | Python | org_opencv.bzl | chris-blay/bazel | 21ea699a0a6ec2f56be52ca7ed78d5964aab3c27 | [
"Apache-2.0"
] | 1 | 2018-02-06T15:56:40.000Z | 2018-02-06T15:56:40.000Z | org_opencv.bzl | chris-blay/bazel | 21ea699a0a6ec2f56be52ca7ed78d5964aab3c27 | [
"Apache-2.0"
] | null | null | null | org_opencv.bzl | chris-blay/bazel | 21ea699a0a6ec2f56be52ca7ed78d5964aab3c27 | [
"Apache-2.0"
] | null | null | null | def sample(name, custom_package):
    # Starlark macro: declares an android_binary target for the OpenCV sample
    # app <name>, compiling its Java sources and bundling its resources.
    native.android_binary(
        name = name,
        deps = [":sdk"],
        srcs = native.glob(["samples/" + name + "/src/**/*.java"]),
        custom_package = custom_package,
        manifest = "samples/" + name + "/AndroidManifest.xml",
        resource_files = native.glob(["samples/" + name + "/res/**/*"]),
    )
| 35.8 | 72 | 0.550279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.23743 |
8004c7034a1064cc38f2bbc44ad4467b6a218067 | 2,909 | py | Python | deeplodocus/callbacks/overwatch.py | samuelwestlake/deeplodocus-dev | 12b283ca4eb39abf13ddc56eabc78e01e90627ff | [
"MIT"
] | null | null | null | deeplodocus/callbacks/overwatch.py | samuelwestlake/deeplodocus-dev | 12b283ca4eb39abf13ddc56eabc78e01e90627ff | [
"MIT"
] | null | null | null | deeplodocus/callbacks/overwatch.py | samuelwestlake/deeplodocus-dev | 12b283ca4eb39abf13ddc56eabc78e01e90627ff | [
"MIT"
] | null | null | null | from deeplodocus.utils.generic_utils import get_corresponding_flag
from deeplodocus.utils.notification import Notification
from deeplodocus.flags import *
from typing import Union
class OverWatch(object):
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
Metric to overwatch during the training
"""
def __init__(
self,
metric: str = DEEP_LOG_TOTAL_LOSS,
condition: Union[Flag, None] = DEEP_SAVE_CONDITION_LESS,
dataset: Union[Flag, None] = DEEP_DATASET_VAL
):
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
Initialize the OverWatchMetric instance
PARAMETERS:
-----------
:param name (str): The name of the metric to over watch
:param condition (Flag):
"""
self.metric = metric
self.dataset = DEEP_DATASET_VAL if dataset is None \
else get_corresponding_flag([DEEP_DATASET_TRAIN, DEEP_DATASET_VAL], dataset)
self.current_best = None
self._condition = get_corresponding_flag(DEEP_LIST_SAVE_CONDITIONS, condition)
self._is_better = None
self.set_is_better()
def watch(self, dataset: Flag, loss, losses, metrics=None):
if self.dataset.corresponds(dataset):
value = {**losses, **metrics, DEEP_LOG_TOTAL_LOSS.name: loss}[self.metric]
if self.current_best is None:
self.current_best = value
return True
elif self._is_better(value):
Notification(
DEEP_NOTIF_SUCCESS,
"%s improved from %.4e to %.4e : Improvement of %.2f" %
(self.metric, self.current_best, value, self.percent(value)) + "%"
)
self.current_best = value
return True
Notification(DEEP_NOTIF_INFO, "No improvement")
return False
@property
def condition(self):
return self._condition
@condition.setter
def condition(self, condition):
self._condition = condition
self.set_is_better()
def is_greater(self, x):
if x >= self.current_best:
return True
else:
return False
def is_less(self, x):
if x <= self.current_best:
return True
else:
return False
def set_is_better(self):
if self.condition.corresponds(DEEP_SAVE_CONDITION_LESS):
self._is_better = self.is_less
elif self.condition.corresponds(DEEP_SAVE_CONDITION_GREATER):
self._is_better = self.is_greater
else:
Notification(DEEP_NOTIF_FATAL, "OverWatch : Unknown condition : " % self.condition)
def percent(self, x):
return abs(self.current_best - x) / self.current_best * 100
| 29.383838 | 95 | 0.588518 | 2,725 | 0.936748 | 0 | 0 | 183 | 0.062908 | 0 | 0 | 556 | 0.191131 |
8006430bdd0d52a353c5652143e970dab52dd84f | 2,483 | py | Python | _utils/merge.py | louiscklaw/kicad-automation-scripts | 1ac8780a5cedb89b5bc5099488b95847b75ff1e1 | [
"Apache-2.0"
] | null | null | null | _utils/merge.py | louiscklaw/kicad-automation-scripts | 1ac8780a5cedb89b5bc5099488b95847b75ff1e1 | [
"Apache-2.0"
] | null | null | null | _utils/merge.py | louiscklaw/kicad-automation-scripts | 1ac8780a5cedb89b5bc5099488b95847b75ff1e1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# reference build https://travis-ci.org/louiscklaw/test_git_repo/builds/625335510
# https://docs.travis-ci.com/user/environment-variables/
import os, re, subprocess
import slack
from fabric.api import local, shell_env, lcd, run, settings
# Secrets and build metadata are injected by Travis CI as environment
# variables; a missing variable raises KeyError immediately at import time.
SLACK_TOKEN = os.environ['SLACK_TOKEN']
# Feature branches get fast-forward merged into develop.
BRANCH_TO_MERGE_INTO='develop'
BRANCH_TO_MERGE_REGEX='^feature'
TRAVIS_BRANCH = os.environ['TRAVIS_BRANCH']
TRAVIS_COMMIT = os.environ['TRAVIS_COMMIT']
TRAVIS_BUILD_NUMBER = os.environ['TRAVIS_BUILD_NUMBER']
GITHUB_REPO = os.environ['TRAVIS_REPO_SLUG']
GITHUB_TOKEN = os.environ['GITHUB_TOKEN']
TRAVIS_COMMIT_MESSAGE = os.environ['TRAVIS_COMMIT_MESSAGE']
# Authenticated push URL (embeds the GitHub token -- keep out of logs).
PUSH_URI="https://{}@github.com/{}".format(GITHUB_TOKEN, GITHUB_REPO)
# Module-level side effect: clone the repo into a fresh temp dir on import.
TEMP_DIR = local('mktemp -d', capture=True)
local('git clone "{}" "{}"'.format(PUSH_URI, TEMP_DIR))
def slack_message(message, channel):
    """Post ``message`` to the given Slack ``channel`` as TravisMergerBot."""
    web_client = slack.WebClient(token=SLACK_TOKEN)
    web_client.chat_postMessage(
        channel=channel,
        text=message,
        username='TravisMergerBot',
        icon_url=':sob:'
    )
def run_command(command_body):
    """Run ``command_body`` locally, echo stdout/stderr, return the result."""
    outcome = local(command_body, capture=True)
    print(outcome, outcome.stderr)
    return outcome
# Only branches matching the feature-branch regex are merged; anything else
# just posts a "skipped" notice to Slack.
m = re.match(BRANCH_TO_MERGE_REGEX, TRAVIS_BRANCH)
if (m == None ) :
    print('skipping merge for branch {}'.format(TRAVIS_BRANCH))
    slack_message('skip merging for BUILD #{} `{}` from `{}` to `{}`'.format(TRAVIS_BUILD_NUMBER, GITHUB_REPO, TRAVIS_BRANCH, BRANCH_TO_MERGE_INTO), '#travis-build-result')
else:
    # warn_only keeps fabric from aborting so the failure branch below runs.
    with lcd(TEMP_DIR), settings(warn_only=True):
        with( shell_env( GIT_COMMITTER_EMAIL='travis@travis', GIT_COMMITTER_NAME='Travis CI' ) ):
            print('checkout {} branch'.format(BRANCH_TO_MERGE_INTO))
            run_command('git checkout {}'.format(BRANCH_TO_MERGE_INTO))
            print('Merging "{}"'.format(TRAVIS_COMMIT))
            # Fast-forward only: refuse to create a merge commit.
            result_to_check = run_command('git merge --ff-only "{}"'.format(TRAVIS_COMMIT))
            if result_to_check.failed:
                slack_message('error found during merging BUILD{} `{}` from `{}` to `{}`'.format(TRAVIS_BUILD_NUMBER, GITHUB_REPO, TRAVIS_BRANCH, BRANCH_TO_MERGE_INTO), '#travis-build-result')
            else:
                slack_message('merging BUILD{} from {} `{}` to `{}` done, commit message "{}"'.format(TRAVIS_BUILD_NUMBER, GITHUB_REPO, TRAVIS_BRANCH, BRANCH_TO_MERGE_INTO, TRAVIS_COMMIT_MESSAGE), '#travis-build-result')
                print('push commit')
                run_command("git push {} {}".format(PUSH_URI, BRANCH_TO_MERGE_INTO))
| 40.048387 | 212 | 0.733387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 780 | 0.314136 |
800672400ab002d273b97749a1115a2fe16e3cc8 | 876 | py | Python | config.py | StuartSul/SampyoNet | a24e15e8b6c9d330fa84e1570778839d9fb5fe26 | [
"MIT"
] | null | null | null | config.py | StuartSul/SampyoNet | a24e15e8b6c9d330fa84e1570778839d9fb5fe26 | [
"MIT"
] | null | null | null | config.py | StuartSul/SampyoNet | a24e15e8b6c9d330fa84e1570778839d9fb5fe26 | [
"MIT"
] | null | null | null | ## Built-in packages
import getopt
import json
import os
import sys
## Third-party packages
from PIL import Image
import joblib
import numpy as np
import tqdm
## Tensorflow
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalMaxPool2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.layers import SeparableConv2D
import tensorflow_addons as tfa
## Global variable declarations
# NOTE(review): ``global`` at module level is a no-op -- names assigned at
# module scope are already global. These lines can be removed safely.
global INPUT_WIDTH
global INPUT_HEIGHT
global FILTER_SIZE
global DENSE_UNITS
global DROPOUT
global OUTPUT_CLASS
## Global model parameters (DO NOT CHANGE)
INPUT_WIDTH = 1500   # input image width in pixels
INPUT_HEIGHT = 850   # input image height in pixels
FILTER_SIZE = 32     # base number of convolution filters
DENSE_UNITS = 1024   # units in the fully-connected head
DROPOUT = 0.3        # dropout rate applied in the head
OUTPUT_CLASS = 3     # number of output classes
| 22.461538 | 54 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.14726 |
80067fa86a93ff0387d74613770e2eaa71c29680 | 7,715 | py | Python | src/unpackaged/abm/AnimationBehaviour/model8.py | agdturner/geog5990m | b6417820e6aaff7f0c785415c0d63eae3753a098 | [
"Apache-2.0"
] | null | null | null | src/unpackaged/abm/AnimationBehaviour/model8.py | agdturner/geog5990m | b6417820e6aaff7f0c785415c0d63eae3753a098 | [
"Apache-2.0"
] | null | null | null | src/unpackaged/abm/AnimationBehaviour/model8.py | agdturner/geog5990m | b6417820e6aaff7f0c785415c0d63eae3753a098 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__version__ 1.0.0
"""
import csv
import agentframework7 as af
import random
import matplotlib.pyplot as pyplot
from sys import argv
import matplotlib.animation as anim
import os
from multiprocessing import Process
import time
'''
Step 1: Initialise parameters
'''
print("Step 1: Initialise parameters")
print("argv", argv)
# Fall back to built-in defaults when fewer than 4 CLI arguments are given.
if len(argv) < 5:
    num_of_agents = 10
    num_of_iterations = 100
    neighbourhood = 20
    random_seed = 0
    print("argv does not contain the expected number of arguments")
    print("len(argv)", len(argv))
    print("expected len(argv) 5")
    print("expecting:")
    # NOTE(review): the four messages below all say argv[1]; presumably they
    # should read argv[1]..argv[4] to match the assignments in the else branch.
    print("argv[1] as a integer number for num_of_agents")
    print("argv[1] as a integer number for num_of_iterations")
    print("argv[1] as a integer number for neighbourhood")
    print("argv[1] as a integer number for random_seed for setting the random seed")
else:
    # set parameters from argv
    num_of_agents = int(argv[1])
    num_of_iterations = int(argv[2])
    neighbourhood = int(argv[3])
    random_seed = int(argv[4])
print("num_of_agents", str(num_of_agents))
print("num_of_iterations", str(num_of_iterations))
print("neighbourhood", str(neighbourhood))
print("random_seed", str(random_seed))
# Set random seed for reproducibility
random.seed(random_seed)
'''
Step 2: Initialise environment this will contain data about the spatial
environment in which agents act.
'''
print("Step 2: Initialise environment this will contain data about the",
      "spatial environment in which agents act.")
environment = []
# Initialise data dirs.
# Walk several directories up from the current working directory to locate
# the project base, then data/input and data/output beneath it.
dir = os.getcwd()
#print(dir)
parent = os.path.dirname(dir)
print(parent)
parent = os.path.dirname(parent)
parent = os.path.dirname(parent)
basedir = os.path.dirname(parent)
#print(basedir)
datadir = os.path.join(basedir, 'data')
#print(datadir)
inputdatadir = os.path.join(datadir, 'input')
#print(inputdatadir)
outputdatadir = os.path.join(datadir, 'output')
# Create the output directory on first run.
if not os.path.exists(outputdatadir):
    os.makedirs(outputdatadir)
#print(outputdatadir)
# Open file and read.
file = os.path.join(inputdatadir, 'in.txt')
# read csv into environment; QUOTE_NONNUMERIC converts unquoted fields to float
with open(file, newline='') as f:
    reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
    for row in reader:
        rowlist = []
        for value in row:
            rowlist.append(value)
            #print(value)
        environment.append(rowlist)
'''
Step 3: Initialise agents.
'''
print("Step 3: Initialise agents.")
agents = []
# Make the agents.
for i in range(num_of_agents):
    # Add 1 to random seed to get each agent initialised and moving differently
    # NOTE(review): this increment has no visible effect here -- random.seed()
    # is not called again, so the generator state is unchanged. Confirm whether
    # agentframework7 reads random_seed, otherwise this line can go.
    random_seed += 1
    # NOTE(review): random.randint's upper bound is inclusive, so a coordinate
    # equal to len(environment) can be produced -- confirm Agent clamps it.
    agents.append(af.Agent(environment, agents,
                  random.randint(0,len(environment)),
                  random.randint(0,len(environment[0]))))
# Flag polled by the animation loop: the model keeps stepping while True.
carry_on = True
fig = pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
def wait_fig():
    """Show the current figure briefly without blocking, then close it."""
    # Block the execution of the code until the figure is closed.
    # This works even with multiprocessing.
    if pyplot.isinteractive():
        pyplot.ioff() # this is necessary in mutliprocessing
        #pyplot.show(block=True)
        pyplot.show(block=False)
        pyplot.ion() # restitute the interractive state
    else:
        #pyplot.show(block=True)
        pyplot.show(block=False)
    # Keep the window up for ~3 seconds before closing.
    pyplot.pause(3)
    pyplot.close()
    return
def update(frame_number):
    """Advance the model one animation frame and redraw the plot.

    Moves, feeds and shares resources for every agent (in a shuffled order),
    checks the stopping condition, then redraws the environment raster and
    the agent scatter on the module-level figure.

    :param frame_number: frame index supplied by FuncAnimation (unused).
    """
    global carry_on  # needed: carry_on is assigned below when the model stops
    # Clear fig
    fig.clear()
    if carry_on:
        # Process the agents in a randomish order.
        random.shuffle(agents)
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share_with_neighbours(neighbourhood)
        # Stop once every agent has more than 50 store.
        # Bug fix: the counter used to be reset to 0 inside the loop, so the
        # stopping condition could never trigger for more than one agent.
        half_full_agent_count = 0
        for i in range(num_of_agents):
            if agents[i].store > 50:
                half_full_agent_count += 1
        if half_full_agent_count == num_of_agents:
            carry_on = False
            print("stopping condition")
    # Plot environment
    pyplot.xlim(0, len(environment))
    pyplot.ylim(0, len(environment[0]))
    pyplot.imshow(environment)
    # Plot agents
    for i in range(num_of_agents):
        pyplot.scatter(agents[i].getx(), agents[i].gety(), color="grey")
def gen_function(b = [0]):
    """Yield successive frame numbers while the model should keep running.

    Stops after ``num_of_iterations`` frames or as soon as the module-level
    ``carry_on`` flag is cleared by the update step.
    """
    global carry_on
    frame = 0
    while frame < num_of_iterations and carry_on:
        yield frame
        frame += 1
def runAnimation():
    '''
    Step 4: Animate agents.
    '''
    print("Step 4: Animate agents.")
    #animation = anim.FuncAnimation(fig, update, interval=1)
    #animation = anim.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)
    #animation = anim.FuncAnimation(fig, update, interval=1, repeat=False, frames=num_of_iterations)
    # gen_function supplies frame numbers until the stopping criterion fires.
    animation = anim.FuncAnimation(fig, update, frames=gen_function(), repeat=False)
    """Create animated plot. Continues to update the plot until stopping criteria is met."""
    pyplot.show()
    """Display the plot."""
    wait_fig()
    return
def main(): # it is important that ALL the code be typed inside
            # this function, otherwise the program will do weird
            # things with the Ipython or even the Python console.
            # Outside of this condition, type nothing but import
            # clauses and function/class definitions.
    if __name__ != '__main__': return
    # NOTE(review): target=runAnimation() CALLS the function here and passes
    # its return value (None) to Process, so the animation actually runs in
    # this process and the subprocess does nothing. Presumably
    # target=runAnimation (no parentheses) was intended -- confirm.
    p = Process(target=runAnimation())
    p.start()
    #print('hello', flush = True) #just to have something printed here
    p.join() # suppress this command if you want the animation be executed in
             # parallel with the subsequent code
    #for i in range(3): # This allows to see if execution takes place after the
    #    #               process above, as should be the case because of p.join().
    #    print('world', flush = True)
    #    time.sleep(1)
    pyplot.close()
    '''
    Step 5: Write out the environment to the file dataout.csv.
    '''
    print("Step 5: Write out the environment to the file dataout.csv.")
    file = os.path.join(outputdatadir, 'dataout.csv')
    with open(file, 'w', newline='') as f2:
        writer = csv.writer(f2, delimiter=' ')
        for row in environment:
            writer.writerow(row)
    '''
    Step 6: Calculate total amount stored by all the agents and append this to the
    file dataout2.txt.
    '''
    print("Step 6: Calculate total amount stored by all the agents and append",
          "this to the file dataout2.txt.")
    total = 0
    for a in agents:
        total += a.store
        #print(total)
    print("total", total)
    # Append total to dataout2.txt
    file = os.path.join(outputdatadir, 'dataout2.txt')
    with open(file, "a") as f3:
        f3.write(str(total) + "\n")
        #f3.write("\n")
        # NOTE(review): the two lines below lack parentheses, so neither
        # method is actually called; the with-block closes the file anyway.
        f3.flush
        f3.close
main()
| 32.280335 | 100 | 0.628905 | 0 | 0 | 239 | 0.030979 | 0 | 0 | 0 | 0 | 3,631 | 0.470642 |
80083e1dfe6103dbfacdadbdcb511c7186bad38a | 26 | py | Python | password_policies/tests/__init__.py | manuerux/django-password-policies-iplweb | 5bab0277671fb8c853cec9c8aad64d92195030e9 | [
"BSD-3-Clause"
] | 5 | 2018-06-21T14:18:56.000Z | 2021-07-08T17:50:02.000Z | password_policies/tests/__init__.py | manuerux/django-password-policies-iplweb | 5bab0277671fb8c853cec9c8aad64d92195030e9 | [
"BSD-3-Clause"
] | 20 | 2018-01-25T22:01:25.000Z | 2022-03-15T13:26:47.000Z | password_policies/tests/__init__.py | manuerux/django-password-policies-iplweb | 5bab0277671fb8c853cec9c8aad64d92195030e9 | [
"BSD-3-Clause"
] | 19 | 2018-01-25T21:04:09.000Z | 2022-03-01T11:26:35.000Z | from ..receivers import *
| 13 | 25 | 0.730769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8008fc7a56eccf641872f78a3c91511e15979ffd | 3,156 | py | Python | manubot/cite/tests/test_citekey.py | olgabot/manubot | ddd099516d58d1428f92d91a69e4b7295de13335 | [
"BSD-3-Clause"
] | 1 | 2020-01-08T20:17:51.000Z | 2020-01-08T20:17:51.000Z | manubot/cite/tests/test_citekey.py | olgabot/manubot | ddd099516d58d1428f92d91a69e4b7295de13335 | [
"BSD-3-Clause"
] | null | null | null | manubot/cite/tests/test_citekey.py | olgabot/manubot | ddd099516d58d1428f92d91a69e4b7295de13335 | [
"BSD-3-Clause"
] | null | null | null | """Tests rest of functions in manubot.cite, not covered by test_citekey_api.py."""
import pytest
from manubot.cite.citekey import (
citekey_pattern,
shorten_citekey,
infer_citekey_prefix,
inspect_citekey,
)
@pytest.mark.parametrize("citation_string", [
    ('@doi:10.5061/dryad.q447c/1'),
    ('@arxiv:1407.3561v1'),
    ('@doi:10.1007/978-94-015-6859-3_4'),
    ('@tag:tag_with_underscores'),
    ('@tag:tag-with-hyphens'),
    ('@url:https://greenelab.github.io/manubot-rootstock/'),
    ('@tag:abc123'),
    ('@tag:123abc'),
])
def test_citekey_pattern_match(citation_string):
    """Each of these citekeys must be accepted in full by citekey_pattern."""
    assert citekey_pattern.fullmatch(citation_string) is not None
@pytest.mark.parametrize("citation_string", [
    ('doi:10.5061/dryad.q447c/1'),
    ('@tag:abc123-'),
    ('@tag:abc123_'),
    ('@-tag:abc123'),
    ('@_tag:abc123'),
])
def test_citekey_pattern_no_match(citation_string):
    """Each of these malformed citekeys must be rejected by citekey_pattern."""
    assert citekey_pattern.fullmatch(citation_string) is None
@pytest.mark.parametrize("standard_citekey,expected", [
    ('doi:10.5061/dryad.q447c/1', 'kQFQ8EaO'),
    ('arxiv:1407.3561v1', '16kozZ9Ys'),
    ('pmid:24159271', '11sli93ov'),
    ('url:http://blog.dhimmel.com/irreproducible-timestamps/', 'QBWMEuxW'),
])
def test_shorten_citekey(standard_citekey, expected):
    """shorten_citekey must map each standard citekey to its known digest."""
    assert shorten_citekey(standard_citekey) == expected
@pytest.mark.parametrize('citekey', [
    'doi:10.7717/peerj.705',
    'doi:10/b6vnmd',
    'pmcid:PMC4304851',
    'pmid:25648772',
    'arxiv:1407.3561',
    'isbn:978-1-339-91988-1',
    'isbn:1-339-91988-5',
    'wikidata:Q1',
    'wikidata:Q50051684',
    'url:https://peerj.com/articles/705/',
])
def test_inspect_citekey_passes(citekey):
    """
    inspect_citekey returns None for well-formed citekeys, so every one of
    these must come back clean.
    """
    assert inspect_citekey(citekey) is None
@pytest.mark.parametrize(['citekey', 'contains'], [
    ('doi:10.771/peerj.705', 'Double check the DOI'),
    ('doi:10/b6v_nmd', 'Double check the shortDOI'),
    ('doi:7717/peerj.705', "must start with '10.'"),
    ('doi:b6vnmd', "must start with '10.'"),
    ('pmcid:25648772', "must start with 'PMC'"),
    ('pmid:PMC4304851', "Should 'pmid:PMC4304851' switch the citation source to 'pmcid'?"),
    ('isbn:1-339-91988-X', 'identifier violates the ISBN syntax'),
    ('wikidata:P212', "item IDs must start with 'Q'"),
    ('wikidata:QABCD', 'does not conform to the Wikidata regex'),
])
def test_inspect_citekey_fails(citekey, contains):
    """
    inspect_citekey returns a diagnostic string for malformed citekeys; the
    report must mention the expected problem.
    """
    report = inspect_citekey(citekey)
    assert isinstance(report, str)
    assert contains in report
@pytest.mark.parametrize(['citation', 'expect'], [
    ('doi:not-a-real-doi', 'doi:not-a-real-doi'),
    ('DOI:not-a-real-doi', 'doi:not-a-real-doi'),
    ('uRl:mixed-case-prefix', 'url:mixed-case-prefix'),
    ('raw:raw-citation', 'raw:raw-citation'),
    ('no-prefix', 'raw:no-prefix'),
    ('no-prefix:but-colon', 'raw:no-prefix:but-colon'),
])
def test_infer_citekey_prefix(citation, expect):
    # Known prefixes are lower-cased; anything without one is treated as raw:.
    assert infer_citekey_prefix(citation) == expect
| 31.247525 | 91 | 0.667934 | 0 | 0 | 0 | 0 | 2,913 | 0.923004 | 0 | 0 | 1,668 | 0.528517 |
8009a4111e57ce8f5e9c9514ac216b855ebf01d1 | 1,727 | py | Python | test/main_page_tests.py | savvagen/playwright-pytest-example | acf4e89d0a7dcc1b71b1eb012366b1393f515b41 | [
"Apache-2.0"
] | 19 | 2020-11-15T16:37:51.000Z | 2022-03-23T02:41:38.000Z | test/main_page_tests.py | cjydayang/playwright-pytest-example | acf4e89d0a7dcc1b71b1eb012366b1393f515b41 | [
"Apache-2.0"
] | 2 | 2021-01-03T21:38:37.000Z | 2021-01-27T08:32:00.000Z | test/main_page_tests.py | cjydayang/playwright-pytest-example | acf4e89d0a7dcc1b71b1eb012366b1393f515b41 | [
"Apache-2.0"
] | 8 | 2020-11-05T23:27:37.000Z | 2022-03-16T08:07:00.000Z | import pytest
from playwright.sync_api import Page
from pages.main_page.main_page import MainPage
from test.test_base import *
import logging
import re
logger = logging.getLogger("test")
@pytest.mark.only_browser("chromium")
def test_find_element_list(page: Page):
    """Smoke-test the main page: the article list and nav bar are rendered."""
    main_page = MainPage(base_url, page)
    main_page.delete_cookies()
    main_page.open()
    # Wait articles and page to be loaded
    main_page.loader().should_be_visible()
    main_page.loader().should_be_hidden()
    assert main_page.register_button().is_visible()
    # NOTE(review): ".*" also matches the empty string, so the pattern-based
    # asserts below can only fail when inner_text() returns None.
    pattern = re.compile(".*")
    # Check articles
    assert main_page.articles().size() == 10
    assert main_page.articles().get(1).is_visible()
    assert pattern.match(main_page.articles().get(1).title().inner_text())
    assert pattern.match(main_page.articles().get(1).body().inner_text())
    logger.info(main_page.articles().get(2).title().inner_text())
    # Check nav panel
    assert main_page.nav_bar().is_visible()
    assert main_page.nav_bar().login_button().is_visible()
    logger.info(main_page.nav_bar().login_button().inner_text())
    logger.info(main_page.nav_bar().register_button().inner_text())
    # articles = page.querySelectorAll(".article-preview")
    # assert len(articles) == 10
    # texts = page.evalOnSelectorAll(".article-preview h1", '''
    #     (elems, min) => {
    #         return elems.map(function(el) {
    #            return el.textContent //.toUpperCase()
    #         }); //.join(", ");
    #     }''')
    # assert len(texts) == 10
    # assert not texts == []
    # assert articles[0].querySelector("h1").innerText() == "Python Playwright Demo"
    # assert articles[0].querySelector("p").innerText() == "Playwright Demo"
| 38.377778 | 84 | 0.672264 | 0 | 0 | 0 | 0 | 1,536 | 0.889404 | 0 | 0 | 594 | 0.343949 |
8009f7a8792ab3c1e109b6ae68aa8435914b6d9f | 5,703 | py | Python | tfumap/parametric_tsne.py | EhsanKA/ParametricUMAP_paper | 14b6ef3ba6e46a8cc666e22eb9f9a4a1611d3c51 | [
"MIT"
] | 124 | 2020-09-27T23:59:01.000Z | 2022-03-22T06:27:35.000Z | tfumap/parametric_tsne.py | EhsanKA/ParametricUMAP_paper | 14b6ef3ba6e46a8cc666e22eb9f9a4a1611d3c51 | [
"MIT"
] | 2 | 2021-02-05T18:13:13.000Z | 2021-11-01T14:55:08.000Z | tfumap/parametric_tsne.py | EhsanKA/ParametricUMAP_paper | 14b6ef3ba6e46a8cc666e22eb9f9a4a1611d3c51 | [
"MIT"
] | 16 | 2020-09-28T07:43:21.000Z | 2022-03-21T00:31:34.000Z | ### based on https://github.com/kylemcdonald/Parametric-t-SNE/blob/master/Parametric%20t-SNE%20(Keras).ipynb
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.losses import categorical_crossentropy
from tqdm.autonotebook import tqdm
import tensorflow as tf
def Hbeta(D, beta):
    """Gaussian kernel values for squared distances ``D`` at precision ``beta``.

    Returns the Shannon entropy H (log-perplexity) of the induced
    distribution together with the normalised probabilities P.
    """
    kernel = np.exp(-beta * D)
    total = np.sum(kernel)
    H = np.log(total) + beta * np.sum(np.multiply(D, kernel)) / total
    return H, kernel / total
def x2p(X, u=15, tol=1e-4, print_iter=500, max_tries=50, verbose=0):
    """
    % X2P Identifies appropriate sigma's to get kk NNs up to some tolerance
    %
    % [P, beta] = x2p(xx, kk, tol)
    %
    % Identifies the required precision (= 1 / variance^2) to obtain a Gaussian
    % kernel with a certain uncertainty for every datapoint. The desired
    % uncertainty can be specified through the perplexity u (default = 15). The
    % desired perplexity is obtained up to some tolerance that can be specified
    % by tol (default = 1e-4).
    % The function returns the final Gaussian kernel in P, as well as the
    % employed precisions per instance in beta.
    %
    """
    # Initialize some variables
    n = X.shape[0] # number of instances
    P = np.zeros((n, n)) # empty probability matrix
    beta = np.ones(n) # empty precision vector
    logU = np.log(u) # log of perplexity (= entropy)
    # Compute pairwise distances
    if verbose > 0:
        print("Computing pairwise distances...")
    sum_X = np.sum(np.square(X), axis=1)
    # note: translating sum_X' from matlab to numpy means using reshape to add a dimension
    # ||xi||^2 + ||xj||^2 - 2 xi.xj gives the full squared-distance matrix.
    D = sum_X + sum_X[:, None] + -2 * X.dot(X.T)
    # Run over all datapoints
    if verbose > 0:
        print("Computing P-values...")
    for i in range(n):
        if verbose > 1 and print_iter and i % print_iter == 0:
            print("Computed P-values {} of {} datapoints...".format(i, n))
        # Set minimum and maximum values for precision
        betamin = float("-inf")
        betamax = float("+inf")
        # Compute the Gaussian kernel and entropy for the current precision
        # (the point's own distance is excluded via ``indices``).
        indices = np.concatenate((np.arange(0, i), np.arange(i + 1, n)))
        Di = D[i, indices]
        H, thisP = Hbeta(Di, beta[i])
        # Evaluate whether the perplexity is within tolerance
        Hdiff = H - logU
        tries = 0
        # Binary search on beta[i] until the entropy matches log-perplexity.
        while abs(Hdiff) > tol and tries < max_tries:
            # If not, increase or decrease precision
            if Hdiff > 0:
                betamin = beta[i]
                if np.isinf(betamax):
                    beta[i] *= 2
                else:
                    beta[i] = (beta[i] + betamax) / 2
            else:
                betamax = beta[i]
                if np.isinf(betamin):
                    beta[i] /= 2
                else:
                    beta[i] = (beta[i] + betamin) / 2
            # Recompute the values
            H, thisP = Hbeta(Di, beta[i])
            Hdiff = H - logU
            tries += 1
        # Set the final row of P
        P[i, indices] = thisP
    if verbose > 0:
        print("Mean value of sigma: {}".format(np.mean(np.sqrt(1 / beta))))
        print("Minimum value of sigma: {}".format(np.min(np.sqrt(1 / beta))))
        print("Maximum value of sigma: {}".format(np.max(np.sqrt(1 / beta))))
    return P, beta
def compute_joint_probabilities(
    samples, batch_size=5000, d=2, perplexity=30, tol=1e-5, verbose=0
):
    """ This function computes the probababilities in X, split up into batches
    % Gaussians employed in the high-dimensional space have the specified
    % perplexity (default = 30). The number of degrees of freedom of the
    % Student-t distribution may be specified through v (default = d - 1).

    Returns an array of shape (batch_count, batch_size, batch_size) holding
    the symmetrised, normalised joint probabilities per batch.
    """
    # NOTE(review): v is computed but never used in this function.
    v = d - 1
    # Initialize some variables
    n = samples.shape[0]
    batch_size = min(batch_size, n)
    # Precompute joint probabilities for all batches
    if verbose > 0:
        print("Precomputing P-values...")
    # NOTE(review): trailing samples are dropped when n is not a multiple of
    # batch_size (batch_count floors the division).
    batch_count = int(n / batch_size)
    P = np.zeros((batch_count, batch_size, batch_size))
    # for each batch of data
    for i, start in enumerate(tqdm(range(0, n - batch_size + 1, batch_size))):
        # select batch
        curX = samples[start : start + batch_size]
        # compute affinities using fixed perplexity
        P[i], _ = x2p(curX, perplexity, tol, verbose=verbose)
        # make sure we don't have NaN's
        P[i][np.isnan(P[i])] = 0
        # make symmetric
        P[i] = P[i] + P[i].T # / 2
        # obtain estimation of joint probabilities
        P[i] = P[i] / P[i].sum()
        P[i] = np.maximum(P[i], np.finfo(P[i].dtype).eps)
    return P
def z2p(z, d, n, eps=10e-15):
    """ Computes the low dimensional probability

    Student-t joint probabilities Q over the n embedded points ``z`` using
    v = d - 1 degrees of freedom; the diagonal is zeroed and the matrix is
    normalised to sum to one, clipped below at ``eps``.
    """
    v = d - 1
    # Squared pairwise distances via ||zi||^2 + ||zj||^2 - 2 zi.zj.
    sum_act = tf.math.reduce_sum(tf.math.square(z), axis=1)
    Q = K.reshape(sum_act, [-1, 1]) + -2 * tf.keras.backend.dot(z, tf.transpose(z))
    Q = (sum_act + Q) / v
    # Student-t kernel (1 + d^2/v)^(-(v+1)/2).
    Q = tf.math.pow(1 + Q, -(v + 1) / 2)
    # Exclude self-similarities by zeroing the diagonal.
    Q *= 1 - np.eye(n)
    Q /= tf.math.reduce_sum(Q)
    Q = tf.math.maximum(Q, eps)
    return Q
def tsne_loss(d, batch_size, eps=10e-15):
    """Build a Keras-style t-SNE loss closure for embeddings of dimension d."""
    # v = d - 1.0
    def loss(P, Z):
        """ KL divergence
        P is the joint probabilities for this batch (Keras loss functions call this y_true)
        Z is the low-dimensional output (Keras loss functions call this y_pred)
        """
        Q = z2p(Z, d, n=batch_size, eps=eps)
        return tf.math.reduce_sum(P * tf.math.log((P + eps) / (Q + eps)))
    return loss
| 34.98773 | 108 | 0.593898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,481 | 0.435034 |
800afe51edd1ad15aa21801c91bf0ec428b48bda | 3,466 | py | Python | test/unit/git_class/gitmerge_commits_diff.py | deepcoder42/git-lib | 7f5736ea71d6592390222a214b0e51cd3c3151f8 | [
"MIT"
] | null | null | null | test/unit/git_class/gitmerge_commits_diff.py | deepcoder42/git-lib | 7f5736ea71d6592390222a214b0e51cd3c3151f8 | [
"MIT"
] | null | null | null | test/unit/git_class/gitmerge_commits_diff.py | deepcoder42/git-lib | 7f5736ea71d6592390222a214b0e51cd3c3151f8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Classification (U)
"""Program: gitmerge_commits_diff.py
Description: Unit testing of gitmerge.commits_diff in git_class.py.
Usage:
test/unit/git_class/gitmerge_commits_diff.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import collections
# Local
sys.path.append(os.getcwd())
import git_class
import version
__version__ = version.__version__
class Commits(object):
    """Class: Commits

    Description: Stub standing in for git.gitrepo, returning canned commit
        lists selected by a scenario number.

    Methods:
        __init__
        iter_commits
    """

    def __init__(self, test_type):
        """Store the scenario selector and prime the captured argument."""
        self.test_type = test_type
        self.data_str = None

    def iter_commits(self, data_str):
        """Record ``data_str`` and return the canned commits for the scenario."""
        self.data_str = data_str
        index = collections.namedtuple('INDEX', 'commits')
        if self.test_type == 1:
            names = ['file1', 'file2']
        elif self.test_type == 2:
            names = ['file2']
        elif self.test_type == 3:
            names = []
        return [index(name) for name in names]
class UnitTest(unittest.TestCase):
    """Class: UnitTest
    Description: Class which is a representation of a unit testing.
    Methods:
        setUp
        test_commitsdiff_zero
        test_commitsdiff_one
        test_commitsdiff_two
    """
    def setUp(self):
        """Function: setUp
        Description: Initialization for unit testing.
        Arguments:
        """
        self.repo_name = "Repo_name"
        self.git_dir = "/directory/git"
        self.url = "URL"
        self.branch = "Remote_branch"
        self.mod_branch = "Mod_branch"
        self.gitr = git_class.GitMerge(self.repo_name, self.git_dir, self.url,
                                       self.branch, self.mod_branch)
    def test_commitsdiff_zero(self):
        """Function: test_commitsdiff_zero
        Description: Test with zero commits difference.
        Arguments:
        """
        # Fake git repo exposing only iter_commits (Commits(3) yields no commits).
        giti = collections.namedtuple('GIT', 'iter_commits')
        commit = Commits(3).iter_commits
        self.gitr.gitrepo = giti(commit)
        self.assertEqual(self.gitr.commits_diff("Data"), 0)
    def test_commitsdiff_one(self):
        """Function: test_commitsdiff_one
        Description: Test with one commit difference.
        Arguments:
        """
        # Commits(2) yields a single canned commit.
        giti = collections.namedtuple('GIT', 'iter_commits')
        commit = Commits(2).iter_commits
        self.gitr.gitrepo = giti(commit)
        self.assertEqual(self.gitr.commits_diff("Data"), 1)
    def test_commitsdiff_two(self):
        """Function: test_commitsdiff_two
        Description: Test with two commits difference.
        Arguments:
        """
        # Commits(1) yields two canned commits.
        giti = collections.namedtuple('GIT', 'iter_commits')
        commit = Commits(1).iter_commits
        self.gitr.gitrepo = giti(commit)
        self.assertEqual(self.gitr.commits_diff("Data"), 2)
if __name__ == "__main__":
    # Allow the test module to be executed directly from the command line.
    unittest.main()
| 19.91954 | 78 | 0.613099 | 2,877 | 0.830063 | 0 | 0 | 0 | 0 | 0 | 0 | 1,556 | 0.448932 |
800d28c1f91ca5044c0a8edd4c02b62977545c76 | 500 | py | Python | Chapter04/code/func_perf3.py | PacktPublishing/IPython-7-Cookbook | 8b08b1de8b1b1ac75116873d820ed289d4173327 | [
"MIT"
] | 2 | 2019-03-30T02:44:37.000Z | 2021-10-04T17:56:42.000Z | Chapter04/code/func_perf3.py | PacktPublishing/IPython-7-Cookbook | 8b08b1de8b1b1ac75116873d820ed289d4173327 | [
"MIT"
] | null | null | null | Chapter04/code/func_perf3.py | PacktPublishing/IPython-7-Cookbook | 8b08b1de8b1b1ac75116873d820ed289d4173327 | [
"MIT"
] | 1 | 2019-01-30T01:59:44.000Z | 2019-01-30T01:59:44.000Z | import random
import string
def test_append(lst):
ret_val = []
for w in lst:
ret_val.append(w.lower( ))
return ret_val
def test_map(lst):
ret_val = map(str.lower, lst)
return ret_val
def run_tests(n):
    """Run both lower-casing implementations ``n`` times on random input.

    Each round builds a fresh 1000-character alphanumeric string, splits it
    into a list of characters, and feeds that list to both benchmarks.
    """
    alphabet = string.ascii_uppercase + string.digits
    for _ in range(n):
        chars = list(''.join(random.choices(alphabet, k=1000)))
        test_append(chars)
        test_map(chars)
def main( ):
    # 100000 rounds: sized for profiling, takes noticeable wall time.
    run_tests(100000)
# Entry point when executed as a script (e.g. under a profiler).
if __name__ == "__main__":
    main( )
| 19.230769 | 85 | 0.622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.024 |
800e0260e131b801268f2e316c8771b9b824cfe5 | 4,826 | py | Python | mhmd-driver/parse_mtrees.py | cnasikas/smpc-analytics | bf663c38911b57c4b004498341a7882a57a21be2 | [
"MIT"
] | 12 | 2019-10-14T14:42:52.000Z | 2022-01-10T10:24:29.000Z | mhmd-driver/parse_mtrees.py | cnasikas/smpc-analytics | bf663c38911b57c4b004498341a7882a57a21be2 | [
"MIT"
] | null | null | null | mhmd-driver/parse_mtrees.py | cnasikas/smpc-analytics | bf663c38911b57c4b004498341a7882a57a21be2 | [
"MIT"
] | 1 | 2021-03-10T08:45:23.000Z | 2021-03-10T08:45:23.000Z | import os
import sys
import json
import argparse
from huepy import *
parser = argparse.ArgumentParser()
parser.add_argument('file', help= 'File with mtrees data (CSV or JSON)')
parser.add_argument('--mtrees', help = 'File with the mesh dictionary to be created (names to ids).', default = 'mhmd-driver/m.json')
parser.add_argument('--mtrees_inverted', help = 'File with the inverted mesh dictionary to be created (ids to names).', default = 'mhmd-driver/m_inv.json')
parser.add_argument('--mapping', help = 'File with the mesh term mapping to be created (values to integers).', default = 'mhmd-driver/mesh_mapping.json')
parser.add_argument('--verbose', help = 'See verbose output', action = 'store_true')
args = parser.parse_args()
def mesh_tree_depth(id):
    """Return the depth of a MeSH tree identifier.

    Single-letter ids ("A", "C", ...) are top-level categories (depth 0).
    Deeper ids carry one dot per additional level beyond the first
    numeric group, e.g. "C01" -> 1, "C01.123" -> 2.
    """
    return 0 if len(id) == 1 else id.count('.') + 1
def main():
    """Build the MeSH name->ids and id->name dictionaries plus the
    direct-children integer mapping, and dump all three as JSON files
    at the paths given on the command line.
    """
    # d: term name -> {'ids': [tree ids]}; d_inv: tree id -> term name.
    d = {}
    d_inv = {}
    if args.verbose:
        print(run('Reading mtrees file..'))
    if args.file.endswith('.json'):
        # JSON input: a list of {'name': ..., 'id': ...} entries.
        mtrees = json.load(open(args.file))
        length = len(mtrees)
        if args.verbose:
            print(info('File contains ' + str(length) + ' entries.'))
            print(run('Building dictionairies..'))
        for entry in mtrees:
            # NOTE: 'id' shadows the builtin here and below.
            name = entry['name']
            # code = entry['code']
            id = entry['id']
            if name in d:
                d[name]['ids'].append(id)
            else:
                # d[name] = {'code': code, 'ids': [id]}
                d[name] = {'ids': [id]}
            if id not in d_inv:
                d_inv[id] = name
            else:
                # NOTE(review): this branch fires when id is ALREADY in
                # d_inv (a duplicate id); the message text looks inverted.
                print(bad(id+' not in d'))
    elif args.file.endswith('.csv'):
        # CSV input: "name;id" per line (semicolon separated).
        # NOTE: 'input' shadows the builtin for the duration of the block.
        with open(args.file, 'r') as input:
            if args.verbose:
                print(run('Building dictionairies..'))
            for line in input:
                name = line.split(';')[0]
                id = line.split(';')[1].strip()
                if name in d:
                    d[name]['ids'].append(id)
                else:
                    # d[name] = {'code': code, 'ids': [id]}
                    d[name] = {'ids': [id]}
                if id not in d_inv:
                    d_inv[id] = name
                else:
                    print(bad(id+' not in d'))
    else:
        print(bad('Wrong input file format'))
        print(bad('Expected a CSV or JSON file.'))
    # Add missing values -- Top tree level
    # The 16 MeSH top-level categories are not listed in the input file,
    # so they are hard-coded into both dictionaries here.
    d['Anatomy'] = {'ids':['A']}
    d['Organisms'] = {'ids':['B']}
    d['Diseases'] = {'ids':['C']}
    d['Chemicals and Drugs'] = {'ids':['D']}
    d['Analytical, Diagnostic and Therapeutic Techniques, and Equipment'] = {'ids':['E']}
    d['Psychiatry and Psychology'] = {'ids':['F']}
    d['Phenomena and Processes'] = {'ids':['G']}
    d['Disciplines and Occupations'] = {'ids':['H']}
    d['Anthropology, Education, Sociology, and Social Phenomena'] = {'ids':['I']}
    d['Technology, Industry, and Agriculture'] = {'ids':['J']}
    d['Humanities'] = {'ids':['K']}
    d['Information Science'] = {'ids':['L']}
    d['Named Groups'] = {'ids':['M']}
    d['Health Care'] = {'ids':['N']}
    d['Publication Characteristics'] = {'ids':['V']}
    d['Geographicals'] = {'ids':['Z']}
    d_inv['A'] = 'Anatomy'
    d_inv['B'] = 'Organisms'
    d_inv['C'] = 'Diseases'
    d_inv['D'] = 'Chemicals and Drugs'
    d_inv['E'] = 'Analytical, Diagnostic and Therapeutic Techniques, and Equipment'
    d_inv['F'] = 'Psychiatry and Psychology'
    d_inv['G'] = 'Phenomena and Processes'
    d_inv['H'] = 'Disciplines and Occupations'
    d_inv['I'] = 'Anthropology, Education, Sociology, and Social Phenomena'
    d_inv['J'] = 'Technology, Industry, and Agriculture'
    d_inv['K'] = 'Humanities'
    d_inv['L'] = 'Information Science'
    d_inv['M'] = 'Named Groups'
    d_inv['N'] = 'Health Care'
    d_inv['V'] = 'Publication Characteristics'
    d_inv['Z'] = 'Geographicals'
    with open(args.mtrees, 'w') as outfile:
        json.dump(d, outfile)
    with open(args.mtrees_inverted, 'w') as outfile:
        json.dump(d_inv, outfile)
    print(good('Dictionaries successfully stored at ' + args.mtrees + ' and ' + args.mtrees_inverted + '.'))
    # Map every node id to {child_id: index} for its direct children
    # (nodes one level deeper whose id extends the parent's prefix).
    # Note this scan is quadratic in the number of ids.
    direct_children = {}
    if args.verbose:
        print(run('Generating Mesh mapping..'))
    for id in d_inv.keys():
        depth = mesh_tree_depth(id)
        children_ids = [key for key in d_inv.keys() if key.startswith(id) and mesh_tree_depth(key) == depth + 1]
        childred_mapping = dict((id , i) for i,id in enumerate(children_ids) )
        direct_children[id] = childred_mapping
        # print(info(id+': --> '+str(childred_mapping)))
    with open(args.mapping, 'w') as outfile:
        json.dump(direct_children, outfile)
    print(good('Mesh mapping generated successfully at' + args.mapping + '.'))
# Script entry point; all configuration comes from the argparse flags above.
if __name__ == '__main__':
    main()
800e44a4c6050f23f945f1f76634a4002f79fc45 | 1,157 | py | Python | gen_art/graphics/Context.py | shnupta/SeeMyFeels | 0a37acc3e628d69f96197907db1c2ebd30b78469 | [
"MIT"
] | 3 | 2021-04-01T21:16:35.000Z | 2022-03-12T21:17:51.000Z | gen_art/graphics/Context.py | shnupta/SeeMyFeels | 0a37acc3e628d69f96197907db1c2ebd30b78469 | [
"MIT"
] | null | null | null | gen_art/graphics/Context.py | shnupta/SeeMyFeels | 0a37acc3e628d69f96197907db1c2ebd30b78469 | [
"MIT"
] | null | null | null | import cairo
from uuid import uuid4
from gen_art.graphics.Helpers import does_path_exist, open_file
from os import path
from datetime import datetime
class DrawContext:
    """Thin wrapper around a cairo drawing surface.

    Creates an ARGB32 PNG surface of the requested size, exposes the
    cairo context for drawing, and can export the surface to a PNG file
    (optionally opening it afterwards).
    """
    def __init__(self, width, height, output_path, open_bool):
        # open_bool: open the exported file with the system viewer.
        self.open_bool = open_bool
        self.width = width
        self.height = height
        self.output_path = output_path
        self.init()
    def init(self):
        # Build the surface and keep the drawing context around.
        self.cairo_context = self.setup_png()
    def setup_png(self):
        """Create the backing ARGB32 surface and return a context on it."""
        self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, self.width, self.height)
        return cairo.Context(self.surface)
    def export_png(self):
        """Write the surface to output_path, optionally opening the file."""
        self.surface.write_to_png(self.output_path)
        print("INFO: Saving file to {}".format(self.output_path))
        if self.open_bool:
            print("INFO: Opening file {}".format(self.output_path))
            open_file(self.output_path)
    def export(self):
        self.export_png()
    @property
    def context(self):
        return self.cairo_context
    @context.setter
    def context(self, context):
        # Bug fix: the original assigned ``self.context = context``,
        # which re-enters this setter and recurses until RecursionError.
        # Store on the backing attribute instead.
        self.cairo_context = context
    def get_output_path(self):
        return self.output_path
| 26.906977 | 87 | 0.666379 | 1,004 | 0.867761 | 0 | 0 | 144 | 0.12446 | 0 | 0 | 48 | 0.041487 |
800e7bf42d2e64bac20018e9d06f0084d64e4d99 | 19,080 | py | Python | WISH/WISH.py | quantumopticslkb/phase_retrieval | 5bb7820d72aa4ba8a227753029738a5cfb2a581f | [
"MIT"
] | null | null | null | WISH/WISH.py | quantumopticslkb/phase_retrieval | 5bb7820d72aa4ba8a227753029738a5cfb2a581f | [
"MIT"
] | null | null | null | WISH/WISH.py | quantumopticslkb/phase_retrieval | 5bb7820d72aa4ba8a227753029738a5cfb2a581f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author : Tangui ALADJIDI
After the Matlab code from Yicheng WU
"""
import numpy as np
import matplotlib.pyplot as plt
from LightPipes import *
from PIL import Image
from time import time
from mpl_toolkits.axes_grid1 import make_axes_locatable
import time
import sys
import configparser
from scipy import io
import cupy as cp
from scipy.ndimage import zoom
"""
IMPORTANT NOTE : If the cupy module won't work, check that you have the right version of CuPy installed for you version
of CUDA Toolkit : https://docs-cupy.chainer.org/en/stable/install.html
If you are sure of you CuPy install, then it is possible that your nvidia kernel module froze or that some program
bars the access to CuPy. In this case reload your Nvidia module using these commands (in Unix) :
sudo rmmod nvidia_uvm
sudo modprobe nvidia_uvm
This usually happens after waking up you computer. You can always remove the lines with cupy code / "gpu" functions and
replace them with the surrounding commented lines to run the code in CPU mode.
"""
class WISH_Sensor:
    """WISH (wavefront imaging) sensor simulation and reconstruction.

    Bundles Fresnel propagators (full and simplified, each in a
    CPU/NumPy and a GPU/CuPy flavour), a simulated-measurement
    generator, SLM/image pre-processing, and the Gerchberg-Saxton
    reconstruction loop (``WISHrun``).  Physical and algorithmic
    parameters are read from the ``[params]`` section of a config file.
    """
    def __init__(self, cfg_path):
        # cfg_path: path to an INI-style .conf file with a [params] section.
        conf = configparser.ConfigParser()
        conf.read(cfg_path)
        self.d_SLM = float(conf["params"]["d_SLM"])
        self.d_CAM = float(conf["params"]["d_CAM"])
        self.wavelength = float(conf["params"]["wavelength"])
        self.z = float(conf["params"]["z"]) # propagation distance
        self.N_gs = int(conf["params"]["N_gs"]) # number of GS iterations
        self.N_mod = int(conf["params"]["N_mod"]) # number of modulation steps
        self.N_os = int(conf["params"]["N_os"]) #number of observations per image (to avg noise)
    def frt(self, A0: np.ndarray, d1: float, z: float):
        """
        Implements propagation using Fresnel diffraction (CPU/NumPy),
        including the input/output quadratic phase factors Q1 and Q2.
        :param A0: Field to propagate
        :param d1: Sampling size of the field A0
        :param z : Propagation distance in metres
        :return: A : Propagated field
        """
        wv = self.wavelength
        k = 2*np.pi / wv
        N = A0.shape[0]
        x = np.linspace(0, N - 1, N) - (N / 2) * np.ones(N)
        y = np.linspace(0, N - 1, N) - (N / 2) * np.ones(N)
        # Sampling size in the destination plane set by the FFT grid.
        d2 = wv * z / (N*d1)
        X1, Y1 = d1 * np.meshgrid(x, y)[0], d1 * np.meshgrid(x, y)[1]
        X2, Y2 = d2 * np.meshgrid(x, y)[0], d2 * np.meshgrid(x, y)[1]
        R1 = np.sqrt(X1 ** 2 + Y1 ** 2)
        R2 = np.sqrt(X2 ** 2 + Y2 ** 2)
        D = 1 /(1j*wv*abs(z))
        Q1 = np.exp(1j*(k/(2*z))*R1**2)
        Q2 = np.exp(1j*(k/(2*z))*R2**2)
        # Forward propagation uses fft2, backward (z<0) uses ifft2.
        if z >=0:
            A = D * Q2 * (d1**2) * np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(A0 * Q1), norm='ortho'))
            #A = D * (d1**2) * np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(A0 ), norm='ortho'))
        elif z<0:
            A = D * Q2 * ((N*d1) ** 2) * np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(A0 * Q1), norm='ortho'))
            #A = D * ((N*d1) ** 2) * np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(A0 ), norm='ortho'))
        #A = A/np.max(np.abs(A))
        return A
    def frt_s(self, A0: np.ndarray, d1: float, z: float):
        """
        Simplified Fresnel propagation: the quadratic phase factors are
        omitted.  CPU/NumPy version -- despite what the original
        docstring claimed, this method does NOT use CuPy or the GPU
        (see frt_gpu_s for the GPU counterpart).
        :param A0: Field to propagate
        :param d1: Sampling size of the field A0
        :param z : Propagation distance in metres
        :return: A : Propagated field
        """
        wv = self.wavelength
        k = 2*np.pi / wv
        N = A0.shape[0]
        D = 1 /(1j*wv*abs(z))
        if z >=0:
            A =D * (d1**2) * np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(A0), norm='ortho'))
        elif z<0:
            A =D * ((N*d1) ** 2) * np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(A0), norm='ortho'))
        return A
    def frt_gpu(self, A0: np.ndarray, d1: float, z: float):
        """
        Implements propagation using Fresnel diffraction. Runs on a GPU using CuPy with a CUDA backend.
        Mirrors ``frt`` with cp.* replacing np.*.
        :param A0: Field to propagate (CuPy array)
        :param d1: Sampling size of the field A0
        :param z : Propagation distance in metres
        :return: A : Propagated field
        """
        wv = self.wavelength
        k = 2*np.pi / wv
        N = A0.shape[0]
        x = cp.linspace(0, N - 1, N) - (N / 2) * cp.ones(N)
        y = cp.linspace(0, N - 1, N) - (N / 2) * cp.ones(N)
        d2 = wv * z / (N*d1)
        X1, Y1 = d1 * cp.meshgrid(x, y)[0], d1 * cp.meshgrid(x, y)[1]
        X2, Y2 = d2 * cp.meshgrid(x, y)[0], d2 * cp.meshgrid(x, y)[1]
        R1 = cp.sqrt(X1 ** 2 + Y1 ** 2)
        R2 = cp.sqrt(X2 ** 2 + Y2 ** 2)
        D = 1 /(1j*wv*abs(z))
        Q1 = cp.exp(1j*(k/(2*z))*R1**2)
        Q2 = cp.exp(1j*(k/(2*z))*R2**2)
        if z >=0:
            A =D * Q2 * (d1**2) * cp.fft.fftshift(cp.fft.fft2(cp.fft.ifftshift(A0 * Q1), norm='ortho'))
        elif z<0:
            A =D * Q2 * ((N*d1) ** 2) * cp.fft.fftshift(cp.fft.ifft2(cp.fft.ifftshift(A0 * Q1), norm='ortho'))
        return A
    def frt_gpu_s(self, A0: np.ndarray, d1: float, z: float):
        """
        Simplified Fresnel propagation (quadratic phase factors omitted)
        optimized for GPU computing. Runs on a GPU using CuPy with a CUDA backend.
        :param A0: Field to propagate (CuPy array)
        :param d1: Sampling size of the field A0
        :param z : Propagation distance in metres
        :return: A : Propagated field
        """
        wv = self.wavelength
        k = 2*np.pi / wv
        N = A0.shape[0]
        D = 1 /(1j*wv*abs(z))
        if z >=0:
            A =D * (d1**2) * cp.fft.fftshift(cp.fft.fft2(cp.fft.ifftshift(A0), norm='ortho'))
        elif z<0:
            A =D * ((N*d1) ** 2) * cp.fft.fftshift(cp.fft.ifft2(cp.fft.ifftshift(A0), norm='ortho'))
        return A
    def u4Tou3(self, u4: np.ndarray, delta4: float, z3: float):
        """
        Propagates back a field from the sensor plane to the SLM plane
        (just ``frt`` with a negated distance).
        :param u4: Field to propagate back
        :param delta4: Sampling size of the field u4
        :param z3: Propagation distance in metres
        :return: u3 the back propagated field
        """
        u3 = self.frt(u4, delta4, -z3);
        return u3
    def gen_ims(self, u3: np.ndarray, z3: float, delta3: float, Nim: int, noise: float):
        """
        Generates dummy signal in the sensor plane from the pre generated SLM patterns
        :param u3: Initial field in the SLM plane
        :param z3: Propagation distance in metres
        :param delta3: "apparent" sampling size of the SLM plane (as seen by the image plane from z3 m away)
        :param Nim: Number of images to generate
        :param noise: Intensity of the gaussian noise added to the images
        :return ims: Generated signal in the sensor plane of size (N,N,Nim)
        """
        if Nim > 60:
            print('max Nim is 60.')
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError; an explicit ValueError would be clearer.
            raise
        N = u3.shape[0]
        delta_SLM = self.d_SLM
        # 1080 px is the SLM's height; L_SLM is its physical extent.
        L_SLM = delta_SLM * 1080
        x = np.linspace(0, N - 1, N) - (N / 2) * np.ones(N)
        y = np.linspace(0, N - 1, N) - (N / 2) * np.ones(N)
        XX, YY = np.meshgrid(x,y)
        # Binary aperture marking where the SLM actually covers the field.
        A_SLM = (np.abs(XX) * delta3 < L_SLM / 2) * (np.abs(YY) * delta3 < L_SLM / 2)
        # NOTE(review): hard-coded absolute path to the SLM pattern file.
        slm = np.array(io.loadmat('/home/tangui/Documents/LKB/WISH/slm60_resize10.mat')['slm'])
        if slm.dtype=='uint8':
            slm = slm.astype(float)/256
        ims = np.zeros((N, N, Nim), dtype=float)
        for i in range(Nim):
            sys.stdout.write(f"\rGenerating image {i+1} out of {Nim} ...")
            sys.stdout.flush()
            # Central 1080-wide strip of the SLM pattern, resampled to
            # the apparent sampling size delta3 and padded/cropped to N.
            slm0 = slm[:, 421: 1500, i]
            slm1 = zoom(slm0, delta_SLM / delta3)
            slm1 = np.pad(slm1, (round((N - slm1.shape[0])/ 2), round((N - slm1.shape[1]) / 2)))
            if slm1.shape[0] > N:
                slm1 = slm1[0:N, :]
            if slm1.shape[1] > N:
                slm1 = slm1[:, 0:N]
            # Apply the SLM phase modulation and propagate to the sensor.
            a31 = u3 * A_SLM * np.exp(1j * slm1 * 2 * np.pi)
            a31 = cp.asarray(a31) #put the field in the GPU
            #a4 = self.frt(a31, delta3, z3)
            a4 = self.frt_gpu(a31, delta3, z3)
            # Uniform noise floor on the measured intensity.
            w = noise * cp.random.rand(N, N)
            ya = cp.abs(a4)**2 + w
            ya[ya<0]=0
            #ims[:,:, i] = ya
            ims[:,:, i] = cp.asnumpy(ya)
        return ims
    def process_SLM(self, slm: np.ndarray, N: int, Nim: int, delta3: float):
        """
        Scales the pre generated SLM patterns to the right size taking into account the apparent size of the SLM in
        the sensor field of view.
        :param slm: Input SLM patterns
        :param N: Size of the calculation (typically the sensor number of pixels)
        :param Nim: Number of images to generate
        :param delta3: Sampling size of the SLM plane (typically the "apparent" sampling size wvl*z/N*d_Sensor )
        :return SLM: Rescaled and properly shaped SLM patterns of size (N,N,Nim)
        """
        delta_SLM = self.d_SLM
        if slm.dtype == 'uint8':
            slm = slm.astype(float)/256
        slm2 = slm[:, 421: 1501, 0:Nim] #takes a 1080x1080 square of the SLM
        slm3 = np.empty((N,N,Nim))
        #could replace with my modulate function
        #scale SLM slices to the right size
        for i in range(Nim):
            slm1 = zoom(slm2[:,:,i], delta_SLM / delta3)
            slm1 = np.pad(slm1, (round((N - slm1.shape[0]) / 2), round((N - slm1.shape[1]) / 2)))
            # NOTE(review): the first `if` is not chained to the
            # `if/else` below; when only shape[0] exceeds N, the `else`
            # branch overwrites the crop -- verify against gen_ims.
            if slm1.shape[0] > N:
                slm3[:,:,i] = slm1[0:N, :]
            if slm1.shape[1] > N:
                slm3[:,:,i] = slm1[:, 0:N]
            else :
                slm3[:,:,i] = slm1
            plt.imshow(slm3[:,:,i])
            plt.show()
        # Convert the [0, 1) pattern to a pure phase modulation.
        SLM = np.exp(1j * 2 * np.pi * slm3).astype(np.complex64)
        return SLM
    def process_ims(self, ims: np.ndarray, N: int):
        """
        Converts images to amplitudes and eventually resizes them.
        :param ims: images to convert
        :param N: Size of the sensor
        :return y0 : Processed field of size (N,N, Nim)
        """
        y0 = np.real(np.sqrt(ims)); # change from intensity to magnitude
        y0 = np.pad(y0, (round((N - y0.shape[0]) / 2), round((N - y0.shape[1]) / 2)))
        if y0.shape[0] > N:
            y0=y0[0:N,0:N,:]
        return y0
    def WISHrun(self, y0: np.ndarray, SLM: np.ndarray, delta3: float, delta4: float, N_os: int, N_iter: int,\
                N_batch: int, plot: bool=True):
        """
        Runs the WISH algorithm using a Gerchberg Saxton loop for phase retrieval.
        :param y0: Target modulated amplitudes in the sensor plane
        :param SLM: SLM modulation patterns
        :param delta3: Apparent sampling size of the SLM as seen from the sensor plane
        :param delta4: Sampling size of the sensor plane
        :param N_os: Number of observations per image
        :param N_iter: Maximal number of Gerchberg Saxton iterations
        :param N_batch: Number of batches (modulations)
        :param plot: If True, plots the advance of the retrieval every 10 iterations
        :return u4_est, idx_converge: Estimated field of size (N,N) and the convergence indices to check convergence
        speed
        """
        wvl = self.wavelength
        z3 = self.z
        ## parameters
        N = y0.shape[0]
        k = 2 * np.pi / wvl
        #u3_batch = np.zeros((N, N, N_os), dtype=complex) # store all U3 gpu
        #u4 = np.zeros((N, N, N_os), dtype=complex) # gpu
        #y = np.zeros((N, N, N_os), dtype=complex) # store all U3 gpu
        u3_batch = cp.zeros((N, N, N_os), dtype=cp.complex64) # store all U3 gpu
        u4 = cp.zeros((N, N, N_os), dtype=cp.complex64) # gpu
        y = cp.zeros((N, N, N_os), dtype=cp.complex64) # store all U3 gpu

        ## initilize a3
        k = 2 * np.pi / wvl
        xx = cp.linspace(0, N - 1, N, dtype=cp.float) - (N / 2) * cp.ones(N, dtype=cp.float)
        yy = cp.linspace(0, N - 1, N, dtype=cp.float) - (N / 2) * cp.ones(N, dtype=cp.float)
        X, Y = float(delta4) * cp.meshgrid(xx, yy)[0], float(delta4) * cp.meshgrid(xx, yy)[1]
        R = cp.sqrt(X ** 2 + Y ** 2)
        # Q is the sensor-plane quadratic phase; dividing by Q before the
        # simplified back-propagation compensates for frt_gpu_s omitting it.
        Q = cp.exp(1j*(k/(2*z3))*R**2)
        for ii in range(N_os):
            #SLM_batch = SLM[:,:, ii]
            SLM_batch = cp.asarray(SLM[:,:, ii])
            y0_batch = y0[:,:, ii]
            #u3_batch[:,:, ii] = self.frt(y0_batch, delta4, -z3) * np.conj(SLM_batch) #y0_batch gpu
            #u3_batch[:,:, ii] = self.frt_gpu(cp.asarray(y0_batch), delta4, -z3) * cp.conj(SLM_batch) #y0_batch gpu
            u3_batch[:,:, ii] = self.frt_gpu_s(cp.asarray(y0_batch)/Q, delta4, -z3) * cp.conj(SLM_batch) #y0_batch gpu
        #u3 = np.mean(u3_batch, 2) # average it
        u3 = cp.mean(u3_batch, 2)

        ## Recon run : GS loop
        idx_converge = np.empty(N_iter)
        for jj in range(N_iter):
            sys.stdout.write(f"\rGS iteration {jj+1}")
            sys.stdout.flush()
            #u3_collect = np.zeros(u3.shape, dtype=complex)
            u3_collect = cp.zeros(u3.shape, dtype=cp.complex64)
            idx_converge0 = np.empty(N_batch)
            for idx_batch in range(N_batch):
                # put the correct batch into the GPU (no GPU for now)
                #SLM_batch = SLM[:,:, int(N_os * idx_batch): int(N_os * (idx_batch+1))]
                #y0_batch = y0[:,:, int(N_os * idx_batch): int(N_os * (idx_batch+1))]
                SLM_batch = cp.asarray(SLM[:,:, int(N_os * idx_batch): int(N_os * (idx_batch+1))])
                y0_batch = cp.asarray(y0[:,:, int(N_os * idx_batch): int(N_os * (idx_batch+1))])
                for _ in range(N_os):
                    #u4[:,:,_] = self.frt(u3 * SLM_batch[:,:,_], delta3, z3) # U4 is the field on the sensor
                    u4[:,:,_] = self.frt_gpu_s(u3 * SLM_batch[:,:,_], delta3, z3) # U4 is the field on the sensor
                    y[:,:,_] = y0_batch[:,:,_] * cp.exp(1j * cp.angle(u4[:,:,_])) # force the amplitude of y to be y0
                    #u3_batch[:,:,_] = self.frt(y[:,:,_], delta4, -z3) * np.conj(SLM_batch[:,:,_])
                    u3_batch[:,:,_] = self.frt_gpu_s(y[:,:,_], delta4, -z3) * cp.conj(SLM_batch[:,:,_])
                #u3_collect = u3_collect + np.mean(u3_batch, 2) # collect(add) U3 from each batch
                u3_collect = u3_collect + cp.mean(u3_batch, 2) # collect(add) U3 from each batch
                #idx_converge0[idx_batch] = np.mean(np.mean(np.mean(y0_batch,1),0)/np.sum(np.sum(np.abs(np.abs(u4)-y0_batch),1),0))
                #idx_converge0[idx_batch] = cp.asnumpy(cp.mean(cp.mean(cp.mean(y0_batch,1),0)/cp.sum(cp.sum(cp.abs(cp.abs(u4)-y0_batch),1),0)))
                # convergence index matrix for each batch
                idx_converge0[idx_batch] = cp.linalg.norm(cp.abs(u4)-y0_batch)/ cp.linalg.norm(y0_batch)

            u3 = (u3_collect / N_batch) # average over batches
            idx_converge[jj] = np.mean(idx_converge0) # sum over batches
            sys.stdout.write(f" (convergence index : {idx_converge[jj]})")

            #u4_est = self.frt(u3, delta3, z3)
            # Re-apply the sensor-plane quadratic phase Q on the way out.
            u4_est = cp.asnumpy(self.frt_gpu_s(u3, delta3, z3)*Q)

            if jj % 10 == 0 and plot:
                plt.close('all')
                fig=plt.figure(0)
                fig.suptitle(f'Iteration {jj}')
                ax1=fig.add_subplot(121)
                ax2=fig.add_subplot(122)
                im=ax1.imshow(np.abs(u4_est), cmap='viridis')
                ax1.set_title('Amplitude')
                ax2.imshow(np.angle(u4_est), cmap='viridis')
                ax2.set_title('Phase')

                fig1=plt.figure(1)
                ax = fig1.gca()
                ax.plot(np.arange(0,jj,1), idx_converge[0:jj], marker='o')
                ax.set_xlabel('Iterations')
                ax.set_ylabel('Convergence estimator')
                ax.set_title('Convergence curve')
                plt.show()
                time.sleep(2)

            # exit if the matrix doesn 't change much
            if jj > 1:
                if cp.abs(idx_converge[jj] - idx_converge[jj - 1]) / idx_converge[jj] < 1e-4:
                    print('\nConverged. Exit the GS loop ...')
                    #idx_converge = idx_converge[0:jj]
                    idx_converge = cp.asnumpy(idx_converge[0:jj])
                    break
        return u4_est, idx_converge
#WISH routine
def main():
    """Simulate WISH measurements from a test chart, run the GS
    reconstruction and display ground truth vs. estimate side by side.
    """
    #start timer
    T0 = time.time()
    #instantiate WISH
    Sensor = WISH_Sensor("wish_3.conf")
    im = np.array(Image.open('intensities/resChart.bmp'))[:,:,0]
    # NOTE(review): np.float was removed in NumPy >= 1.24; use float or
    # np.float64 when upgrading.
    u40 = np.pad(im.astype(np.float)/256, (256,256))
    wvl = Sensor.wavelength
    z3 = Sensor.z
    delta4 = Sensor.d_CAM
    N = u40.shape[0]
    # Apparent SLM sampling size as seen from the sensor plane.
    delta3 = wvl * z3 / (N * delta4)
    u30 = Sensor.u4Tou3(u40, delta4, z3)
    ## forward prop to the sensor plane with SLM modulation
    print('Generating simulation data images ...')
    noise = 0.01
    Nim = Sensor.N_mod*Sensor.N_os
    ims = Sensor.gen_ims(u30, z3, delta3, Nim, noise)

    print('\nCaptured images are simulated')
    #clear u30, u40 for memory economy
    del u30
    ## reconstruction
    # pre - process the data
    # for the SLM : correct scaling
    slm = np.array(io.loadmat('/home/tangui/Documents/LKB/WISH/slm60_resize10.mat')['slm'])
    SLM = Sensor.process_SLM(slm, N, Nim, delta3)
    #process the captured image : converting to amplitude and padding if needed
    y0 = Sensor.process_ims(ims, N)
    ##Recon initilization
    N_os = Sensor.N_os # number of images per batch
    if Nim < N_os:
        N_os = Nim
    N_iter = Sensor.N_gs # number of GS iterations
    N_batch = int(Nim / N_os) # number of batches
    u4_est, idx_converge = Sensor.WISHrun(y0, SLM, delta3, delta4, N_os, N_iter, N_batch, plot=False)
    #total time
    T= time.time()-T0
    print(f"\n Total time elapsed : {T} s")
    # Side-by-side comparison: ground truth (u40) vs. estimate (u4_est)
    # plus the convergence curve.
    fig=plt.figure()
    ax1 = fig.add_subplot(231)
    ax2 = fig.add_subplot(232)
    ax3 = fig.add_subplot(233)
    ax4 = fig.add_subplot(234)
    ax5 = fig.add_subplot(235)
    divider1 = make_axes_locatable(ax1)
    cax1 = divider1.append_axes('right', size='5%', pad=0.05)
    divider2 = make_axes_locatable(ax2)
    cax2 = divider2.append_axes('right', size='5%', pad=0.05)
    divider3 = make_axes_locatable(ax3)
    cax3 = divider3.append_axes('right', size='5%', pad=0.05)
    divider4 = make_axes_locatable(ax4)
    cax4 = divider4.append_axes('right', size='5%', pad=0.05)
    im1=ax1.imshow(np.abs(u40)**2, cmap='viridis', vmin=0, vmax=1)
    ax1.set_title('Amplitude GT')
    im2=ax2.imshow(np.angle(u40), cmap='viridis',vmin=-np.pi, vmax=np.pi)
    ax2.set_title('Phase GT')
    im3=ax3.imshow(abs(u4_est), cmap='viridis', vmin=0, vmax=1)
    ax3.set_title('Amplitude estimation')
    im4=ax4.imshow(np.angle(u4_est), cmap='viridis', vmin=-np.pi, vmax=np.pi)
    ax4.set_title('Phase estimation')
    ax5.plot(np.arange(0, len(idx_converge),1), idx_converge)
    ax5.set_title("Convergence curve")
    ax5.set_xlabel("Iteration")
    ax5.set_ylabel("Convergence index")
    fig.colorbar(im1, cax=cax1)
    fig.colorbar(im2, cax=cax2)
    fig.colorbar(im3, cax=cax3)
    fig.colorbar(im4, cax=cax4)
    plt.show()
if __name__=="__main__":
    main()
| 45.645933 | 143 | 0.56782 | 15,173 | 0.795231 | 0 | 0 | 0 | 0 | 0 | 0 | 7,611 | 0.398899 |
80108f12068f5412f0cd647b2c7a236979e1bd0d | 8,635 | py | Python | sunpy/net/jsoc/tests/test_jsoc.py | s0nskar/sunpy | 60ca4792ded4c3938a78da7055cf2c20e0e8ccfd | [
"MIT"
] | null | null | null | sunpy/net/jsoc/tests/test_jsoc.py | s0nskar/sunpy | 60ca4792ded4c3938a78da7055cf2c20e0e8ccfd | [
"MIT"
] | null | null | null | sunpy/net/jsoc/tests/test_jsoc.py | s0nskar/sunpy | 60ca4792ded4c3938a78da7055cf2c20e0e8ccfd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 26 20:17:06 2014
@author: stuart
"""
import os
import tempfile
import datetime
import astropy.table
import astropy.time
import astropy.units as u
import pytest
from sunpy.time import parse_time
from sunpy.net.jsoc import JSOCClient, JSOCResponse
from sunpy.net.vso.vso import Results
import sunpy.net.jsoc.attrs as attrs
client = JSOCClient()
def test_jsocresponse_double():
    """Appending a second table to a JSOCResponse stacks both tables."""
    single = astropy.table.Table(data=[[1,2,3,4]])
    resp = JSOCResponse(table=astropy.table.Table(data=[[1,2,3,4]]))
    resp.append(astropy.table.Table(data=[[1,2,3,4]]))
    assert isinstance(resp, JSOCResponse)
    stacked = astropy.table.vstack([single, astropy.table.Table(data=[[1,2,3,4]])])
    assert all(resp.table == stacked)
def test_jsocresponse_single():
    """A JSOCResponse starts empty and grows to the appended table."""
    resp = JSOCResponse(table=None)
    assert len(resp) == 0
    resp.append(astropy.table.Table(data=[[1,2,3,4]]))
    expected = astropy.table.Table(data=[[1,2,3,4]])
    assert all(resp.table == expected)
    assert len(resp) == 4
def test_payload():
    """Default export payload: Rice-compressed FITS over a TAI time range."""
    start = parse_time('2012/1/1T00:00:00')
    end = parse_time('2012/1/1T00:00:45')
    payload = client._make_query_payload(start, end, 'hmi.M_42s', notify='@')
    payload_expected = {
        'ds': '{0}[{1}-{2}]'.format('hmi.M_42s',
                                    start.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
                                    end.strftime("%Y.%m.%d_%H:%M:%S_TAI")),
        'format': 'json',
        'method': 'url',
        'notify': '@',
        'op': 'exp_request',
        'process': 'n=0|no_op',
        'protocol': 'FITS,compress Rice',
        'requestor': 'none',
        'filenamefmt': '{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format('hmi.M_42s')
    }
    assert payload == payload_expected
def test_payload_nocompression():
    """compression=None must select the uncompressed 'FITS, **NONE**' protocol."""
    start = parse_time('2012/1/1T00:00:00')
    end = parse_time('2012/1/1T00:00:45')
    payload = client._make_query_payload(start, end, 'hmi.M_42s',
                                         compression=None, notify='jsoc@cadair.com')
    payload_expected = {
        'ds':'{0}[{1}-{2}]'.format('hmi.M_42s', start.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
                                   end.strftime("%Y.%m.%d_%H:%M:%S_TAI")),
        'format':'json',
        'method':'url',
        'notify':'jsoc@cadair.com',
        'op':'exp_request',
        'process':'n=0|no_op',
        'protocol':'FITS, **NONE**',
        'requestor':'none',
        'filenamefmt':'{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format('hmi.M_42s')
    }
    assert payload == payload_expected
def test_payload_protocol():
    """An explicit protocol string ('as-is') is passed through unchanged."""
    start = parse_time('2012/1/1T00:00:00')
    end = parse_time('2012/1/1T00:00:45')
    payload = client._make_query_payload(start, end, 'hmi.M_42s', protocol='as-is',
                                         notify='jsoc@cadair.com')
    payload_expected = {
        'ds':'{0}[{1}-{2}]'.format('hmi.M_42s', start.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
                                   end.strftime("%Y.%m.%d_%H:%M:%S_TAI")),
        'format':'json',
        'method':'url',
        'notify':'jsoc@cadair.com',
        'op':'exp_request',
        'process':'n=0|no_op',
        'protocol':'as-is',
        'requestor':'none',
        'filenamefmt':'{0}.{{T_REC:A}}.{{CAMERA}}.{{segment}}'.format('hmi.M_42s')
    }
    assert payload == payload_expected
def test_process_time_string():
    # UTC input is converted to TAI: the 34 s offset matches the
    # TAI-UTC difference in force at the start of 2012 (compare the
    # zero offset in test_process_time_astropy_tai below).
    start = client._process_time('2012/1/1T00:00:00')
    assert start == datetime.datetime(year=2012, month=1, day=1, second=34)
def test_process_time_datetime():
    # A naive datetime is treated as UTC and gains the same 34 s TAI offset.
    start = client._process_time(datetime.datetime(year=2012, month=1, day=1))
    assert start == datetime.datetime(year=2012, month=1, day=1, second=34)
def test_process_time_astropy():
    # An astropy Time on the UTC scale is converted to TAI (34 s offset).
    start = client._process_time(astropy.time.Time('2012-01-01T00:00:00', format='isot', scale='utc'))
    assert start == datetime.datetime(year=2012, month=1, day=1, second=34)
def test_process_time_astropy_tai():
    # Input already on the TAI scale must pass through with no offset.
    start = client._process_time(astropy.time.Time('2012-01-01T00:00:00', format='isot', scale='tai'))
    assert start == datetime.datetime(year=2012, month=1, day=1, second=0)
@pytest.mark.online
def test_status_request():
    # Asking JSOC for the status of a malformed request id must return
    # the server's status-4 error payload verbatim.
    r = client._request_status('none')
    assert r.json() == {u'error': u'requestid none is not an acceptable ID for the external export system (acceptable format is JSOC_YYYYMMDD_NNN_X_IN or JSOC_YYYYMMDD_NNN).',
                        u'status': 4}
def test_empty_jsoc_response():
    """A default-constructed JSOCResponse exposes no data at all."""
    empty = JSOCResponse()
    assert empty.table is None
    assert empty.query_args is None
    assert empty.requestIDs is None
    assert str(empty) == 'None'
    assert repr(empty) == 'None'
    assert len(empty) == 0
@pytest.mark.online
def test_query():
    # A 90 s window of the hmi.M_45s series sampled every 90 s should
    # resolve to exactly 2 records.
    Jresp = client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:01:30'),
                         attrs.Series('hmi.M_45s'),attrs.Sample(90*u.second))
    assert isinstance(Jresp, JSOCResponse)
    assert len(Jresp) == 2
@pytest.mark.online
def test_post_pass():
    # Posting a valid export request returns a status-2 response with
    # the default protocol/method fields.
    responses = client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:00:45'),
                             attrs.Series('hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
    aa = client.request_data(responses, return_resp=True)
    tmpresp = aa[0].json()
    assert tmpresp['status'] == 2
    assert tmpresp['protocol'] == 'FITS,compress Rice'
    assert tmpresp['method'] == 'url'
@pytest.mark.online
def test_post_wavelength():
    # An OR of two AIA wavelengths over a 30 min window must be accepted
    # and report the expected record count (302).
    responses = client.query(attrs.Time('2010/07/30T13:30:00','2010/07/30T14:00:00'),attrs.Series('aia.lev1_euv_12s'),
                             attrs.Wavelength(193*u.AA)|attrs.Wavelength(335*u.AA), attrs.Notify('jsoc@cadair.com'))
    aa = client.request_data(responses, return_resp=True)
    tmpresp = aa[0].json()
    assert tmpresp['status'] == 2
    assert tmpresp['protocol'] == 'FITS,compress Rice'
    assert tmpresp['method'] == 'url'
    assert tmpresp['rcount'] == 302
@pytest.mark.online()
def test_post_wave_series():
    # Combining multiple Series alternatives with Wavelength alternatives
    # in one query is unsupported and must raise TypeError.
    with pytest.raises(TypeError):
        client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:00:45'),
                     attrs.Series('hmi.M_45s')|attrs.Series('aia.lev1_euv_12s'),
                     attrs.Wavelength(193*u.AA)|attrs.Wavelength(335*u.AA))
@pytest.mark.online
def test_post_fail(recwarn):
    # Requesting data for an unknown series must emit a warning carrying
    # the server-side error text; recwarn is pytest's warning-capture fixture.
    res = client.query(attrs.Time('2012/1/1T00:00:00', '2012/1/1T00:00:45'),
                       attrs.Series('none'), attrs.Notify('jsoc@cadair.com'))
    client.request_data(res, return_resp=True)
    w = recwarn.pop(Warning)
    assert issubclass(w.category, Warning)
    assert "Query 0 returned status 4 with error Series none is not a valid series accessible from hmidb2." == str(w.message)
    assert w.filename
    assert w.lineno
@pytest.mark.online
def test_request_status_fail():
    # _request_status must return the same status-4 error payload whether
    # the bogus request id is passed as a string or inside a list.
    resp = client._request_status('none')
    assert resp.json() == {u'status': 4, u'error': u"requestid none is not an acceptable ID for the external export system (acceptable format is JSOC_YYYYMMDD_NNN_X_IN or JSOC_YYYYMMDD_NNN)."}
    resp = client._request_status(['none'])
    assert resp.json() == {u'status': 4, u'error': u"requestid none is not an acceptable ID for the external export system (acceptable format is JSOC_YYYYMMDD_NNN_X_IN or JSOC_YYYYMMDD_NNN)."}
@pytest.mark.online
#@pytest.mark.xfail
def test_wait_get():
    # End-to-end download of a single record into a temp dir; get() must
    # hand back a Results object tracking one file.
    responses = client.query(attrs.Time('2012/1/1T1:00:36', '2012/1/1T01:00:38'),
                             attrs.Series( 'hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
    path = tempfile.mkdtemp()
    res = client.get(responses, path=path)
    assert isinstance(res, Results)
    assert res.total == 1
@pytest.mark.online
def test_get_request():
    # Splitting the flow into request_data + get_request must also
    # yield a Results object.
    responses = client.query(attrs.Time('2012/1/1T1:00:36', '2012/1/1T01:00:38'),
                             attrs.Series('hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
    bb = client.request_data(responses)
    path = tempfile.mkdtemp()
    aa = client.get_request(bb, path=path)
    assert isinstance(aa, Results)
@pytest.mark.online
def test_results_filenames():
    # One file must be downloaded per query record, and the downloaded
    # path basenames must match the URLs they came from.
    responses = client.query(attrs.Time('2014/1/1T1:00:36', '2014/1/1T01:01:38'),
                             attrs.Series('hmi.M_45s'), attrs.Notify('jsoc@cadair.com'))
    path = tempfile.mkdtemp()
    aa = client.get(responses, path=path)
    assert isinstance(aa, Results)
    files = aa.wait()
    assert len(files) == len(responses)
    for hmiurl in aa.map_:
        assert os.path.basename(hmiurl) == os.path.basename(aa.map_[hmiurl]['path'])
@pytest.mark.online
def test_invalid_query():
    """A time-only query (no Series attr) must raise ValueError."""
    with pytest.raises(ValueError):
        # Dead-code cleanup: the original bound the (never-created) result
        # to an unused local `resp`; the bare call is sufficient.
        client.query(attrs.Time('2012/1/1T01:00:00', '2012/1/1T01:00:45'))
| 37.707424 | 192 | 0.626288 | 0 | 0 | 0 | 0 | 4,244 | 0.491488 | 0 | 0 | 2,436 | 0.282108 |
80116cc041c16e4efbe2e37276ff9ca9425b882b | 293 | py | Python | turtle_lib/colorful_spiral.py | PitPietro/pascal-triangle | eb81e9fc4728f4e09a631922c470201a9f897195 | [
"MIT"
] | 1 | 2020-03-11T10:20:53.000Z | 2020-03-11T10:20:53.000Z | turtle_lib/colorful_spiral.py | PitPietro/python-structure | eb81e9fc4728f4e09a631922c470201a9f897195 | [
"MIT"
] | 1 | 2020-07-06T15:45:01.000Z | 2020-07-06T15:50:32.000Z | turtle_lib/colorful_spiral.py | PitPietro/python-structure | eb81e9fc4728f4e09a631922c470201a9f897195 | [
"MIT"
] | 1 | 2020-07-02T05:21:58.000Z | 2020-07-02T05:21:58.000Z | import turtle
if __name__ == '__main__':
    # Draw a spiral on a black background, cycling through six pen
    # colours and thickening the line as the spiral grows.
    palette = ['red', 'green', 'yellow', 'purple', 'blue', 'orange']
    turtle.bgcolor('black')
    for step in range(360):
        turtle.pencolor(palette[step % 6])
        turtle.width(int(step / 100 + 1))
        turtle.forward(step)
        turtle.left(59)
| 22.538462 | 67 | 0.56314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.201365 |
8012691c1adce7b34dee33223df2c745e8a1cd12 | 5,830 | py | Python | tests/test_ref_numpy.py | kuraisle/multipletau | 0321de77616f05ca90106075f7f6ecd137437be7 | [
"BSD-3-Clause"
] | 10 | 2017-01-25T15:47:06.000Z | 2022-01-07T10:08:48.000Z | tests/test_ref_numpy.py | kuraisle/multipletau | 0321de77616f05ca90106075f7f6ecd137437be7 | [
"BSD-3-Clause"
] | 7 | 2016-02-10T10:19:22.000Z | 2018-11-30T23:21:04.000Z | tests/test_ref_numpy.py | kuraisle/multipletau | 0321de77616f05ca90106075f7f6ecd137437be7 | [
"BSD-3-Clause"
] | 4 | 2018-08-22T07:19:52.000Z | 2018-11-05T09:16:52.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Compare to numpy data"""
import sys
import numpy as np
import multipletau
from test_correlate import get_sample_arrays_cplx
def test_corresponds_ac():
    """Normalized autocorrelation must match the plain numpy correlation
    for the first ``m`` lag times."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.concatenate(get_sample_arrays_cplx()).real
    m = 16
    # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
    restau = multipletau.autocorrelate(a=1*a,
                                       m=m,
                                       copy=True,
                                       normalize=True,
                                       dtype=np.float64)
    reslin = multipletau.correlate_numpy(a=1*a,
                                         v=1*a,
                                         copy=True,
                                         normalize=True,
                                         dtype=np.float64)
    # First column of the multiple-tau result holds the lag times.
    idx = np.array(restau[:, 0].real, dtype=int)[:m]
    assert np.allclose(reslin[idx, 1], restau[:m, 1])
def test_corresponds_ac_first_loop():
    """
    numpy correlation:
    G_m = sum_i(a_i*a_{i+m})
    multipletau correlation 2nd order:
    b_j = (a_{2i} + a_{2i+1} / 2)
    G_m = sum_j(b_j*b_{j+1})
        = 1/4*sum_i(a_{2i} * a_{2i+m} +
                    a_{2i} * a_{2i+m+1} +
                    a_{2i+1} * a_{2i+m} +
                    a_{2i+1} * a_{2i+m+1})
    The values after the first m+1 lag times in the multipletau
    correlation differ from the normal correlation, because the
    traces are averaged over two consecutive items, effectively
    halving the size of the trace. The multiple-tau correlation
    can be compared to the regular correlation by using an even
    sized sequence (here 222) in which the elements 2i and 2i+1
    are equal, as is done in this test.
    """
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
    a = np.concatenate(a)[:222]
    # two consecutive elements are the same, so the multiple-tau method
    # corresponds to the numpy correlation for the first loop.
    a[::2] = a[1::2]
    for m in [2, 4, 6, 8, 10, 12, 14, 16]:
        # np.complex_ was removed in NumPy 2.0; np.complex128 is identical.
        restau = multipletau.correlate(a=a,
                                       v=a.imag+1j*a.real,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.complex128)
        reslin = multipletau.correlate_numpy(a=a,
                                             v=a.imag+1j*a.real,
                                             copy=True,
                                             normalize=False,
                                             dtype=np.complex128)
        idtau = np.where(restau[:, 0] == m+2)[0][0]
        tau3 = restau[idtau, 1]  # m+1 initial bins
        idref = np.where(reslin[:, 0] == m+2)[0][0]
        tau3ref = reslin[idref, 1]
        assert np.allclose(tau3, tau3ref)
def test_corresponds_ac_nonormalize():
    """Unnormalized autocorrelation must match the plain numpy correlation
    for the first ``m + 1`` lag times."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.concatenate(get_sample_arrays_cplx()).real
    m = 16
    # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
    restau = multipletau.autocorrelate(a=1*a,
                                       m=m,
                                       copy=True,
                                       normalize=False,
                                       dtype=np.float64)
    reslin = multipletau.correlate_numpy(a=1*a,
                                         v=1*a,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.float64)
    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
def test_corresponds_cc():
    """Normalized cross-correlation must match the plain numpy correlation
    for the first ``m + 1`` lag times."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.concatenate(get_sample_arrays_cplx())
    m = 16
    # np.complex_ was removed in NumPy 2.0; np.complex128 is identical.
    restau = multipletau.correlate(a=a,
                                   v=a.imag+1j*a.real,
                                   m=m,
                                   copy=True,
                                   normalize=True,
                                   dtype=np.complex128)
    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag+1j*a.real,
                                         copy=True,
                                         normalize=True,
                                         dtype=np.complex128)
    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
def test_corresponds_cc_nonormalize():
    """Unnormalized cross-correlation must match the plain numpy correlation
    for the first ``m + 1`` lag times."""
    myframe = sys._getframe()
    myname = myframe.f_code.co_name
    print("running ", myname)
    a = np.concatenate(get_sample_arrays_cplx())
    m = 16
    # np.complex_ was removed in NumPy 2.0; np.complex128 is identical.
    restau = multipletau.correlate(a=a,
                                   v=a.imag+1j*a.real,
                                   m=m,
                                   copy=True,
                                   normalize=False,
                                   dtype=np.complex128)
    reslin = multipletau.correlate_numpy(a=a,
                                         v=a.imag+1j*a.real,
                                         copy=True,
                                         normalize=False,
                                         dtype=np.complex128)
    idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
    assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
if __name__ == "__main__":
    # Discover and run every module-level test function by name.
    namespace = locals()
    test_names = [name for name in list(namespace.keys())
                  if name.startswith("test_") and callable(namespace[name])]
    for name in test_names:
        namespace[name]()
| 33.125 | 71 | 0.456089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,089 | 0.186792 |
801269a1716f4f173be7b06c27dd2d4ed41ac1c7 | 2,613 | py | Python | fedlearner/platform/trainer_master/leader_tm.py | melong007/fedlearner | 69738daf8272148781cfe3c93fb41d2ac67faad7 | [
"Apache-2.0"
] | null | null | null | fedlearner/platform/trainer_master/leader_tm.py | melong007/fedlearner | 69738daf8272148781cfe3c93fb41d2ac67faad7 | [
"Apache-2.0"
] | null | null | null | fedlearner/platform/trainer_master/leader_tm.py | melong007/fedlearner | 69738daf8272148781cfe3c93fb41d2ac67faad7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import argparse
import logging
from trainer_master import TrainerMaster
from data.data_block_queue import DataBlockQueue
from data.data_source_reader import DataSourceReader
class LeaderTrainerMaster(TrainerMaster):
    """Trainer master for the leader role: serves data blocks from a queue,
    skipping blocks already recorded in the checkpoint."""

    def __init__(self, application_id, data_source_reader_):
        """Wire up the block queue and remember the data source reader."""
        super(LeaderTrainerMaster, self).__init__(application_id)
        self._data_block_queue = DataBlockQueue()
        self._data_source_reader = data_source_reader_

    def _load_data(self):
        """Enqueue every data block that is not yet in the checkpoint."""
        checkpoint = self._get_checkpoint()
        for data_block in self._data_source_reader.list_data_block():
            if data_block.block_id in checkpoint:
                continue
            self._data_block_queue.put(data_block)

    def _alloc_data_block(self, block_id=None):
        """Hand out the next queued block, or None when the queue is empty.

        ``block_id`` is ignored -- the leader allocates in queue order.
        """
        if self._data_block_queue.empty():
            return None
        return self._data_block_queue.get()
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.DEBUG)

    # Command line interface for the leader trainer master.
    arg_parser = argparse.ArgumentParser('leader trainer master cmd.')
    arg_parser.add_argument('-p', '--port', type=int, default=50001,
                            help='Listen port of leader trainer master')
    arg_parser.add_argument('-app_id', '--application_id',
                            required=True, help='application_id')
    arg_parser.add_argument('-data_path', '--data_path',
                            required=True, help='training example data path')
    arg_parser.add_argument('-start_date', '--start_date',
                            default=None, help='training data start date')
    arg_parser.add_argument('-end_date', '--end_date',
                            default=None, help='training data end date')
    FLAGS = arg_parser.parse_args()

    # Build the data source reader and start serving on the requested port.
    reader = DataSourceReader(
        FLAGS.data_path, FLAGS.start_date, FLAGS.end_date)
    master = LeaderTrainerMaster(FLAGS.application_id, reader)
    master.run(listen_port=FLAGS.port)
| 41.47619 | 77 | 0.707616 | 787 | 0.301186 | 0 | 0 | 0 | 0 | 0 | 0 | 929 | 0.35553 |
8012bbcd8fca728c35b24ea46da9f760928eac9b | 1,753 | py | Python | comics_webscrapper.py | WittyShiba/Comics-Webscrapping | ef073dc52954975419fb45da72271906bb075f4f | [
"MIT"
] | null | null | null | comics_webscrapper.py | WittyShiba/Comics-Webscrapping | ef073dc52954975419fb45da72271906bb075f4f | [
"MIT"
] | null | null | null | comics_webscrapper.py | WittyShiba/Comics-Webscrapping | ef073dc52954975419fb45da72271906bb075f4f | [
"MIT"
] | null | null | null | # Homework 7
# Write a python program that will download the latest 10 comic images from https://www.gocomics.com/pearlsbeforeswine/
# Navigate to the latest page by clicking 'Read More'.
import requests
import bs4
import os
# Start at a known strip and walk backwards via each page's "previous" link.
url = 'https://www.gocomics.com/pearlsbeforeswine/2019/08/21'
for i in range(10):
    res = requests.get(url)  # download web page to save into res obj.
    res.raise_for_status()  # check for a successful download.
    # Parse the page's HTML source.
    code_text = bs4.BeautifulSoup(res.text, "html.parser")
    # Locate the comic image: trace down from the <a itemprop="image"> anchor.
    img_tag = code_text.select('a[itemprop="image"]')[0].contents[1].contents[0]
    image_url = img_tag.attrs['src']
    title_url = img_tag.attrs['alt']  # alt text doubles as the file name
    image_res = requests.get(image_url)  # download the image itself
    image_res.raise_for_status()
    # Use a context manager so the file is closed even if a chunk write fails
    # (the original leaked the handle on error). 'wb' = write binary.
    with open(title_url + '.png', 'wb') as image_file:
        for chunk in image_res.iter_content(100000):  # 100000-byte chunks
            image_file.write(chunk)
    # Follow the navigation group's "previous strip" link to the older page.
    prev_link = code_text.select('nav[role="group"]')[0].contents[1].contents[3].attrs['href']
    url = 'https://www.gocomics.com' + prev_link
    print('Previous page ' + str(i + 1) + ' is: ' + url)
8015157b8348b958cf26b731db3111632a7f60c1 | 2,106 | py | Python | cohesity_management_sdk/models/tenant_proxy.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/tenant_proxy.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/tenant_proxy.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class TenantProxy(object):
    """Implementation of the 'TenantProxy' model.

    Describes a tenant proxy deployed in a tenant's environment.

    Attributes:
        constituent_id (long|int): constituent id of the proxy.
        ip_address (string): ip address of the proxy.
        tenant_id (string): unique id of the tenant.
        version (string): version of the proxy.
    """

    # Maps model attribute names to the corresponding API property names.
    _names = {
        "constituent_id":'constituentId',
        "ip_address":'ipAddress',
        "tenant_id":'tenantId',
        "version":'version'
    }

    def __init__(self,
                 constituent_id=None,
                 ip_address=None,
                 tenant_id=None,
                 version=None):
        """Constructor for the TenantProxy class"""
        self.constituent_id = constituent_id
        self.ip_address = ip_address
        self.tenant_id = tenant_id
        self.version = version

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a TenantProxy from a deserialized API response.

        Args:
            dictionary (dictionary): server response keyed by API property
                names; missing keys default to None.

        Returns:
            object: a TenantProxy instance, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None
        return cls(
            constituent_id=dictionary.get('constituentId'),
            ip_address=dictionary.get('ipAddress'),
            tenant_id=dictionary.get('tenantId'),
            version=dictionary.get('version'),
        )
| 29.661972 | 81 | 0.606363 | 2,046 | 0.97151 | 0 | 0 | 936 | 0.444444 | 0 | 0 | 1,224 | 0.581197 |
8015962c36f7108badf443ec7534f5753cd6e921 | 2,101 | py | Python | test/test_cores/test_video/test_lt24lcdsys.py | meetps/rhea | f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0 | [
"MIT"
] | 1 | 2022-03-16T23:56:09.000Z | 2022-03-16T23:56:09.000Z | test/test_cores/test_video/test_lt24lcdsys.py | meetps/rhea | f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0 | [
"MIT"
] | null | null | null | test/test_cores/test_video/test_lt24lcdsys.py | meetps/rhea | f8a9a08fb5e14c5c4488ef68a2dff4d18222c2c0 | [
"MIT"
] | null | null | null |
from __future__ import print_function
from argparse import Namespace
# a video display model to check timing
import pytest
from myhdl import Signal, intbv, instance, delay, StopSimulation, now
from rhea.system import Clock, Reset, Global
from rhea.cores.video.lcd import LT24Interface
from rhea.models.video import LT24LCDDisplay
from rhea.utils.test import run_testbench, tb_args
from mm_lt24lcdsys import mm_lt24lcdsys
from mm_lt24lcdsys import convert
@pytest.mark.skipif(True, reason="pytest issue/error 10x runtime")
def test_lt24lcd():
    """Run the LT24 LCD system testbench (skipped under pytest: ~10x runtime)."""
    tb_lt24lcd(args=Namespace())
def tb_lt24lcd(args=None):
    """Testbench: drive the LT24 LCD system and let the display model
    observe at least three frame updates before stopping the simulation.

    :param args: unused placeholder for a command-line Namespace.
    """
    clock = Clock(0, frequency=50e6)
    # NOTE(review): `async` became a reserved keyword in Python 3.7, so this
    # line is a SyntaxError on modern interpreters -- confirm whether rhea's
    # Reset (like MyHDL's ResetSignal) renamed the parameter to `isasync`.
    reset = Reset(0, active=0, async=True)
    glbl = Global(clock, reset)

    # LT24 interface pins, all driven/observed by the DUT and display model.
    lcd_on = Signal(bool(0))
    lcd_resetn = Signal(bool(0))
    lcd_csn = Signal(bool(0))
    lcd_rs = Signal(bool(0))
    lcd_wrn = Signal(bool(0))
    lcd_rdn = Signal(bool(0))
    lcd_data = Signal(intbv(0)[16:])

    lcd = LT24Interface()
    resolution = lcd.resolution
    color_depth = lcd.color_depth
    # assign the ports to the interface
    lcd.assign(lcd_on, lcd_resetn, lcd_csn, lcd_rs, lcd_wrn,
               lcd_rdn, lcd_data)
    # Behavioral model of the LT24 LCD display.
    mvd = LT24LCDDisplay()

    def _bench_lt24lcdsys():
        # Device under test plus the display model and clock generator.
        tbdut = mm_lt24lcdsys(clock, reset, lcd_on, lcd_resetn,
                              lcd_csn, lcd_rs, lcd_wrn, lcd_rdn,
                              lcd_data)
        tbvd = mvd.process(glbl, lcd)  # LCD display model
        tbclk = clock.gen()

        @instance
        def tbstim():
            yield reset.pulse(33)
            yield clock.posedge

            # Wait (bounded) until the display model has seen 3 updates.
            timeout = 33000
            while mvd.update_cnt < 3 and timeout > 0:
                yield delay(1000)
                timeout -= 1

            yield delay(100)
            print("{:<10d}: simulation real time {}".format(now(), mvd.get_time()))
            raise StopSimulation

        return tbdut, tbvd, tbclk, tbstim

    run_testbench(_bench_lt24lcdsys)
def test_conversion():
    """Smoke test: HDL conversion of the system must complete without error."""
    convert()
if __name__ == '__main__':
    # Run the (normally skipped) testbench directly, then the conversion test.
    tb_lt24lcd(tb_args())
    test_conversion()
| 26.2625 | 83 | 0.640647 | 0 | 0 | 1,383 | 0.658258 | 524 | 0.249405 | 0 | 0 | 170 | 0.080914 |
80178d726d35bfda33f77aca84b7fdccd2b6d2ea | 253 | py | Python | src/fl_simulation/server/aggregation/__init__.py | microsoft/fl-simulation | d177d329c82559c7efe82deae8dea8f9baa49495 | [
"MIT"
] | 5 | 2021-12-14T02:21:53.000Z | 2021-12-26T07:45:13.000Z | src/fl_simulation/server/aggregation/__init__.py | microsoft/fl-simulation | d177d329c82559c7efe82deae8dea8f9baa49495 | [
"MIT"
] | 1 | 2022-01-04T04:51:20.000Z | 2022-01-04T04:51:20.000Z | src/fl_simulation/server/aggregation/__init__.py | microsoft/fl-simulation | d177d329c82559c7efe82deae8dea8f9baa49495 | [
"MIT"
] | null | null | null | """Utilities and implementation for model aggregation on the central server."""
from .aggregator import *
from .fedavg import *
from .fedprox import *
from .scaffold import *
from .aggregator_with_dropouts import *
from .multi_model_aggregator import * | 31.625 | 79 | 0.790514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.312253 |
801a9b75ed3372642f8c99366580172389900495 | 9,912 | py | Python | neurolang/frontend/neurosynth_utils.py | gzanitti/NeuroLang | 497d3d28b640329771e34d92ccec93f984c3f784 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T02:00:22.000Z | 2021-01-07T02:00:22.000Z | neurolang/frontend/neurosynth_utils.py | NeuroLang/NeuroLang | 282457a48722741d577b69980be0a46f69c9954f | [
"BSD-3-Clause"
] | 207 | 2020-11-04T12:51:10.000Z | 2022-03-30T13:42:26.000Z | neurolang/frontend/neurosynth_utils.py | jonasrenault/NeuroLang | 497d3d28b640329771e34d92ccec93f984c3f784 | [
"BSD-3-Clause"
] | 6 | 2020-11-04T13:59:35.000Z | 2021-03-19T05:28:10.000Z | from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from nilearn.datasets.utils import _fetch_files
from scipy import sparse
class StudyID(str):
    """String subtype used to tag values that represent Neurosynth study ids."""
class TfIDf(float):
    """Float subtype used to tag values that represent tf-idf scores."""
NS_DATA_URL = "https://github.com/neurosynth/neurosynth-data/raw/master/"
def fetch_study_metadata(
    data_dir: Path, version: int = 7, verbose: int = 1
) -> pd.DataFrame:
    """Load the Neurosynth study metadata table, downloading it if needed.

    The ``metadata.tsv.gz`` release file holds one row per study (ID).
    The ids are in the same order as the ``id`` column of the associated
    ``coordinates.tsv.gz`` file (which has multiple rows per study) and as
    the rows of the ``features.npz`` file for the same version.  For
    version 7 the columns are: id, doi, space, title, authors, year,
    journal.

    Parameters
    ----------
    data_dir : Path
        the path for the directory where downloaded data should be saved.
    version : int, optional
        the neurosynth data version, by default 7
    verbose : int, optional
        verbose param for nilearn's `_fetch_files`, by default 1

    Returns
    -------
    pd.DataFrame
        the study metadata dataframe, one row per study.
    """
    filename = f"data-neurosynth_version-{version}_metadata.tsv.gz"
    remote_spec = (filename, NS_DATA_URL + filename, {})
    local_path = _fetch_files(data_dir, [remote_spec], verbose=verbose)[0]
    return pd.read_table(local_path)
def fetch_feature_data(
    data_dir: Path,
    version: int = 7,
    verbose: int = 1,
    convert_study_ids: bool = False,
) -> pd.DataFrame:
    """Load the Neurosynth tf-idf feature values as a dense dataframe.

    The release ships the features as a compressed sparse matrix plus a
    vocabulary file.  Both are downloaded if needed; the matrix is
    densified and combined with the study ids (rows, in metadata order)
    and the vocabulary terms (columns) into an N x P dataframe, N being
    the number of studies and P the vocabulary size.

    Parameters
    ----------
    data_dir : Path
        the path for the directory where downloaded data should be saved.
    version : int, optional
        the neurosynth data version, by default 7
    verbose : int, optional
        verbose param for nilearn's `_fetch_files`, by default 1
    convert_study_ids : bool, optional
        if True, cast study ids as `StudyID`, by default False

    Returns
    -------
    pd.DataFrame
        the features dataframe (studies x vocabulary terms).
    """
    remote_names = [
        f"data-neurosynth_version-{version}_vocab-terms_source-abstract_type-tfidf_features.npz",
        f"data-neurosynth_version-{version}_vocab-terms_vocabulary.txt",
    ]
    local_paths = _fetch_files(
        data_dir,
        [(name, NS_DATA_URL + name, {}) for name in remote_names],
        verbose=verbose,
    )
    features_path, vocab_path = local_paths
    dense_features = sparse.load_npz(features_path).todense()
    study_ids = fetch_study_metadata(data_dir, version, verbose)["id"]
    if convert_study_ids:
        study_ids = study_ids.apply(StudyID)
    vocabulary = np.genfromtxt(
        vocab_path,
        dtype=str,
        delimiter="\t",
    ).tolist()
    return pd.DataFrame(
        index=study_ids.tolist(), columns=vocabulary, data=dense_features
    )
def fetch_neurosynth_peak_data(
    data_dir: Path,
    version: int = 7,
    verbose: int = 1,
    convert_study_ids: bool = False,
) -> pd.DataFrame:
    """Load the reported peak coordinates, downloading them if needed.

    ``coordinates.tsv.gz`` holds one row per reported peak.  The study
    metadata is joined in so each peak row also carries the coordinate
    space of its study.  The columns (for version 7) are: id, table_id,
    table_num, peak_id, space, x, y, z.

    Parameters
    ----------
    data_dir : Path
        the path for the directory where downloaded data should be saved.
    version : int, optional
        the neurosynth data version, by default 7
    verbose : int, optional
        verbose param for nilearn's `_fetch_files`, by default 1
    convert_study_ids : bool, optional
        if True, cast study ids as `StudyID`, by default False

    Returns
    -------
    pd.DataFrame
        the peak dataframe, one row per reported peak.
    """
    filename = f"data-neurosynth_version-{version}_coordinates.tsv.gz"
    local_path = _fetch_files(
        data_dir,
        [(filename, NS_DATA_URL + filename, {})],
        verbose=verbose,
    )[0]
    peaks = pd.read_table(local_path)
    # Attach each study's coordinate space to its peak rows.
    metadata = fetch_study_metadata(data_dir, version, verbose)
    peaks = peaks.join(metadata[["id", "space"]].set_index("id"), on="id")
    if convert_study_ids:
        peaks["id"] = peaks["id"].apply(StudyID)
    return peaks
def get_ns_term_study_associations(
    data_dir: Path,
    version: int = 7,
    verbose: int = 1,
    convert_study_ids: bool = False,
    tfidf_threshold: Optional[float] = None,
) -> pd.DataFrame:
    """Build the (term, study, tfidf) association table.

    The feature matrix is melted into long form: one row per (term, study)
    pair with its tf-idf value.  Pairs whose tf-idf value does not exceed
    ``tfidf_threshold`` (or 0 when no threshold is given) are dropped.

    Parameters
    ----------
    data_dir : Path
        the path for the directory where downloaded data should be saved.
    version : int, optional
        the neurosynth data version, by default 7
    verbose : int, optional
        verbose param for nilearn's `_fetch_files`, by default 1
    convert_study_ids : bool, optional
        if True, cast study ids as `StudyID`, by default False
    tfidf_threshold : Optional[float], optional
        minimum tfidf value for the retained associations, by default None

    Returns
    -------
    pd.DataFrame
        the term association dataframe with columns id, term, tfidf.
    """
    features = fetch_feature_data(
        data_dir, version, verbose, convert_study_ids
    )
    features.index.name = "id"
    associations = pd.melt(
        features.reset_index(),
        var_name="term",
        id_vars="id",
        value_name="tfidf",
    )
    cutoff = 0 if tfidf_threshold is None else tfidf_threshold
    return associations[associations["tfidf"] > cutoff]
def get_ns_mni_peaks_reported(
    data_dir: Path,
    version: int = 7,
    verbose: int = 1,
    convert_study_ids: bool = False,
) -> pd.DataFrame:
    """
    Load a dataframe containing the coordinates for the peaks reported by
    studies in the Neurosynth dataset. Coordinates for the peaks are in
    MNI space, with coordinates that are reported in Talairach space
    converted.

    The resulting dataframe contains one row for each peak reported. Each
    row has 4 columns:
        - id
        - x
        - y
        - z

    Parameters
    ----------
    data_dir : Path
        the path for the directory where downloaded data should be saved.
    version : int, optional
        the neurosynth data version, by default 7
    verbose : int, optional
        verbose param for nilearn's `_fetch_files`, by default 1
    convert_study_ids : bool, optional
        if True, cast study ids as `StudyID`, by default False

    Returns
    -------
    pd.DataFrame
        the peak dataframe
    """
    activations = fetch_neurosynth_peak_data(
        data_dir, version, verbose, convert_study_ids
    )
    # Split peaks by their study's reported coordinate space.
    mni_peaks = activations.loc[activations.space == "MNI"][
        ["x", "y", "z", "id"]
    ]
    non_mni_peaks = activations.loc[activations.space == "TAL"][
        ["x", "y", "z", "id"]
    ]
    # Pseudo-inverse of a (transposed) 4x4 affine, giving a TAL -> MNI map.
    # NOTE(review): the constants look like an MNI->Talairach affine of the
    # Brett/tal2icbm family -- confirm the provenance of these values.
    proj_mat = np.linalg.pinv(
        np.array(
            [
                [0.9254, 0.0024, -0.0118, -1.0207],
                [-0.0048, 0.9316, -0.0871, -1.7667],
                [0.0152, 0.0883, 0.8924, 4.0926],
                [0.0, 0.0, 0.0, 1.0],
            ]
        ).T
    )
    # Apply the affine in homogeneous coordinates (append a column of ones),
    # keep the x/y/z part and round to integer voxel-style coordinates.
    projected = np.round(
        np.dot(
            np.hstack(
                (
                    non_mni_peaks[["x", "y", "z"]].values,
                    np.ones((len(non_mni_peaks), 1)),
                )
            ),
            proj_mat,
        )[:, 0:3]
    )
    # Re-attach the study ids to the converted coordinates.
    projected_df = pd.DataFrame(
        np.hstack([projected, non_mni_peaks[["id"]].values]),
        columns=["x", "y", "z", "id"],
    )
    # Combine converted TAL peaks with native MNI peaks; coordinates as int.
    peak_data = pd.concat([projected_df, mni_peaks]).astype(
        {"x": int, "y": int, "z": int}
    )
    return peak_data
| 29.58806 | 97 | 0.617736 | 56 | 0.00565 | 0 | 0 | 0 | 0 | 0 | 0 | 5,753 | 0.580408 |
801c4a110403ae871b4a443a0d0c405bac55da7d | 957 | py | Python | lib/sqlalchemy/util/typing.py | immerrr/sqlalchemy | 995fb577a64061a9cbab62b481c65a4c4d3e5a67 | [
"MIT"
] | 1 | 2020-07-21T16:06:40.000Z | 2020-07-21T16:06:40.000Z | lib/sqlalchemy/util/typing.py | taogeYT/sqlalchemy | e88dc004e6bcd1418cb8eb811d0aa580c2a44b8f | [
"MIT"
] | 4 | 2020-04-23T19:00:28.000Z | 2021-09-28T18:14:58.000Z | lib/sqlalchemy/util/typing.py | taogeYT/sqlalchemy | e88dc004e6bcd1418cb8eb811d0aa580c2a44b8f | [
"MIT"
] | null | null | null | from typing import Any
from typing import Generic
from typing import overload
from typing import Type
from typing import TypeVar
from . import compat
if compat.py38:
from typing import Literal
from typing import Protocol
from typing import TypedDict
else:
from typing_extensions import Literal # noqa
from typing_extensions import Protocol # noqa
from typing_extensions import TypedDict # noqa
if compat.py311:
from typing import NotRequired # noqa
else:
from typing_extensions import NotRequired # noqa
_T = TypeVar("_T")
class _TypeToInstance(Generic[_T]):
    """Typing helper for a descriptor-like attribute: accessed through the
    class it yields ``Type[_T]``, accessed through an instance it yields
    ``_T``.  Overload stubs only; there is no runtime implementation here."""

    @overload
    def __get__(self, instance: None, owner: Any) -> Type[_T]:
        ...

    @overload
    def __get__(self, instance: object, owner: Any) -> _T:
        ...

    @overload
    def __set__(self, instance: None, value: Type[_T]) -> None:
        ...

    @overload
    def __set__(self, instance: object, value: _T) -> None:
        ...
| 21.75 | 63 | 0.679206 | 388 | 0.405434 | 0 | 0 | 330 | 0.344828 | 0 | 0 | 34 | 0.035528 |
801d29ea0f445cc4a015a6b4894791ed1ccb9a07 | 563 | py | Python | ep_ws/build/catkin_generated/order_packages.py | fsrlab/FSR_ROS_SIM | f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | [
"MIT"
] | null | null | null | ep_ws/build/catkin_generated/order_packages.py | fsrlab/FSR_ROS_SIM | f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | [
"MIT"
] | null | null | null | ep_ws/build/catkin_generated/order_packages.py | fsrlab/FSR_ROS_SIM | f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/order_packages.context.py.in
# Root of this catkin workspace's source space.
source_root_dir = '/home/sim2real/ep_ws/src'
# Generated with empty white/black lists: '' != '' is always False, so both
# expressions resolve to [].
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
# Chained underlay workspaces (cartographer builds + base ROS Noetic install).
underlay_workspaces = '/home/sim2real/carto_ws/devel_isolated/cartographer_rviz;/home/sim2real/carto_ws/install_isolated;/home/sim2real/ep_ws/devel;/opt/ros/noetic'.split(';') if '/home/sim2real/carto_ws/devel_isolated/cartographer_rviz;/home/sim2real/carto_ws/install_isolated;/home/sim2real/ep_ws/devel;/opt/ros/noetic' != '' else []
| 93.833333 | 335 | 0.756661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.71048 |
801d907dbd6651a1c3f1baa79169bbd085c486ee | 2,682 | py | Python | request_signer/tests/test_response.py | imtapps/django-request-signer | b059d021b6e068245030ab682c2cff4318c83ca6 | [
"BSD-2-Clause"
] | 1 | 2017-01-23T19:21:23.000Z | 2017-01-23T19:21:23.000Z | request_signer/tests/test_response.py | imtapps/django-request-signer | b059d021b6e068245030ab682c2cff4318c83ca6 | [
"BSD-2-Clause"
] | 14 | 2016-01-21T17:18:21.000Z | 2022-02-09T19:21:59.000Z | request_signer/tests/test_response.py | imtapps/django-request-signer | b059d021b6e068245030ab682c2cff4318c83ca6 | [
"BSD-2-Clause"
] | 3 | 2016-01-25T19:32:21.000Z | 2016-08-23T15:37:38.000Z | import six
if six.PY3:
from unittest import mock
from io import StringIO
else:
import mock
from cStringIO import StringIO
from http.client import responses
import json
from django import test
from request_signer.client.generic import Response
class ResponseTests(test.TestCase):
    """Unit tests for ``Response``, driven by a mocked raw HTTP response."""

    def setUp(self):
        # Each test gets a fresh mock raw response wrapped in a Response.
        self.raw_response = mock.Mock()
        self.response = Response(self.raw_response)

    def test_response_requires_url_to_init(self):
        self.assertEqual(self.response.raw_response, self.raw_response)

    @mock.patch.object(Response, '_evaluate_response_code_for_success')
    def test_response_is_successful_returns_value_from_evaluate(self, evaluate_response):
        self.assertEqual(self.response.is_successful, evaluate_response.return_value)

    @mock.patch.object(Response, 'status_code', mock.Mock())
    @mock.patch.object(Response, '_evaluate_response_code_for_success')
    def test_response_is_successful_calls_evaluate_with_status_code(self, evaluate_response):
        getattr(self.response, 'is_successful')
        evaluate_response.assert_called_once_with(self.response.status_code)

    def test_bad_http_status_return_false_from_evaluate_response_code_for_success(self):
        # PEP 8 (E731): use a def rather than a lambda assignment.
        def include_status(status):
            return status < 200 or status > 299
        self.evaluate_response_code_for_success(False, include_status)

    def test_good_http_status_return_true_from_evaluate_response_code_for_success(self):
        def include_status(status):
            return 199 < status < 300
        self.evaluate_response_code_for_success(True, include_status)

    def evaluate_response_code_for_success(self, expected, include_status):
        """Assert every known HTTP status selected by *include_status*
        evaluates to *expected*."""
        # Iterating the dict yields its keys; .keys() was redundant.
        statuses = (status for status in responses if include_status(status))
        for response_code in statuses:
            value = self.response._evaluate_response_code_for_success(response_code)
            message = "it seems '%s' returned '%s' for some odd reason" % (response_code, value)
            self.assertEqual(expected, value, message)

    def test_status_code_returns_status_code_from_raw_response(self):
        self.raw_response.code = 201
        self.assertEqual(201, self.response.status_code)

    def test_returns_dict_of_json_data_from_response(self):
        self.raw_response.read.return_value = '{"first":"item"}'
        self.assertEqual(dict(first='item'), self.response.json)

    def test_can_read_response_multiple_times(self):
        # StringIO can only be consumed once, so .json must cache its data.
        data = '{"data": "this is the response"}'
        expected = json.loads(data)
        self.response.raw_response = StringIO(data)
        self.assertEqual(expected, self.response.json)
        self.assertEqual(expected, self.response.json)
| 42.571429 | 96 | 0.751305 | 2,419 | 0.901939 | 0 | 0 | 590 | 0.219985 | 0 | 0 | 209 | 0.077927 |
801e841d4330b508f4fb6db780e54413e2c8e289 | 3,373 | py | Python | rl_groundup/temporal_difference_methods/n_step_tree_backup.py | TristanBester/rl_groundup | 2e981667e21330a35a6ab2a642e278aaaf4dca84 | [
"MIT"
] | 1 | 2021-04-20T00:43:43.000Z | 2021-04-20T00:43:43.000Z | rl_groundup/temporal_difference_methods/n_step_tree_backup.py | TristanBester/rl_groundup | 2e981667e21330a35a6ab2a642e278aaaf4dca84 | [
"MIT"
] | null | null | null | rl_groundup/temporal_difference_methods/n_step_tree_backup.py | TristanBester/rl_groundup | 2e981667e21330a35a6ab2a642e278aaaf4dca84 | [
"MIT"
] | null | null | null | # Created by Tristan Bester.
import sys
import numpy as np
sys.path.append('../')
from envs import GridWorld
from itertools import product
from utils import print_episode, eps_greedy_policy, test_policy
'''
n-step Tree Backup used to estimate the optimal policy for
the gridworld environment defined on page 48 of
"Reinforcement Learning: An Introduction."
Algorithm available on page 125.
Book reference:
Sutton, R. and Barto, A., 2014. Reinforcement Learning:
An Introduction. 1st ed. London: The MIT Press.
'''
def policy_proba(policy, s, a, epsilon):
    """Probability that the epsilon-greedy policy (over 4 actions) derived
    from ``policy`` selects action ``a`` in state ``s``."""
    explore = epsilon / 4
    if policy[s] != a:
        return explore
    return explore + (1 - epsilon)
def n_step_tree_backup(env, n, alpha, gamma, epsilon, n_episodes):
    """Estimate the optimal policy with the n-step Tree Backup algorithm.

    :param env: environment exposing reset()/step() and the space sizes.
    :param n: number of backup steps.
    :param alpha: step size for the Q updates.
    :param gamma: discount factor.
    :param epsilon: initial exploration rate (decayed each episode, floor 0.1).
    :param n_episodes: number of training episodes.
    :return: dict mapping state -> greedy action.
    """
    # Initialize policy and state-action value function.
    sa_pairs = product(range(env.observation_space_size),
                       range(env.action_space_size))
    Q = dict.fromkeys(sa_pairs, 0.0)
    policy = dict.fromkeys(range(env.observation_space_size), 0)
    # Circular buffers (length n) of the last n states/actions/values.
    states = np.zeros(n)
    actions = np.zeros(n)
    Qs = np.zeros(n)
    deltas = np.zeros(n)
    pis = np.zeros(n)

    def decay(x):
        # Linear epsilon decay with a floor of 0.1.
        return x - 2 / n_episodes if x - 2 / n_episodes > 0.1 else 0.1

    for episode in range(n_episodes):
        done = False
        obs = env.reset()
        action = eps_greedy_policy(Q, obs, epsilon, env.action_space_size)
        states[0] = obs
        actions[0] = action
        Qs[0] = Q[obs, action]
        t = -1
        tau = -1
        T = np.inf
        while not done or t != T-1:
            t += 1
            if t < T:
                obs_prime, reward, done = env.step(action)
                states[(t+1) % n] = obs_prime
                if done:
                    T = t+1
                    deltas[t % n] = reward - Qs[t % n]
                else:
                    # Expected value of the next state under the current
                    # epsilon-greedy policy (4 actions).
                    deltas[t % n] = reward + gamma * \
                        np.sum([policy_proba(policy, obs_prime, i, epsilon) *
                                Q[obs_prime, i] for i in range(4)]) - Qs[t % n]
                    action = eps_greedy_policy(Q, obs_prime, epsilon,
                                               env.action_space_size)
                    # BUG FIX: record the chosen action in the circular
                    # buffer; previously only actions[0] was ever written,
                    # so updates at time tau used a stale action.
                    actions[(t+1) % n] = action
                    Qs[(t+1) % n] = Q[obs_prime, action]
                    pis[(t+1) % n] = policy_proba(policy, obs_prime, action,
                                                  epsilon)
            tau = t-n+1
            if tau > -1:
                Z = 1
                G = Qs[tau % n]
                # NOTE(review): the book's pseudocode sums k up to
                # min(tau+n-1, T-1) inclusive; range() excludes the upper
                # bound -- confirm whether this off-by-one is intended.
                for k in range(tau, min(tau+n-1, T-1)):
                    G += Z*deltas[k % n]
                    # BUG FIX: the tree-backup weight recurrence is
                    # Z <- gamma * Z * pi(A_{k+1}|S_{k+1}); the original
                    # `Z *= gamma * Z * pis[...]` squared Z at each step.
                    Z = gamma * Z * pis[(k+1) % n]
                s = states[tau % n]
                a = actions[tau % n]
                # Update state-action value function.
                Q[s, a] += alpha * (G - Q[s, a])
                # Make policy greedy w.r.t. Q.
                action_values = [Q[s, i] for i in range(4)]
                policy[s] = np.argmax(action_values)
        epsilon = decay(epsilon)
        if episode % 100 == 0:
            print_episode(episode, n_episodes)
    print_episode(n_episodes, n_episodes)
    return policy
if __name__ == '__main__':
    # Hyperparameters: 4-step backups, small step size, undiscounted returns,
    # fully exploratory start (epsilon decays inside the algorithm).
    n = 4
    alpha = 0.01
    gamma = 1
    epsilon = 1
    n_episodes = 1000
    env = GridWorld()
    policy = n_step_tree_backup(env, n , alpha, gamma, epsilon, n_episodes)
    # Roll out the learned policy for 10 episodes.
    test_policy(env, policy, 10)
| 33.39604 | 83 | 0.54877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.175511 |
801ef5016fb2c51cff05eabfc2b10dedfdd933d5 | 240 | py | Python | setup.py | deephyper/metalgpy | 73393335c910757f2289414cdf807766e579e1e2 | [
"BSD-2-Clause"
] | null | null | null | setup.py | deephyper/metalgpy | 73393335c910757f2289414cdf807766e579e1e2 | [
"BSD-2-Clause"
] | null | null | null | setup.py | deephyper/metalgpy | 73393335c910757f2289414cdf807766e579e1e2 | [
"BSD-2-Clause"
] | null | null | null | from setuptools import setup, find_packages
# Third-party packages required for this module to be executed.
REQUIRED = [
    "dm-tree",
    "numpy",
    "scipy"
]

# Package metadata; modules are discovered automatically via find_packages().
setup(name="metalgpy", packages=find_packages(), install_requires=REQUIRED)
| 18.461538 | 75 | 0.716667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.3875 |
80212ffcd9bb037dec9bef2eb1d68bc81a8baec9 | 1,722 | py | Python | src/numbasub/nonumba.py | ptooley/numbasub | b58e66f02672650d87477a0dc92a179060a710b3 | [
"MIT"
] | 3 | 2018-07-26T16:42:25.000Z | 2022-01-18T02:15:01.000Z | src/numbasub/nonumba.py | ptooley/numbasub | b58e66f02672650d87477a0dc92a179060a710b3 | [
"MIT"
] | null | null | null | src/numbasub/nonumba.py | ptooley/numbasub | b58e66f02672650d87477a0dc92a179060a710b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import functools
#https://stackoverflow.com/questions/3888158
def optional_arg_decorator(fn):
    """Make a decorator factory *fn* usable both bare and parameterized.

    ``fn`` must accept the decorated callable as its first argument,
    followed by the optional configuration arguments.  The returned
    wrapper then supports both spellings:

        @fn              ->  fn(callable)
        @fn(*a, **kw)    ->  fn(callable, *a, **kw)
    """
    @functools.wraps(fn)
    def dispatcher(*args, **kwargs):
        # Bare usage: the sole positional argument is the decoratee itself.
        bare_use = len(args) == 1 and not kwargs and callable(args[0])
        if bare_use:
            return fn(args[0])
        # Parameterized usage: capture the arguments now, decorate later.
        def configured(decoratee):
            return fn(decoratee, *args, **kwargs)
        return configured
    return dispatcher
@optional_arg_decorator
def __noop(func, *args, **kwargs):
    """Do-nothing decorator: hand the decorated callable back untouched."""
    return func
# Decorator stand-ins: every numba compilation decorator becomes a no-op that
# returns the decorated function unchanged, so numba-annotated code still runs
# (uncompiled) when numba itself is unavailable.
autojit = __noop
generated_jit = __noop
guvectorize = __noop
jit = __noop
jitclass = __noop
njit = __noop
vectorize = __noop

# Type stand-ins: placeholders mirroring the type names numba exports
# (e.g. f8, int32, void).  They are all None here -- they only exist so that
# ``from numbasub.nonumba import *`` provides the same names as numba does.
b1 = None
bool_ = None
boolean = None
byte = None
c16 = None
c8 = None
char = None
complex128 = None
complex64 = None
double = None
f4 = None
f8 = None
ffi = None
ffi_forced_object = None
float32 = None
float64 = None
float_ = None
i1 = None
i2 = None
i4 = None
i8 = None
int16 = None
int32 = None
int64 = None
int8 = None
int_ = None
intc = None
intp = None
long_ = None
longlong = None
none = None
short = None
u1 = None
u2 = None
u4 = None
u8 = None
uchar = None
uint = None
uint16 = None
uint32 = None
uint64 = None
uint8 = None
uintc = None
uintp = None
ulong = None
ulonglong = None
ushort = None
void = None
| 18.923077 | 75 | 0.621951 | 0 | 0 | 0 | 0 | 821 | 0.476771 | 0 | 0 | 472 | 0.2741 |
8022f771c37a2c17506b1b5ad623309f807eb9bd | 1,552 | py | Python | setup.py | FoxNerdSaysMoo/HomeAssistantAPI | 69b175141fa4aaed3a0c0d33a8bc9e8cc56caf6a | [
"MIT"
] | null | null | null | setup.py | FoxNerdSaysMoo/HomeAssistantAPI | 69b175141fa4aaed3a0c0d33a8bc9e8cc56caf6a | [
"MIT"
] | null | null | null | setup.py | FoxNerdSaysMoo/HomeAssistantAPI | 69b175141fa4aaed3a0c0d33a8bc9e8cc56caf6a | [
"MIT"
] | null | null | null | from setuptools import setup
from homeassistant_api import __version__
# Read the long description from the project README.
# The encoding is given explicitly: without it, open() uses the locale's
# default (e.g. cp1252 on Windows), which fails on non-ASCII characters
# commonly present in README files.
with open("README.md", "r", encoding="utf-8") as f:
    read = f.read()

setup(
    name="HomeAssistant API",
    url="https://github.com/GrandMoff100/HomeassistantAPI",
    description="Python Wrapper for Homeassistant's REST API",
    version=__version__,
    keywords=['homeassistant', 'api', 'wrapper', 'client'],
    author="GrandMoff100",
    author_email="nlarsen23.student@gmail.com",
    packages=[
        "homeassistant_api",
        "homeassistant_api.models",
        "homeassistant_api._async",
        "homeassistant_api._async.models"
    ],
    long_description=read,
    long_description_content_type="text/markdown",
    install_requires=["requests", "simplejson"],
    # Async support is optional; install with ``pip install 'HomeAssistant API[async]'``.
    extras_require={
        "async": ["aiohttp"]
    },
    python_requires=">=3.6",
    provides=["homeassistant_api"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Software Development :: Version Control :: Git"
    ]
)
| 34.488889 | 71 | 0.631443 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 927 | 0.597294 |
8023bf2679045e3bcb91e9b2173b66025aa99f9a | 2,316 | py | Python | analogy/collision_detection/triangle_col_detect.py | gandalf15/analogy | a687496e45557084676c430a61e6dfd0e8233018 | [
"BSD-3-Clause"
] | 1 | 2019-05-06T09:57:21.000Z | 2019-05-06T09:57:21.000Z | analogy/collision_detection/triangle_col_detect.py | gandalf15/analogy | a687496e45557084676c430a61e6dfd0e8233018 | [
"BSD-3-Clause"
] | null | null | null | analogy/collision_detection/triangle_col_detect.py | gandalf15/analogy | a687496e45557084676c430a61e6dfd0e8233018 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import ctypes
import os
# Absolute directory of this module; the compiled shared objects are expected
# in its ``build`` subdirectory.
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))

# C implementations of the two triangle-triangle intersection tests, loaded
# once at import time via ctypes.
C_MOLLERS = ctypes.CDLL(os.path.join(CURRENT_PATH, 'build/mollers_tri_tri.so'))
C_DEVILLERS = ctypes.CDLL(
    os.path.join(CURRENT_PATH, 'build/devillers_tri_tri.so'))
def mollers_alg(tri_1, tri_2):
    """Wrapper for Moller's triangle-triangle intersection test (C code).

    Args:
        tri_1(list): A list of 3 lists with space coordinates in 3D space.
        tri_2(list): A list of 3 lists with space coordinates in 3D space.

    Returns:
        Non-zero if the two triangles collide.
    """
    vec3 = ctypes.c_float * 3

    def to_c(point):
        # Pack the first three coordinates into a C float[3].
        return vec3(point[0], point[1], point[2])

    corners = [to_c(p) for p in tri_1] + [to_c(p) for p in tri_2]
    # C signature: int NoDivTriTriIsect(float V0[3], float V1[3], float V2[3],
    #                                   float U0[3], float U1[3], float U2[3])
    return C_MOLLERS.NoDivTriTriIsect(*corners)
def devillers_alg(tri_1, tri_2):
    """Wrapper for Devillers' triangle-triangle intersection test (C code).

    Args:
        tri_1(list): A list of 3 lists with space coordinates in 3D space.
        tri_2(list): A list of 3 lists with space coordinates in 3D space.

    Returns:
        Non-zero if the two triangles collide.
    """
    vec3 = ctypes.c_double * 3

    def to_c(point):
        # Pack the first three coordinates into a C double[3].
        return vec3(point[0], point[1], point[2])

    corners = [to_c(p) for p in tri_1] + [to_c(p) for p in tri_2]
    # C signature: int tri_tri_overlap_test_3d(p1, q1, r1, p2, q2, r2)
    return C_DEVILLERS.tri_tri_overlap_test_3d(*corners)
802596ce6179e23a0644ac73971b6f2da0840b1d | 5,381 | py | Python | atomic_reactor/plugins/check_and_set_platforms.py | qixiang/atomic-reactor | 050325f6be43f6b9399bf5472b87190ada8305bd | [
"BSD-3-Clause"
] | 113 | 2015-07-23T21:37:07.000Z | 2019-05-28T18:58:26.000Z | atomic_reactor/plugins/check_and_set_platforms.py | qixiang/atomic-reactor | 050325f6be43f6b9399bf5472b87190ada8305bd | [
"BSD-3-Clause"
] | 921 | 2015-07-13T14:25:48.000Z | 2019-05-31T14:57:39.000Z | atomic_reactor/plugins/check_and_set_platforms.py | qixiang/atomic-reactor | 050325f6be43f6b9399bf5472b87190ada8305bd | [
"BSD-3-Clause"
] | 42 | 2015-07-17T12:48:25.000Z | 2019-03-29T07:48:57.000Z | """
Copyright (c) 2018 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Query the koji build target, if any, to find the enabled architectures. Remove any excluded
architectures, and return the resulting list.
"""
from typing import List, Optional
from atomic_reactor.plugin import Plugin
from atomic_reactor.util import is_scratch_build, is_isolated_build, map_to_user_params
from atomic_reactor.constants import PLUGIN_CHECK_AND_SET_PLATFORMS_KEY
from atomic_reactor.config import get_koji_session
class CheckAndSetPlatformsPlugin(Plugin):
    """Determine the set of platforms (architectures) to build for.

    Queries the Koji build target (when one is configured) for its enabled
    architectures, honours user-supplied platform overrides for scratch or
    isolated builds, drops platforms that have no enabled remote hosts,
    applies the source's only/excluded platform limits, and finally
    initialises per-platform build directories on the workflow.
    """

    key = PLUGIN_CHECK_AND_SET_PLATFORMS_KEY
    # Platform resolution is mandatory: a failure here must abort the build.
    is_allowed_to_fail = False

    args_from_user_params = map_to_user_params("koji_target")

    def __init__(self, workflow, koji_target=None):
        """
        constructor

        :param workflow: DockerBuildWorkflow instance
        :param koji_target: str, Koji build target name; when None, platforms
            come exclusively from the user parameters
        """
        # call parent constructor
        super(CheckAndSetPlatformsPlugin, self).__init__(workflow)
        self.koji_target = koji_target

    def _limit_platforms(self, platforms: List[str]) -> List[str]:
        """Limit platforms in a specific range by platforms config.

        Restricts the candidates to the source config's ``only_platforms``
        set (when non-empty) and removes anything in ``excluded_platforms``.

        :param platforms: a list of platforms to be filtered.
        :type platforms: list[str]
        :return: the limited platforms (unordered).
        :rtype: list[str]
        """
        final_platforms = set(platforms)
        source_config = self.workflow.source.config
        only_platforms = set(source_config.only_platforms)
        excluded_platforms = set(source_config.excluded_platforms)

        if only_platforms:
            # An identical only/excluded pair would exclude everything; warn,
            # since that is almost certainly a configuration mistake.
            if only_platforms == excluded_platforms:
                self.log.warning('only and not platforms are the same: %r', only_platforms)
            final_platforms &= only_platforms
        return list(final_platforms - excluded_platforms)

    def run(self) -> Optional[List[str]]:
        """
        run the plugin

        :return: list of platforms to build for, or None when the koji
            target defines no architectures
        :raises RuntimeError: when no platforms can be determined, when a
            requested platform has all its remote hosts disabled, or when
            the only/excluded limits leave nothing to build
        """
        user_platforms: Optional[List[str]] = self.workflow.user_params.get("platforms")

        if self.koji_target:
            # Resolve the build target's arches at a fixed koji event so the
            # target/tag/config lookups are mutually consistent.
            koji_session = get_koji_session(self.workflow.conf)
            self.log.info("Checking koji target for platforms")
            event_id = koji_session.getLastEvent()['id']
            target_info = koji_session.getBuildTarget(self.koji_target, event=event_id)
            build_tag = target_info['build_tag']
            koji_build_conf = koji_session.getBuildConfig(build_tag, event=event_id)
            koji_platforms = koji_build_conf['arches']
            if not koji_platforms:
                self.log.info("No platforms found in koji target")
                return None
            platforms = koji_platforms.split()
            self.log.info("Koji platforms are %s", sorted(platforms))

            # Only scratch/isolated builds may override the target's arches.
            if is_scratch_build(self.workflow) or is_isolated_build(self.workflow):
                override_platforms = set(user_platforms or [])
                if override_platforms and override_platforms != set(platforms):
                    sorted_platforms = sorted(override_platforms)
                    self.log.info("Received user specified platforms %s", sorted_platforms)
                    self.log.info("Using them instead of koji platforms")
                    # platforms from user params do not match platforms from koji target
                    # that almost certainly means they were overridden and should be used
                    self.workflow.build_dir.init_build_dirs(sorted_platforms, self.workflow.source)
                    return sorted_platforms
        else:
            platforms = user_platforms
            self.log.info(
                "No koji platforms. User specified platforms are %s",
                sorted(platforms) if platforms else None,
            )

        if not platforms:
            raise RuntimeError("Cannot determine platforms; no koji target or platform list")

        # Filter platforms based on configured remote hosts
        remote_host_pools = self.workflow.conf.remote_hosts.get("pools", {})
        enabled_platforms = []
        defined_but_disabled = []

        def has_enabled_hosts(platform: str) -> bool:
            # True when at least one remote host for the platform is enabled.
            platform_hosts = remote_host_pools.get(platform, {})
            return any(host_info["enabled"] for host_info in platform_hosts.values())

        for p in platforms:
            if has_enabled_hosts(p):
                enabled_platforms.append(p)
            elif p in remote_host_pools:
                # Platform is configured but every host for it is disabled --
                # treated as a hard error below rather than silently skipped.
                defined_but_disabled.append(p)
            else:
                self.log.warning("No remote hosts found for platform '%s' in "
                                 "reactor config map, skipping", p)

        if defined_but_disabled:
            msg = 'Platforms specified in config map, but have all remote hosts disabled' \
                  ' {}'.format(defined_but_disabled)
            raise RuntimeError(msg)

        final_platforms = self._limit_platforms(enabled_platforms)
        self.log.info("platforms in limits : %s", final_platforms)
        if not final_platforms:
            self.log.error("platforms in limits are empty")
            raise RuntimeError("No platforms to build for")

        self.workflow.build_dir.init_build_dirs(final_platforms, self.workflow.source)
        return final_platforms
| 42.370079 | 99 | 0.65564 | 4,774 | 0.887196 | 0 | 0 | 0 | 0 | 0 | 0 | 1,571 | 0.291953 |
80259a6aaef5b03b1f4997396bef339696417324 | 4,876 | py | Python | py_work/spider/request/GetMolculeUnderTarget.py | kotori-y/kotori_work | 51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d | [
"MIT"
] | 6 | 2020-05-14T09:47:04.000Z | 2021-06-05T03:03:45.000Z | py_work/spider/request/GetMolculeUnderTarget.py | kotori-y/kotori_work | 51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d | [
"MIT"
] | null | null | null | py_work/spider/request/GetMolculeUnderTarget.py | kotori-y/kotori_work | 51ebfdf49571ae34c246dc5b37cc86e25f4ccf3d | [
"MIT"
] | 4 | 2020-04-20T13:17:27.000Z | 2021-08-07T19:44:50.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 13:33:17 2020
@Author: Zhi-Jiang Yang, Dong-Sheng Cao
@Institution: CBDD Group, Xiangya School of Pharmaceutical Science, CSU, China
@Homepage: http://www.scbdd.com
@Mail: yzjkid9@gmail.com; oriental-cds@163.com
@Blog: https://blog.iamkotori.com
♥I love Princess Zelda forever♥
"""
from multiprocessing import Pool
import xml.etree.ElementTree as ET
from lxml import etree
from requests import Session
import json
import os
os.chdir(r'')
class MolFromProtein(object):
    """Downloader for ChEMBL bioactivity data of a protein target.

    Resolves a UniProt accession to its ChEMBL target id, queues a CSV
    export of the target's activities (IC50/Ki/EC50/Kd with ligand
    efficiency) on the ChEMBL web site, and downloads the gzipped result
    into ``./data/``.
    """
    def __init__(self, UniprotID):
        """
        :param UniprotID: UniProt accession of the target protein.
        """
        self.UniprotID = UniprotID
        # A single session is reused for every request so that cookies
        # persist between the page fetch, the queue POST and the download.
        self.session = Session()
        # Browser-like headers captured from a real session.
        # NOTE(review): the hard-coded Cookie/csrftoken values may expire --
        # confirm they are still accepted by the ChEMBL site.
        self.headers = {
            "Connection": "keep-alive",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Cookie": "_ga=GA1.3.757562829.1572445921; csrftoken=nEd76UY2CAro6FtS8rAVTvJxWc1ZFy7XBMs3Rltm265uLG4z5wXOHSyDewy8j5Pa; chembl-website-v0.2-data-protection-accepted=true; _gid=GA1.3.302613681.1586835743",
            "Host": "www.ebi.ac.uk",
            "Origin": "https://www.ebi.ac.uk",
            "Referer": "https://www.ebi.ac.uk/chembl/g/",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36"
        }

    def GetInfoFromUniprot(self):
        """Fetch the UniProt XML record and extract ChEMBL cross-reference ids.

        Returns the concatenation of all ChEMBL dbReference ids found for
        this accession (typically a single id).

        NOTE(review): on any failure (HTTP error, timeout, parse error) res
        becomes [None], and ''.join([None]) below raises TypeError -- the
        fallback never yields a usable value; confirm intended behavior.
        """
        request_url = 'https://www.uniprot.org/uniprot/{}.xml'.format(self.UniprotID)
        try:
            r = self.session.get(request_url,timeout=30)
            if r.status_code == 200:
                tree = ET.fromstring(r.text)
                # The first child of the root is the <entry> element.
                entry = tree[0]
                dbReference = entry.findall('{http://uniprot.org/uniprot}dbReference[@type="ChEMBL"]')
                res = [i.attrib['id'] for i in dbReference]
                # print(res)
            else:
                res = [None]
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        except:
            res = [None]
        return ''.join(res)

    def GetDownloadID(self):
        """Queue a CSV export on the ChEMBL site and return its download id.

        Visits the filtered activities page to obtain a CSRF token, then
        POSTs the elasticsearch-style query to the download queue endpoint.
        """
        ChEMBLID = self.GetInfoFromUniprot()
        # Activities page pre-filtered to this target and the measured
        # activity types; fetched mainly to obtain the CSRF token below.
        url = 'https://www.ebi.ac.uk/chembl/g/#browse/activities/filter/target_chembl_id%3A{}%20AND%20standard_type%3A(IC50%20OR%20Ki%20OR%20EC50%20OR%20Kd)%20AND%20_exists_%3Astandard_value%20AND%20_exists_%3Aligand_efficiency'.format(ChEMBLID)
        # print(url)
        html = self.session.get(url, headers=self.headers).text
        tree = etree.HTML(html)
        # The CSRF token is embedded in a hidden input on the page.
        token = tree.xpath('//*[@class="GLaDOS-top-s3cre7"]//@value')[0]
        # token = token.encode('utf-8').decode('utf-8')
        # print(token)
        data = {
            "csrfmiddlewaretoken": token,
            "index_name": "chembl_26_activity",
            "query": '{"bool":{"must":[{"query_string":{"analyze_wildcard":true,"query":"target_chembl_id:%s AND standard_type:(IC50 OR Ki OR EC50 OR Kd) AND _exists_:standard_value AND _exists_:ligand_efficiency"}}],"filter":[]}}'%(ChEMBLID),
            # "query": '{"bool":{"must":[{"query_string":{"analyze_wildcard": true,"query":"_metadata.related_targets.all_chembl_ids:%s"}}],"filter":[]}}'%(self.GetInfoFromUniprot()),
            "format": "CSV",
            "context_id": "undefined",
            "download_columns_group": "undefined",
        }
        # print(data['csrfmiddlewaretoken'])
        url = 'https://www.ebi.ac.uk/chembl/glados_api/shared/downloads/queue_download/'
        response = self.session.post(url, headers=self.headers, data=data)
        html = response.text
        # return html
        # print(json.loads(html)['download_id'])
        # print(html)
        return json.loads(html)['download_id']

    def Download(self):
        """Download the queued gzipped CSV into ./data/<UniprotID>.csv.gz.

        Raises AssertionError when the download URL does not answer 200.
        """
        url = 'https://www.ebi.ac.uk/chembl/dynamic-downloads/%s.gz'%(self.GetDownloadID())
        # print(url)
        r = self.session.get(url, headers=self.headers)
        assert r.status_code == 200
        # Stream the response to disk in small chunks to bound memory use.
        with open('./data/{}.csv.gz'.format(self.UniprotID), 'wb') as f_obj:
            for chunk in r.iter_content(chunk_size=512):
                f_obj.write(chunk)
            # Redundant: the with-statement already closes the file.
            f_obj.close()
        print('{} Finished'.format(self.UniprotID))
def main(UniprotID):
    """Download ChEMBL activity data for one UniProt accession.

    Any failure is appended to 'Error.log' (one accession per line) so a
    batch run over many accessions can continue past individual errors.

    :param UniprotID: UniProt accession to process.
    """
    try:
        download = MolFromProtein(UniprotID)
        download.Download()
    # Bug fix: the original bare ``except:`` also swallowed
    # KeyboardInterrupt/SystemExit, making the multiprocessing batch
    # impossible to interrupt cleanly.
    except Exception:
        with open('Error.log', 'a') as f_obj:
            f_obj.write(UniprotID)
            f_obj.write('\n')
f_obj.close()
if '__main__' == __name__:
    import pandas as pd
    # Load the list of UniProt accessions to process (column 'uni').
    data = pd.read_csv(r'pro_info.csv')
    unis = data.uni.tolist()
    # Fan the downloads out over a pool of worker processes; failures are
    # recorded by main() into Error.log instead of aborting the batch.
    ps = Pool()
    for UniprotID in unis:
        ps.apply_async(main, args=(UniprotID, ))
    ps.close()
    ps.join()
| 33.170068 | 245 | 0.578138 | 3,769 | 0.772336 | 0 | 0 | 0 | 0 | 0 | 0 | 2,275 | 0.466189 |
8025bd0c1885c3866fd320401f51ffbcd535cb06 | 1,954 | py | Python | lr_schedulers/flatten_cosanneal.py | yumatsuoka/SofNDLTeches | 1b36e8f99068e8dd25ebccd4a60ab9375609f359 | [
"MIT"
] | null | null | null | lr_schedulers/flatten_cosanneal.py | yumatsuoka/SofNDLTeches | 1b36e8f99068e8dd25ebccd4a60ab9375609f359 | [
"MIT"
] | null | null | null | lr_schedulers/flatten_cosanneal.py | yumatsuoka/SofNDLTeches | 1b36e8f99068e8dd25ebccd4a60ab9375609f359 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# It's written by https://github.com/yumatsuoka
from __future__ import print_function
import math
from torch.optim.lr_scheduler import _LRScheduler
class FlatplusAnneal(_LRScheduler):
    """Flat-then-cosine learning-rate schedule.

    Holds the base learning rate constant for the first
    ``int(step_size * max_iter)`` steps, then decays it to ``eta_min``
    along a half cosine over the remaining steps.

    :param optimizer: wrapped torch optimizer.
    :param max_iter: total number of scheduler steps.
    :param step_size: fraction of ``max_iter`` spent in the flat phase.
    :param eta_min: final learning rate reached at ``max_iter``.
    :param last_epoch: index of the last epoch (-1 starts fresh).
    """

    def __init__(self, optimizer, max_iter, step_size=0.7, eta_min=0, last_epoch=-1):
        self.flat_range = int(max_iter * step_size)
        self.T_max = max_iter - self.flat_range
        # Bug fix: eta_min was previously hardcoded to 0, silently ignoring
        # the constructor argument. Default behavior (eta_min=0) unchanged.
        self.eta_min = eta_min
        super(FlatplusAnneal, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the learning rate for each param group at the current step."""
        if self.last_epoch < self.flat_range:
            # Flat phase: keep the base learning rates unchanged.
            return [base_lr for base_lr in self.base_lrs]
        else:
            # Annealing phase: half-cosine decay from base_lr to eta_min.
            cr_epoch = self.last_epoch - self.flat_range
            return [
                self.eta_min
                + (base_lr - self.eta_min)
                * (1 + math.cos(math.pi * (cr_epoch / self.T_max)))
                / 2
                for base_lr in self.base_lrs
            ]
if __name__ == "__main__":
    import torch

    # import matplotlib.pyplot as plt

    def check_scheduler(optimizer, scheduler, epochs):
        # Record the scheduler's lr at every step for manual inspection.
        lr_list = []
        for epoch in range(epochs):
            now_lr = scheduler.get_lr()
            lr_list.append(now_lr)
            optimizer.step()
            scheduler.step()
        return lr_list

    # def show_graph(lr_lists, epochs):
    #     plt.clf()
    #     plt.rcParams["figure.figsize"] = [20, 5]
    #     x = list(range(epochs))
    #     plt.plot(x, lr_lists, label="line L")
    #     plt.plot()
    #     plt.xlabel("iterations")
    #     plt.ylabel("learning rate")
    #     plt.title("Check Flat plus cosine annealing lr")
    #     plt.show()

    # Smoke test: run the schedule on a dummy model and collect the lrs.
    lr = 0.1
    epochs = 100
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    scheduler = FlatplusAnneal(optimizer, max_iter=epochs, step_size=0.7)
    lrs = check_scheduler(optimizer, scheduler, epochs)
    # show_graph(lrs, epochs)
| 28.735294 | 85 | 0.599795 | 752 | 0.384852 | 0 | 0 | 0 | 0 | 0 | 0 | 468 | 0.239509 |
8025e5f72fc9d4b3c01001445187f2773b458389 | 15,270 | py | Python | pysnmp-with-texts/CISCOSB-RMON.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCOSB-RMON.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCOSB-RMON.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCOSB-RMON (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCOSB-RMON
# Produced by pysmi-0.3.4 at Wed May 1 12:23:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint")
switch001, = mibBuilder.importSymbols("CISCOSB-MIB", "switch001")
EntryStatus, OwnerString = mibBuilder.importSymbols("RMON-MIB", "EntryStatus", "OwnerString")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, iso, Gauge32, TimeTicks, Counter64, Counter32, Bits, NotificationType, Integer32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Unsigned32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "iso", "Gauge32", "TimeTicks", "Counter64", "Counter32", "Bits", "NotificationType", "Integer32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Unsigned32", "IpAddress")
TruthValue, TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowStatus", "DisplayString")
rlRmonControl = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49))
rlRmonControl.setRevisions(('2004-06-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: rlRmonControl.setRevisionsDescriptions(('Initial version of this MIB.',))
if mibBuilder.loadTexts: rlRmonControl.setLastUpdated('200406010000Z')
if mibBuilder.loadTexts: rlRmonControl.setOrganization('Cisco Small Business')
if mibBuilder.loadTexts: rlRmonControl.setContactInfo('Postal: 170 West Tasman Drive San Jose , CA 95134-1706 USA Website: Cisco Small Business Home http://www.cisco.com/smb>;, Cisco Small Business Support Community <http://www.cisco.com/go/smallbizsupport>')
if mibBuilder.loadTexts: rlRmonControl.setDescription('The private MIB module definition for switch001 RMON MIB.')
rlRmonControlMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlRmonControlMibVersion.setStatus('current')
if mibBuilder.loadTexts: rlRmonControlMibVersion.setDescription("The MIB's version. The current version is 1")
rlRmonControlHistoryControlQuotaBucket = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlRmonControlHistoryControlQuotaBucket.setStatus('current')
if mibBuilder.loadTexts: rlRmonControlHistoryControlQuotaBucket.setDescription('Maximum number of buckets to be used by each History Control group entry. changed to read only, value is derived from rsMaxRmonEtherHistoryEntrie')
rlRmonControlHistoryControlMaxGlobalBuckets = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(300)).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlRmonControlHistoryControlMaxGlobalBuckets.setStatus('current')
if mibBuilder.loadTexts: rlRmonControlHistoryControlMaxGlobalBuckets.setDescription('Maximum number of buckets to be used by all History Control group entries together.')
rlHistoryControlTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4), )
if mibBuilder.loadTexts: rlHistoryControlTable.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlTable.setDescription('A list of rlHistory control entries. This table is exactly like the corresponding RMON I History control group table, but is used to sample statistics of counters not specified by the RMON I statistics group.')
rlHistoryControlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1), ).setIndexNames((0, "CISCOSB-RMON", "rlHistoryControlIndex"))
if mibBuilder.loadTexts: rlHistoryControlEntry.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlEntry.setDescription('A list of parameters that set up a periodic sampling of statistics. As an example, an instance of the rlHistoryControlInterval object might be named rlHistoryControlInterval.2')
rlHistoryControlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryControlIndex.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlIndex.setDescription('An index that uniquely identifies an entry in the rlHistoryControl table. Each such entry defines a set of samples at a particular interval for a sampled counter.')
rlHistoryControlDataSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 2), ObjectIdentifier()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlDataSource.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlDataSource.setDescription('This object identifies the source of the data for which historical data was collected and placed in the rlHistory table. This object may not be modified if the associated rlHistoryControlStatus object is equal to valid(1).')
rlHistoryControlBucketsRequested = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(50)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlBucketsRequested.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlBucketsRequested.setDescription('The requested number of discrete time intervals over which data is to be saved in the part of the rlHistory table associated with this rlHistoryControlEntry. When this object is created or modified, the probe should set rlHistoryControlBucketsGranted as closely to this object as is possible for the particular probe implementation and available resources.')
rlHistoryControlBucketsGranted = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryControlBucketsGranted.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlBucketsGranted.setDescription('The number of discrete sampling intervals over which data shall be saved in the part of the rlHistory table associated with this rlHistoryControlEntry. When the associated rlHistoryControlBucketsRequested object is created or modified, the probe should set this object as closely to the requested value as is possible for the particular probe implementation and available resources. The probe must not lower this value except as a result of a modification to the associated rlHistoryControlBucketsRequested object. There will be times when the actual number of buckets associated with this entry is less than the value of this object. In this case, at the end of each sampling interval, a new bucket will be added to the rlHistory table. When the number of buckets reaches the value of this object and a new bucket is to be added to the media-specific table, the oldest bucket associated with this rlHistoryControlEntry shall be deleted by the agent so that the new bucket can be added. When the value of this object changes to a value less than the current value, entries are deleted from the rlHistory table. Enough of the oldest of these entries shall be deleted by the agent so that their number remains less than or equal to the new value of this object. When the value of this object changes to a value greater than the current value, the number of associated rlHistory table entries may be allowed to grow.')
rlHistoryControlInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3600)).clone(1800)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlInterval.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlInterval.setDescription('The interval in seconds over which the data is sampled for each bucket in the part of the rlHistory table associated with this rlHistoryControlEntry. This interval can be set to any number of seconds between 1 and 3600 (1 hour). Because the counters in a bucket may overflow at their maximum value with no indication, a prudent manager will take into account the possibility of overflow in any of the associated counters. It is important to consider the minimum time in which any counter could overflow and set the rlHistoryControlInterval object to a value This object may not be modified if the associated rlHistoryControlStatus object is equal to valid(1).')
rlHistoryControlOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 6), OwnerString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlOwner.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlOwner.setDescription('The entity that configured this entry and is therefore using the resources assigned to it.')
rlHistoryControlStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 4, 1, 7), EntryStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlHistoryControlStatus.setStatus('current')
if mibBuilder.loadTexts: rlHistoryControlStatus.setDescription('The status of this rlHistoryControl entry. Each instance of the rlHistory table associated with this rlHistoryControlEntry will be deleted by the agent if this rlHistoryControlEntry is not equal to valid(1).')
rlHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5), )
if mibBuilder.loadTexts: rlHistoryTable.setStatus('current')
if mibBuilder.loadTexts: rlHistoryTable.setDescription('A list of history entries.')
rlHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1), ).setIndexNames((0, "CISCOSB-RMON", "rlHistoryIndex"), (0, "CISCOSB-RMON", "rlHistorySampleIndex"))
if mibBuilder.loadTexts: rlHistoryEntry.setStatus('current')
if mibBuilder.loadTexts: rlHistoryEntry.setDescription('An historical statistics sample of a counter specified by the corresponding history control entry. This sample is associated with the rlHistoryControlEntry which set up the parameters for a regular collection of these samples. As an example, an instance of the rlHistoryPkts object might be named rlHistoryPkts.2.89')
rlHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryIndex.setStatus('current')
if mibBuilder.loadTexts: rlHistoryIndex.setDescription('The history of which this entry is a part. The history identified by a particular value of this index is the same history as identified by the same value of rlHistoryControlIndex.')
rlHistorySampleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistorySampleIndex.setStatus('current')
if mibBuilder.loadTexts: rlHistorySampleIndex.setDescription('An index that uniquely identifies the particular sample this entry represents among all samples associated with the same rlHistoryControlEntry. This index starts at 1 and increases by one as each new sample is taken.')
rlHistoryIntervalStart = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryIntervalStart.setStatus('current')
if mibBuilder.loadTexts: rlHistoryIntervalStart.setDescription('The value of sysUpTime at the start of the interval over which this sample was measured. If the probe keeps track of the time of day, it should start the first sample of the history at a time such that when the next hour of the day begins, a sample is started at that instant. Note that following this rule may require the probe to delay collecting the first sample of the history, as each sample must be of the same interval. Also note that the sample which is currently being collected is not accessible in this table until the end of its interval.')
rlHistoryValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlHistoryValue.setStatus('current')
if mibBuilder.loadTexts: rlHistoryValue.setDescription('The value of the sampled counter at the time of this sampling.')
rlControlHistoryControlQuotaBucket = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlControlHistoryControlQuotaBucket.setStatus('current')
if mibBuilder.loadTexts: rlControlHistoryControlQuotaBucket.setDescription('Maximum number of buckets to be used by each rlHistoryControlTable entry.')
rlControlHistoryControlMaxGlobalBuckets = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(300)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlControlHistoryControlMaxGlobalBuckets.setStatus('current')
if mibBuilder.loadTexts: rlControlHistoryControlMaxGlobalBuckets.setDescription('Maximum number of buckets to be used by all rlHistoryControlTable entries together.')
rlControlHistoryMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 9, 6, 1, 101, 49, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(300)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlControlHistoryMaxEntries.setStatus('current')
if mibBuilder.loadTexts: rlControlHistoryMaxEntries.setDescription('Maximum number of rlHistoryTable entries.')
mibBuilder.exportSymbols("CISCOSB-RMON", rlHistoryControlIndex=rlHistoryControlIndex, rlHistoryTable=rlHistoryTable, rlHistoryControlOwner=rlHistoryControlOwner, rlControlHistoryMaxEntries=rlControlHistoryMaxEntries, rlRmonControl=rlRmonControl, rlHistoryControlBucketsRequested=rlHistoryControlBucketsRequested, rlHistoryValue=rlHistoryValue, rlHistoryControlDataSource=rlHistoryControlDataSource, PYSNMP_MODULE_ID=rlRmonControl, rlControlHistoryControlQuotaBucket=rlControlHistoryControlQuotaBucket, rlHistoryControlEntry=rlHistoryControlEntry, rlRmonControlHistoryControlQuotaBucket=rlRmonControlHistoryControlQuotaBucket, rlHistoryIntervalStart=rlHistoryIntervalStart, rlHistoryEntry=rlHistoryEntry, rlHistoryIndex=rlHistoryIndex, rlHistorySampleIndex=rlHistorySampleIndex, rlHistoryControlBucketsGranted=rlHistoryControlBucketsGranted, rlHistoryControlTable=rlHistoryControlTable, rlControlHistoryControlMaxGlobalBuckets=rlControlHistoryControlMaxGlobalBuckets, rlRmonControlHistoryControlMaxGlobalBuckets=rlRmonControlHistoryControlMaxGlobalBuckets, rlRmonControlMibVersion=rlRmonControlMibVersion, rlHistoryControlStatus=rlHistoryControlStatus, rlHistoryControlInterval=rlHistoryControlInterval)
| 171.573034 | 1,487 | 0.806418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,114 | 0.465881 |
8026986bb4d58676af3e3f51afb0da5721b218c2 | 604 | py | Python | script/lib/git.py | ymmuse/electron-libcc | 364cf9c6044912dc3725f12e7136730937d1a96d | [
"MIT"
] | null | null | null | script/lib/git.py | ymmuse/electron-libcc | 364cf9c6044912dc3725f12e7136730937d1a96d | [
"MIT"
] | null | null | null | script/lib/git.py | ymmuse/electron-libcc | 364cf9c6044912dc3725f12e7136730937d1a96d | [
"MIT"
] | 2 | 2019-09-05T03:27:45.000Z | 2019-10-03T13:02:48.000Z | """Git helper functions.
Everything in here should be project agnostic, shouldn't rely on project's structure,
and make any assumptions about the passed arguments or calls outcomes.
"""
import subprocess
def apply(repo, patch_path, reverse=False):
    """Apply *patch_path* onto *repo* with `git apply`.

    Whitespace differences are tolerated/fixed; set reverse=True to
    un-apply a previously applied patch. Returns True when git exited
    with status 0, False otherwise.
    """
    command = [
        'git', 'apply',
        '--directory', repo,
        '--ignore-space-change',
        '--ignore-whitespace',
        '--whitespace', 'fix',
    ]
    if reverse:
        command.append('--reverse')
    command.extend(['--', patch_path])
    # success is simply a zero exit code from git
    return subprocess.call(command) == 0
| 24.16 | 85 | 0.64404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 289 | 0.478477 |
8027a68653b6898390007ff7b04b6f4c5243c2d2 | 4,308 | py | Python | model/A2J/a2j_utilities/post_processing.py | NVIDIA-AI-IOT/realtime_handpose_3d | 3f5ae9ccbf07defc39de7ce9e8b2213dda3be375 | [
"MIT"
] | 7 | 2021-01-29T19:45:55.000Z | 2021-12-07T01:23:15.000Z | model/A2J/a2j_utilities/post_processing.py | puruBHU/realtime_handpose_3d | 3f5ae9ccbf07defc39de7ce9e8b2213dda3be375 | [
"MIT"
] | null | null | null | model/A2J/a2j_utilities/post_processing.py | puruBHU/realtime_handpose_3d | 3f5ae9ccbf07defc39de7ce9e8b2213dda3be375 | [
"MIT"
] | 2 | 2021-03-05T11:02:17.000Z | 2021-05-22T02:26:44.000Z | '''
Copyright (c) 2019 Boshen Zhang
Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
# PROJ ROOT DIR: walk up from this file to the repository root and put it on
# sys.path so the project-level `pipeline` and `model` packages resolve even
# when this module is executed from its own directory.
DIR_PATH = os.path.dirname(os.path.abspath(__file__)) # a2j_utilities
A2J_PATH = os.path.join(DIR_PATH, os.path.pardir) # A2J
MODEL_PATH = os.path.join(A2J_PATH, os.path.pardir) # model
ROOT_PATH = os.path.join(MODEL_PATH, os.path.pardir) # root
sys.path.append(ROOT_PATH)
# Project-local imports (require the sys.path entry above).
import pipeline.constants as const
from model.A2J.a2j_utilities.a2j_utils import generate_anchors, shift
class PostProcess(nn.Module):
    """Post-processing head for A2J: turn dense anchor-level network outputs
    into one (x, y, depth) estimate per joint and per batch item.

    The dense anchor grid is precomputed from the feature-map shape and the
    anchor stride.
    """
    def __init__(self, p_h=None, p_w=None, shape=[const.A2J_TARGET_SIZE[1]//16, const.A2J_TARGET_SIZE[0]//16],\
            stride=const.A2J_STRIDE):
        """Precompute the anchor grid.

        :param p_h: anchor row offsets forwarded to generate_anchors
        :param p_w: anchor column offsets forwarded to generate_anchors
        :param shape: feature-map (rows, cols) used to tile the anchors
        :param stride: anchor stride in input pixels
        """
        super(PostProcess, self).__init__()
        anchors = generate_anchors(p_h=p_h, p_w=p_w)
        # (n_anchors, 2) anchor center coordinates, kept as a float tensor
        self.all_anchors = torch.from_numpy(shift(shape, stride, anchors)).float()
    def forward(self, joint_classifications, offset_regressions, depth_regressions):
        """Aggregate anchor-level predictions into per-joint estimates.

        :param joint_classifications: (batch, n_anchors, n_joints) anchor scores
        :param offset_regressions: (batch, n_anchors, n_joints, 2) xy offsets
        :param depth_regressions: (batch, n_anchors, n_joints) depth estimates
        :return: list of (n_joints, 3) tensors [x, y, depth], one per batch item
        """
        DEVICE = joint_classifications.device
        batch_size = joint_classifications.shape[0]
        anchor = self.all_anchors.to(DEVICE)  # (n_anchors, 2)
        predictions = list()
        for i in range(batch_size):
            joint_classification = joint_classifications[i]  # (n_anchors, n_joints)
            offset_regression = offset_regressions[i]        # (n_anchors, n_joints, 2)
            depth_regression = depth_regressions[i]          # (n_anchors, n_joints)
            # absolute xy per anchor: anchor position + per-joint offset
            xy_regression = torch.unsqueeze(anchor, 1).to(DEVICE) + offset_regression
            # softmax over the anchor axis: each anchor's contribution per joint
            reg_weight = F.softmax(joint_classification, dim=0)
            # expand the weights so they can scale both x and y
            reg_weight_xy = reg_weight.unsqueeze(2).expand(reg_weight.shape[0], reg_weight.shape[1], 2).to(DEVICE)
            prediction_xy = (reg_weight_xy * xy_regression).sum(0)   # (n_joints, 2)
            prediction_depth = (reg_weight * depth_regression).sum(0)
            prediction_depth = prediction_depth.unsqueeze(1).to(DEVICE)  # (n_joints, 1)
            # BUGFIX: concatenate xy with the weighted depth; the original
            # concatenated prediction_xy with itself, silently discarding the
            # depth values computed just above.
            prediction = torch.cat((prediction_xy, prediction_depth), 1)
            predictions.append(prediction)
        return predictions
| 53.85 | 460 | 0.710074 | 2,674 | 0.620706 | 0 | 0 | 0 | 0 | 0 | 0 | 2,318 | 0.538069 |
8028cc8cedb341229fafd5bad60d6172a2708e24 | 713 | py | Python | edge.py | s1nisteR/opencv-practice | d55e83d39b0d6a1fe7994cb0ac8010b04b6a36a5 | [
"MIT"
] | null | null | null | edge.py | s1nisteR/opencv-practice | d55e83d39b0d6a1fe7994cb0ac8010b04b6a36a5 | [
"MIT"
] | null | null | null | edge.py | s1nisteR/opencv-practice | d55e83d39b0d6a1fe7994cb0ac8010b04b6a36a5 | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
# Load the sample image and show it alongside its grayscale conversion.
img = cv.imread('Photos/park.jpg')
cv.imshow('Original', img)
grayscale = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("Jesmin", grayscale)
# Laplacian edge detection: computed in CV_64F because the second derivative
# can be negative; take the absolute value and cast back to uint8 for display.
lap = cv.Laplacian(grayscale, cv.CV_64F)
absouluteLap = np.uint8(np.absolute(lap))
cv.imshow('Laplacian Edge Detection', absouluteLap)
# Sobel edge detection: horizontal (dx=1) and vertical (dy=1) gradients,
# combined with a bitwise OR.
sobelx = cv.Sobel(grayscale, cv.CV_64F, 1, 0)
sobely = cv.Sobel(grayscale, cv.CV_64F, 0, 1)
combined_sobel = cv.bitwise_or(sobelx, sobely)
cv.imshow("Combined Sobel", combined_sobel)
# Canny edge detection with hysteresis thresholds 100 (low) and 175 (high).
cannyImage = cv.Canny(grayscale, 100, 175)
cv.imshow("Canny Image", cannyImage)
cv.waitKey(0) | 23.766667 | 53 | 0.755961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.281907 |
8028fc69fc3f5261adc18f715036830ed0ee818b | 1,204 | py | Python | examples/regression.py | laudv/bitboost | 85caa1163a36e2099d0251caa912b28ad5d39f14 | [
"Apache-2.0"
] | 11 | 2019-07-10T12:50:52.000Z | 2021-08-18T03:27:28.000Z | examples/regression.py | laudv/bitboost | 85caa1163a36e2099d0251caa912b28ad5d39f14 | [
"Apache-2.0"
] | null | null | null | examples/regression.py | laudv/bitboost | 85caa1163a36e2099d0251caa912b28ad5d39f14 | [
"Apache-2.0"
] | null | null | null | import sys
import os
import timeit
# use local python package rather than the system install
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../python"))
from bitboost import BitBoostRegressor
import numpy as np
import sklearn.metrics
# Synthetic 3-valued categorical dataset: the target is a fixed linear
# combination of four indicator features, so the booster can fit it closely.
# NOTE(review): the RNG is not seeded, so runs are not reproducible — confirm
# whether that is intended for this example.
nfeatures = 5
nexamples = 10000
data = np.random.choice(np.array([0.0, 1.0, 2.0], dtype=BitBoostRegressor.numt),
                        size=(nexamples * 2, nfeatures))
target = (1.22 * (data[:, 0] > 1.0)
        + 0.65 * (data[:, 1] > 1.0)
        + 0.94 * (data[:, 2] != 2.0)
        + 0.13 * (data[:, 3] == 1.0)).astype(BitBoostRegressor.numt)

# First half trains, second half evaluates.
dtrain, ytrain = data[0:nexamples, :], target[0:nexamples]
dtest, ytest = data[nexamples:, :], target[nexamples:]

# Configure and train the booster (all features are categorical).
bit = BitBoostRegressor()
bit.objective = "l2"
bit.discr_nbits = 4
bit.max_tree_depth = 5
bit.learning_rate = 0.5
bit.niterations = 50
bit.categorical_features = list(range(nfeatures))
bit.fit(dtrain, ytrain)

train_pred = bit.predict(dtrain)
test_pred = bit.predict(dtest)
# These are mean-absolute-error scores; the original called them "accuracy",
# which misrepresented the metric being reported.
train_mae = sklearn.metrics.mean_absolute_error(ytrain, train_pred)
test_mae = sklearn.metrics.mean_absolute_error(ytest, test_pred)
print(f"bit train MAE: {train_mae}")
print(f"bit test MAE: {test_mae}")
| 30.1 | 80 | 0.695183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.114618 |
802907f21f2a3f816bad8d47fc8fb19d552566fb | 676 | py | Python | src/pyth2/io/BinaryStream.py | gnomeberry/pyth2 | 532d89e4ed22b4f9427069bf187ab836e2c2f538 | [
"MIT"
] | null | null | null | src/pyth2/io/BinaryStream.py | gnomeberry/pyth2 | 532d89e4ed22b4f9427069bf187ab836e2c2f538 | [
"MIT"
] | null | null | null | src/pyth2/io/BinaryStream.py | gnomeberry/pyth2 | 532d89e4ed22b4f9427069bf187ab836e2c2f538 | [
"MIT"
] | null | null | null | '''
Created on 2015/11/07
@author: _
'''
from types import NoneType
from pyth2.contracts import TypeValidator as tv
from pyth2.io.Stream import Stream, StreamDirection
class BinaryStream(Stream):
    """A Stream specialization whose payload unit is ``bytearray``.

    ``read`` and ``write`` are abstract placeholders here; concrete
    subclasses must override them.
    """
    @tv.forms(object, StreamDirection)
    def __init__(self, direction):
        super(BinaryStream, self).__init__(direction, bytearray)
    @tv.forms(object, tv.MoreThan(0, False))
    @tv.returns((bytearray, NoneType))
    @tv.raises(IOError)
    def read(self, bufferSize = 1):
        """Read up to *bufferSize* bytes; per the @tv.returns contract the
        result is a bytearray or None."""
        # NotImplementedError is the idiomatic marker for an abstract method
        # (the original raised a plain Exception).
        raise NotImplementedError("Not implemented")
    @tv.forms(object, bytearray)
    @tv.raises(IOError)
    def write(self, contents):
        """Write *contents* (a bytearray) to the stream."""
        raise NotImplementedError("Not implemented")
| 24.142857 | 64 | 0.678994 | 503 | 0.744083 | 0 | 0 | 442 | 0.653846 | 0 | 0 | 75 | 0.110947 |
802a2589990d23e2d91111185701c08442085e13 | 4,633 | py | Python | app/bin/dltk/test/dltk_deployment.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 11 | 2020-10-13T05:27:59.000Z | 2021-09-23T02:56:32.000Z | app/bin/dltk/test/dltk_deployment.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 48 | 2020-10-15T09:53:36.000Z | 2021-07-05T15:33:24.000Z | app/bin/dltk/test/dltk_deployment.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 4 | 2020-12-04T08:51:35.000Z | 2022-03-27T09:42:20.000Z | import unittest
import os
import logging
import time
import re
import splunklib.client as client
import splunklib.results as results
from splunklib.binding import HTTPError
from . import dltk_api
from . import splunk_api
from . import dltk_environment
# Fallback extractors for level="..." and msg="..." fields in raw Splunk log
# lines when the search did not populate them; msg may span multiple lines.
level_prog = re.compile(r'level=\"([^\"]*)\"')
msg_prog = re.compile(r'msg=\"((?:\n|.)*)\"')
def run_job(algorithm_name):
    """Run the deployment saved-search for *algorithm_name* and re-emit its
    log events through the local logging module at the matching level.

    Raises when an event carries no recognizable "level" field.
    """
    environment_name = dltk_environment.get_name()
    query = "| savedsearch job:deploy:%s:%s | %s" % (
        algorithm_name,
        environment_name,
        'table _raw',
    )
    level_to_logger = {
        "DEBUG": logging.debug,
        "WARNING": logging.warning,
        "ERROR": logging.error,
        "INFO": logging.info,
    }
    for event in splunk_api.search(query):
        raw = event["_raw"]
        # fall back to regex extraction when Splunk did not split the fields
        if "level" not in event:
            match = level_prog.search(raw)
            if match:
                event["level"] = match.group(1)
        if "msg" not in event:
            match = msg_prog.search(raw)
            if match:
                event["msg"] = match.group(1)
        if "level" not in event:
            raise Exception("missing 'level' field in deploy result: %s" % raw)
        level = event["level"]
        msg = event["msg"]
        log = level_to_logger.get(level)
        if log is None:
            # unknown levels are surfaced loudly instead of dropped
            log = logging.warning
            msg = "UNEXPECTED LEVEL (%s): %s" % (level, msg)
        log(" %s" % msg)
def list_deployments(algorithm_name):
    """Return every deployment record of *algorithm_name*, across all
    environments."""
    payload = {"algorithm": algorithm_name}
    return dltk_api.call("GET", "deployments", data=payload)
def get_deployment(algorithm_name, environment_name, raise_if_not_exists=True):
    """Return the deployment of *algorithm_name* in *environment_name*.

    When no record exists: raise (default) or return None when
    raise_if_not_exists is False.
    """
    payload = {
        "algorithm": algorithm_name,
        "environment": environment_name,
    }
    deployments = dltk_api.call("GET", "deployments", data=payload)
    if not len(deployments):
        if raise_if_not_exists:
            raise Exception("could not find deployment")
        return None
    # the (algorithm, environment) pair identifies at most one deployment
    return deployments[0]
def deploy(algorithm_name, params={}):
    """Deploy *algorithm_name* into the test environment and block until the
    deployment reports the "deployed" status.

    params: extra fields merged into the POST payload. (The mutable default
    is tolerable here because the dict is only read, never mutated.)
    On any failure a best-effort undeploy runs before the error is re-raised.
    """
    # start from a clean slate: remove any stale deployment first
    undeploy(algorithm_name)
    splunk = splunk_api.connect()  # NOTE(review): result unused; presumably connect() has a required side effect — confirm
    environment_name = dltk_environment.get_name()
    # kick off the deployment; scheduling is disabled for tests
    dltk_api.call("POST", "deployments", data={
        **{
            "algorithm": algorithm_name,
            "environment": environment_name,
            "enable_schedule": False,
        },
        **params,
    }, return_entries=False)
    try:
        # poll until the record exists and reaches a terminal state; the
        # run_job() call both paces the loop and surfaces deployment logs
        while True:
            deployment = get_deployment(algorithm_name, environment_name, raise_if_not_exists=False)
            if deployment:
                # re-fetch with the raising variant to guard against the
                # record disappearing between the two calls
                deployment = get_deployment(algorithm_name, environment_name)
                status = deployment["status"]
                if status == "deploying":
                    logging.info("still deploying...")
                    run_job(algorithm_name)
                    continue
                if status == "deployed":
                    break
                # any status other than deploying/deployed is a failure
                status_message = deployment["status_message"]
                raise Exception("unexpected deployment status: %s: %s" % (status, status_message))
        logging.info("successfully deployed algo \"%s\"" % algorithm_name)
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt; kept as-is
        # since the exception is always re-raised after cleanup.
        logging.warning("error deploying '%s' to '%s' -> undeploying ..." % (algorithm_name, environment_name))
        undeploy(algorithm_name)
        logging.warning("finished undeploying")
        raise
def undeploy(algorithm_name):
    """Delete the deployment of *algorithm_name* in the test environment,
    retrying until the API answers 404 (nothing left to delete)."""
    splunk = splunk_api.connect()  # NOTE(review): result unused; presumably connect() has a required side effect — confirm
    environment_name = dltk_environment.get_name()
    while True:
        try:
            dltk_api.call("DELETE", "deployments", data={
                "algorithm": algorithm_name,
                "environment": environment_name,
                "enable_schedule": False,
            }, return_entries=False)
        except HTTPError as e:
            logging.error("error calling API: %s" % e)
            if e.status == 404:
                # deployment gone -> undeploy finished
                break
            raise
        # surface the deployment job's log output between delete attempts
        run_job(algorithm_name)
| 31.951724 | 111 | 0.550399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,237 | 0.266998 |
802aa95cd6ad4a3dbc07c2c9c9a4f9fabd942f52 | 302 | py | Python | examples/example_dc/example_dc.py | OlivierKamers/PyDC | 51a1ded9d067694968dd5855a3c20fc39df882f3 | [
"Apache-2.0"
] | 3 | 2019-03-21T13:07:18.000Z | 2022-03-31T12:24:59.000Z | examples/example_dc/example_dc.py | OlivierKamers/PyDC | 51a1ded9d067694968dd5855a3c20fc39df882f3 | [
"Apache-2.0"
] | null | null | null | examples/example_dc/example_dc.py | OlivierKamers/PyDC | 51a1ded9d067694968dd5855a3c20fc39df882f3 | [
"Apache-2.0"
] | 3 | 2019-02-27T11:12:38.000Z | 2020-07-26T20:41:54.000Z | from pydc import DC
def main():
    """Query the drawn/1 distribution twice: once with the model-level
    sample count and once with an explicit per-query sample count."""
    # 200 is the model-level sample count; the DC default is 0 samples,
    # which would produce nan later if no n_samples were provided
    dc = DC("example_dc.pl", 200)
    first_prob = dc.query("drawn(1)~=1")
    print(first_prob)
    second_prob = dc.query("drawn(1)~=1", n_samples=2000)
    print(second_prob)
if __name__ == "__main__":
    main()
| 25.166667 | 107 | 0.639073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.410596 |
802b03d8a8f74e07591150e943daaff1c7cc2c3e | 826 | py | Python | adet/modeling/DTInst/DTE/__init__.py | shuaiqi361/AdelaiDet | 35d944033a8d2f7aa623ad607b57bd8a1fe88b43 | [
"BSD-2-Clause"
] | null | null | null | adet/modeling/DTInst/DTE/__init__.py | shuaiqi361/AdelaiDet | 35d944033a8d2f7aa623ad607b57bd8a1fe88b43 | [
"BSD-2-Clause"
] | null | null | null | adet/modeling/DTInst/DTE/__init__.py | shuaiqi361/AdelaiDet | 35d944033a8d2f7aa623ad607b57bd8a1fe88b43 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .MaskLoader import MaskLoader
from .utils import IOUMetric, fast_ista, prepare_distance_transform_from_mask, \
prepare_overlay_DTMs_from_mask, prepare_extended_DTMs_from_mask, prepare_augmented_distance_transform_from_mask, \
prepare_distance_transform_from_mask_with_weights, tensor_to_dtm, prepare_complement_distance_transform_from_mask_with_weights
# Public API of this subpackage: mask loading, the IoU metric, sparse coding
# (fast_ista) and the distance-transform-map preparation helpers.
__all__ = ["MaskLoader", "IOUMetric",
           "prepare_distance_transform_from_mask", "fast_ista", "tensor_to_dtm",
           'prepare_overlay_DTMs_from_mask', 'prepare_extended_DTMs_from_mask',
           'prepare_augmented_distance_transform_from_mask', 'prepare_distance_transform_from_mask_with_weights',
           'prepare_complement_distance_transform_from_mask_with_weights']
| 68.833333 | 130 | 0.825666 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.46368 |
802b20d5dcc6e53ad2602168453ebf276324e2c7 | 143 | py | Python | Lms_Arpita/DatabaseConnectivity/MongoConnection.py | hcmuleva/personal-profile | 051b5a2f36b927951691f48abe584beb8bc25440 | [
"MIT"
] | null | null | null | Lms_Arpita/DatabaseConnectivity/MongoConnection.py | hcmuleva/personal-profile | 051b5a2f36b927951691f48abe584beb8bc25440 | [
"MIT"
] | 3 | 2020-07-13T17:46:32.000Z | 2020-07-26T10:30:59.000Z | Lms_Arpita/DatabaseConnectivity/MongoConnection.py | hcmuleva/personal-profile | 051b5a2f36b927951691f48abe584beb8bc25440 | [
"MIT"
] | null | null | null | from pymongo import MongoClient
def path():
    """Connect to the local MongoDB server and return a handle to the
    'UserBook' database."""
    return MongoClient('mongodb://localhost:27017/')['UserBook']
| 17.875 | 54 | 0.678322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.265734 |
802cdedc55b19a30eabaf8043c64d8287cf38eb3 | 1,064 | py | Python | example/exc/client.py | so1n/rap | e4e3f4fab9df6190793ec97008bccb669546f207 | [
"Apache-2.0"
] | 3 | 2020-12-24T14:42:49.000Z | 2022-03-23T07:28:58.000Z | example/exc/client.py | so1n/rap | e4e3f4fab9df6190793ec97008bccb669546f207 | [
"Apache-2.0"
] | 1 | 2021-01-20T10:24:49.000Z | 2021-01-30T07:52:47.000Z | example/exc/client.py | so1n/rap | e4e3f4fab9df6190793ec97008bccb669546f207 | [
"Apache-2.0"
] | null | null | null | import asyncio
import time
from rap.client import Client
from rap.common.exceptions import FuncNotFoundError
# RPC client bound to the "example" server on localhost:9000.
client: Client = Client("example", [{"ip": "localhost", "port": "9000"}])
# Remote-procedure stubs. In register, an async def is required; the bodies
# are just `pass` because calls are evidently proxied to the server (the
# assertions in main() below expect server-side exceptions).
@client.register()
async def raise_msg_exc(a: int, b: int) -> int:
    pass
@client.register()
async def raise_server_not_found_func_exc(a: int) -> None:
    pass
async def main() -> None:
    """Call both remote endpoints and verify the exception type each one
    surfaces, then print the elapsed wall-clock time."""
    started = time.time()
    await client.start()
    try:
        await raise_msg_exc(1, 2)
    except Exception as exc:
        assert isinstance(exc, ZeroDivisionError)
    try:
        await raise_server_not_found_func_exc(1)
    except Exception as exc:
        assert isinstance(exc, FuncNotFoundError)
    print(time.time() - started)
    await client.stop()
if __name__ == "__main__":
import logging
logging.basicConfig(
format="[%(asctime)s %(levelname)s] %(message)s", datefmt="%y-%m-%d %H:%M:%S", level=logging.INFO
)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 23.130435 | 105 | 0.662594 | 0 | 0 | 0 | 0 | 161 | 0.151316 | 499 | 0.468985 | 178 | 0.167293 |
802cfac0d8c03fbbb9f5e792cb2a2873b402427e | 3,544 | py | Python | openapi/spec/ext/spec/base.py | t2y/openapi-ext-tools | 1253053af4f9a90f85b611e79a8f39c7d226a002 | [
"Apache-2.0"
] | 1 | 2020-08-30T07:47:57.000Z | 2020-08-30T07:47:57.000Z | openapi/spec/ext/spec/base.py | t2y/openapi-ext-tools | 1253053af4f9a90f85b611e79a8f39c7d226a002 | [
"Apache-2.0"
] | null | null | null | openapi/spec/ext/spec/base.py | t2y/openapi-ext-tools | 1253053af4f9a90f85b611e79a8f39c7d226a002 | [
"Apache-2.0"
] | null | null | null | import os
from ..utils.log import log
from ..utils.yaml import read_yaml, write_yaml
class BaseSpec:
    """Base wrapper around an OpenAPI spec file.

    Loads the YAML document, discovers external $ref targets, loads each of
    them recursively (as ReferenceSpec instances), rewrites external refs
    into local ones, and merges the referenced components into this document.
    """
    COMPONENTS = 'components'
    REF_FIELD = '$ref'
    def __init__(self, path,
                 read_func=read_yaml, write_func=write_yaml):
        """path: spec file location; read_func/write_func: YAML I/O hooks."""
        self.path = path
        self.path_dir = os.path.dirname(path)
        # BUGFIX: store the injected hooks; the original ignored both
        # parameters and always assigned read_yaml/write_yaml.
        self.read_func = read_func
        self.write_func = write_func
        self.data = None
        self.ref_filenames = set()
        self.ref_paths = []
        self.ref_spec = {}
    def __enter__(self):
        self.read()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        pass
    def read(self):
        """Load the spec document into self.data."""
        self.data = self.read_func(self.path)
    def write(self, path):
        """Write the (possibly bundled) document to *path*."""
        self.write_func(self.data, path)
    def get_external_refs_from_object(self, data):
        """Yield filenames of external $ref targets found in a mapping."""
        for key, value in data.items():
            if isinstance(value, dict):
                yield from self.get_external_refs_from_object(value)
            elif isinstance(value, list):
                yield from self.get_external_refs_from_list(value)
            if key == self.REF_FIELD:
                pos = value.find('#/')
                if pos > 0:  # pos == 0 would be a local ref ("#/...")
                    filename = value[:pos]
                    if os.path.basename(self.path) != filename:
                        yield filename
    def get_external_refs_from_list(self, data):
        """Yield filenames of external $ref targets found in a sequence."""
        for value in data:
            if isinstance(value, dict):
                yield from self.get_external_refs_from_object(value)
            elif isinstance(value, list):
                yield from self.get_external_refs_from_list(value)
    def get_external_refs(self, data):
        yield from self.get_external_refs_from_object(data)
    def walk(self, data):
        """Collect every externally referenced filename into self.ref_filenames."""
        for filename in self.get_external_refs(data):
            self.ref_filenames.add(filename)
    def create_ref_spec(self, ref_path):
        """Load, resolve and bundle the referenced spec at *ref_path*."""
        with ReferenceSpec(ref_path) as spec:
            spec.resolve()
            spec.bundle()
            log.debug(f'created ref spec: {ref_path}')
            return spec
    def resolve(self):
        """Load all externally referenced specs and localize the refs."""
        self.walk(self.data)
        for filename in self.ref_filenames:
            ref_path = os.path.join(self.path_dir, filename)
            self.ref_paths.append(ref_path)
            self.ref_spec[filename] = self.create_ref_spec(ref_path)
        self.replace_ref_fields(self.data)
    def replace_ref_fields(self, data):
        """Rewrite external refs ("file.yaml#/...") into local ones ("#/...")."""
        def replace(data, field, value):
            pos = value.find('#/')
            if pos > 0:
                filename = value[:pos]
                # BUGFIX: strip the file prefix that was just computed; the
                # original replaced the literal string '(unknown)', which can
                # never match a real ref, so refs were left external.
                data[field] = value.replace(filename, '')
                log.debug(f'replaced ref field "{value}" to "{data[field]}"')
        for field, value in data.items():
            if isinstance(value, dict):
                self.replace_ref_fields(value)
            elif isinstance(value, list):
                for v in value:
                    self.replace_ref_fields({'dummy': v})
            if field == self.REF_FIELD:
                replace(data, field, value)
    def merge_components(self):
        """Merge the components sections of every referenced spec into ours."""
        # BUGFIX: setdefault attaches the dict to the document, so merged
        # components persist even when the key was absent; the original
        # .get() returned a detached dict and silently dropped the merge.
        components = self.data.setdefault(self.COMPONENTS, {})
        for spec in self.ref_spec.values():
            spec_components = spec.data.get(self.COMPONENTS, {})
            for key, value in spec_components.items():
                components.setdefault(key, {})
                components[key].update(value)
    def bundle(self):
        self.merge_components()
class ReferenceSpec(BaseSpec):
    """A spec loaded because another spec pointed at it via an external $ref
    (see BaseSpec.create_ref_spec)."""
    pass
class BundledSpec(BaseSpec):
    """Marker subclass; presumably the fully bundled output spec — it is not
    used in this module's visible code."""
    pass
| 29.781513 | 77 | 0.582957 | 3,449 | 0.973194 | 939 | 0.264955 | 0 | 0 | 0 | 0 | 129 | 0.0364 |
802f2d6813fe3fdbbab4b7f3e7b7c3d02dc46145 | 2,750 | py | Python | code/task6_step1_train_model.py | p-koo/exponential_activations | 7e48054b64a565364439c45932338a09eb2eb4d3 | [
"MIT"
] | 1 | 2021-09-18T04:09:07.000Z | 2021-09-18T04:09:07.000Z | code/task6_step1_train_model.py | koo-lab/exponential_activations | 9032a360c1abb0f07b824e3ce6d20707efe306fd | [
"MIT"
] | null | null | null | code/task6_step1_train_model.py | koo-lab/exponential_activations | 9032a360c1abb0f07b824e3ce6d20707efe306fd | [
"MIT"
] | 4 | 2020-08-03T02:08:42.000Z | 2021-10-01T18:46:47.000Z | import os
import numpy as np
from six.moves import cPickle
from tensorflow import keras  # (the original imported keras twice; duplicate removed)
import helper
from tfomics import utils, metrics, explain

#------------------------------------------------------------------------
# Sweep: one model architecture, two first-layer activations.
model_names = ['residualbind']
activations = ['exponential', 'relu']

results_path = utils.make_directory('../results', 'task6')
params_path = utils.make_directory(results_path, 'model_params')

#------------------------------------------------------------------------
# Load the IRF1 train/validation/test splits (with reverse-complement data).
file_path = '../data/IRF1_400_h3k27ac.h5'
data = helper.load_data(file_path, reverse_compliment=True)
x_train, y_train, x_valid, y_valid, x_test, y_test = data

#------------------------------------------------------------------------
# Train each model/activation combination, log classification performance,
# and collect saliency maps for positive test sequences.
file_path = os.path.join(results_path, 'task6_classification_performance.tsv')
with open(file_path, 'w') as f:
    f.write('%s\t%s\t%s\n'%('model', 'ave roc', 'ave pr'))

    results = {}
    for model_name in model_names:
        for activation in activations:
            keras.backend.clear_session()

            # load model
            model = helper.load_model(model_name, activation=activation)
            name = model_name+'_'+activation+'_irf1'
            print('model: ' + name)

            # compile model
            helper.compile_model(model)

            # setup callbacks: early stopping on val_auroc with LR decay
            callbacks = helper.get_callbacks(monitor='val_auroc', patience=20,
                                              decay_patience=5, decay_factor=0.2)

            # train model
            history = model.fit(x_train, y_train,
                                epochs=100,
                                batch_size=100,
                                shuffle=True,
                                validation_data=(x_valid, y_valid),
                                callbacks=callbacks)

            # save model weights
            weights_path = os.path.join(params_path, name+'.hdf5')
            model.save_weights(weights_path)

            # predict test sequences and calculate performance metrics
            predictions = model.predict(x_test)
            mean_vals, std_vals = metrics.calculate_metrics(y_test, predictions, 'binary')

            # append one results row per model/activation
            f.write("%s\t%.3f\t%.3f\n"%(name, mean_vals[1], mean_vals[2]))

            # calculate saliency on (up to) 500 positive test sequences
            true_index = np.where(y_test[:,0] == 1)[0]
            X = x_test[true_index][:500]
            results[name] = explain.saliency(model, X, class_index=0, layer=-1)

# save all saliency results in one pickle
file_path = os.path.join(results_path, 'task6_saliency_results.pickle')
with open(file_path, 'wb') as f:
    cPickle.dump(results, f, protocol=cPickle.HIGHEST_PROTOCOL)
| 37.162162 | 90 | 0.552727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 697 | 0.253455 |
802f9b17bf794ac63d1cddf593f5ad25c1f4a96b | 6,564 | py | Python | ensemble_detectors/src/Algorithm_1_matchfilter/ortho_correction.py | satish1901/Methane-detection-from-hyperspectral-imagery | 741dee02e76931f572cf3e06af8faabe871e8e4a | [
"MIT"
] | 27 | 2020-06-11T21:59:54.000Z | 2022-03-22T03:10:50.000Z | ensemble_detectors/src/Algorithm_1_matchfilter/ortho_correction.py | N-NSH/Methane-detection-from-hyperspectral-imagery | 741dee02e76931f572cf3e06af8faabe871e8e4a | [
"MIT"
] | 7 | 2020-09-25T22:41:18.000Z | 2022-02-09T23:41:04.000Z | ensemble_detectors/src/Algorithm_1_matchfilter/ortho_correction.py | N-NSH/Methane-detection-from-hyperspectral-imagery | 741dee02e76931f572cf3e06af8faabe871e8e4a | [
"MIT"
] | 4 | 2021-01-18T15:57:13.000Z | 2022-03-12T20:51:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 14:15:38 2019
@author: Satish
"""
# doing the ortho-correction on the processed data from matchedFilter
import os
import numpy as np
import spectral as spy
import spectral.io.envi as envi
import spectral.algorithms as algo
from spectral.algorithms.detectors import MatchedFilter, matched_filter
import logging
import coloredlogs
import json
import shutil
import statistics
# set the logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("aviris_data_loader")
coloredlogs.install(level='DEBUG', logger=logger)
#DIRECTORY = "/media/data/satish/avng.jpl.nasa.gov/pub/test_unrect"
DIRECTORY = "../../data/raw_data"
# Load the manually computed per-flight-line offsets. Downstream code checks
# membership in OFFSET_DICT, so it may legitimately stay undefined when the
# file is missing.
try:
    # 'with' guarantees the handle is closed (the original leaked it)
    with open('./manual_offset.json') as f:
        offset_data = json.load(f)
    OFFSET_DICT = offset_data['OFFSET_DICT']
except Exception:  # narrowed from a bare 'except:' so Ctrl-C still works
    print("No manual offset file found")
# Every sub-directory of DIRECTORY is treated as one flight-line dataset.
FILES = [x for x in os.listdir(DIRECTORY) if os.path.isdir(os.path.join(DIRECTORY, x))]
print(FILES)
#%% return image object
def image_obj(hdr, img):
    """Create a spectral image object for data file *img* described by the
    ENVI header *hdr*.

    Supports BIP and BIL interleaves; raises ValueError for anything else
    (the original fell through to an UnboundLocalError on the return).
    """
    head = envi.read_envi_header(hdr)
    param = envi.gen_params(head)
    param.filename = img # spectral data file corresponding to .hdr file
    interleave = head['interleave']
    if interleave in ('bip', 'BIP'):
        print("it is a bip")
        from spectral.io.bipfile import BipFile
        return BipFile(param, head)
    if interleave in ('bil', 'BIL'):
        print("It is a bil file")
        from spectral.io.bilfile import BilFile
        return BilFile(param, head)
    raise ValueError("unsupported interleave %r (expected 'bip' or 'bil')" % interleave)
# Generic rectifier: use this function for datasets other than the custom one.
def ideal_ortho_correction(glt: np.ndarray, img: np.ndarray, b_val=0.0, output=None) -> np.ndarray:
    """Orthorectify *img* using the geographic lookup table *glt*.

    glt: (rows, cols, 2) world-relative coordinates; [..., 0] is the x
         (column) index and [..., 1] the y (row) index into *img*, 1-based;
         an all-zero entry means "no data".
    img: unrectified matched-filter output.
    output: optional preallocated buffer of shape glt.shape[:2].
    Returns the rectified image; no-data cells receive *b_val*.
    """
    if output is None:
        output = np.zeros((glt.shape[0], glt.shape[1]))
    if not np.array_equal(output.shape, [glt.shape[0], glt.shape[1]]):
        print("image dimension of output arrary do not match the GLT file")
    lookup = np.absolute(glt)                # GLT may carry negative (edge) values
    no_data = np.all(lookup == 0, axis=2)    # all-zero entries mean "no data"
    output[no_data] = b_val
    lookup[lookup > (img.shape[0] - 1)] = 0  # clamp out-of-range indices
    # -1 converts the 1-based GLT indices to zero-based numpy indexing
    output[~no_data] = img[lookup[~no_data, 1] - 1, lookup[~no_data, 0] - 1]
    return output
def custom_ortho_correct_for_data(file_name, glt: np.ndarray, img: np.ndarray, OFFSET_DICT, b_val=0.0, output=None) -> np.ndarray:
    """Orthorectify *img* after undoing the manual row offset recorded for
    *file_name*.

    OFFSET_DICT maps flight-line name -> offset multiplier; the image rows
    are rotated by int(multiplier * 1005) before the GLT lookup, and padded
    with zero rows when the GLT is taller than the image. Returns 0 (not an
    array) when *file_name* has no recorded offset.
    """
    if output is None:
        output = np.zeros((glt.shape[0], glt.shape[1]))
    if not np.array_equal(output.shape, [glt.shape[0], glt.shape[1]]):
        print("image dimension of output arrary do not match the GLT file")
    print(file_name)
    if file_name not in OFFSET_DICT:
        return 0
    offset_mul = OFFSET_DICT[file_name]
    print(offset_mul)
    off_v = int(offset_mul * 1005)
    # rotate the rows so the image starts at the manually determined offset
    img_read = np.vstack((img[off_v:img.shape[0], :], img[0:off_v, :]))
    if (glt.shape[0] - img.shape[0]) > 0:
        print("size mismatch. Fixing it...")
        padding = np.zeros((glt.shape[0] - img.shape[0], img.shape[1]))
        img_read = np.vstack((img_read, padding))
    print(img_read.shape)
    lookup = np.absolute(glt)                # magnitude: GLT may hold negative values
    no_data = np.all(lookup == 0, axis=2)    # all-zero GLT entries mean "no data"
    output[no_data] = b_val
    lookup[lookup > (img.shape[0] - 1)] = 0  # clamp out-of-range indices
    # -1 maps the 1-based GLT indices onto zero-based numpy indexing
    output[~no_data] = img_read[lookup[~no_data, 1] - 1, lookup[~no_data, 0] - 1]
    return output
#%% For each flight line: load its GLT, then ortho-correct every matched-filter band
for fname in FILES:
    fname_glt = fname.split("_")[0]
    sname_glt = f'{fname_glt}_rdn_glt' #geo-ref file for ortho-correction
    hname_glt = f'{sname_glt}.hdr' #header file
    glt_img = f'{DIRECTORY}/{fname}/{sname_glt}'
    glt_hdr = f'{DIRECTORY}/{fname}/{hname_glt}'
    print(glt_img, glt_hdr)
    mf_folder = f'{DIRECTORY}/{fname}/{fname_glt}_rdn_v1f_clip_mfout'
    try:
        # skip flight lines without a manual offset or without a GLT header
        if (fname_glt not in OFFSET_DICT.keys()):
            continue
        if (os.path.exists(glt_hdr)):
            glt_data_obj = image_obj(glt_hdr, glt_img)
            glt = glt_data_obj.read_bands([0,1])
        else:
            continue
    except:
        # NOTE(review): bare except swallows read errors; if image_obj or
        # read_bands fails, `glt` below may be stale from a previous
        # iteration or undefined entirely — confirm this is intended.
        pass
    #mf_rect_path = f'/media/data/satish/detector_bank_input/corrected_output'
    mf_rect_folder = f'{DIRECTORY}/{fname}/{fname_glt}_rect'
    # recreate the output directory from scratch on every run
    if not(os.path.isdir(mf_rect_folder)):
        os.mkdir(mf_rect_folder)
        print("\nDirectory", mf_rect_folder ," created.")
    elif os.path.isdir(mf_rect_folder):
        print("\nDirectory", mf_rect_folder ," already exists..deleting it")
        shutil.rmtree(mf_rect_folder)
        os.mkdir(mf_rect_folder)
        print("\nNew Directory", mf_rect_folder ," created.")
    # rectify each saved matched-filter band and write it alongside the input
    for mfname in os.listdir(mf_folder):
        print("Ortho-correcting file", mfname)
        mf_filename = f'{mf_folder}/{mfname}'
        img_unrect = np.load(mf_filename)
        print(img_unrect.shape)
        '''
        use this function in case you have any other dataset, the custom_ortho_correct_for_data
        function uses the OFFSET_DICT to correct the row positions in each band.
        rect_img = ideal_ortho_correction(fname_glt, glt, img_unrect)
        '''
        rect_img = custom_ortho_correct_for_data(fname_glt, glt, img_unrect, OFFSET_DICT)
        rect_filename = f'{mf_rect_folder}/{mfname}'
        np.save(rect_filename, rect_img)
| 37.084746 | 130 | 0.6688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,633 | 0.401127 |
80300712704795c8083886af8ccc60e875ba3cce | 1,519 | py | Python | rmv_test.py | BenDoan/rmv | d6203d988faa44df238ecb4bf8f3770e1e5d315a | [
"MIT"
] | 1 | 2015-04-02T05:24:10.000Z | 2015-04-02T05:24:10.000Z | rmv_test.py | BenDoan/rmv | d6203d988faa44df238ecb4bf8f3770e1e5d315a | [
"MIT"
] | null | null | null | rmv_test.py | BenDoan/rmv | d6203d988faa44df238ecb4bf8f3770e1e5d315a | [
"MIT"
] | null | null | null | import unittest
import os
import shutil
import subprocess
from math import ceil
TEST_DIR = "testdir"
MOVE_DIR = "movedir"
NUM_FILES = 100
class TestDefault(unittest.TestCase):
    """rmv with no flags: half of the fixture files should end up moved."""
    def setUp(self):
        # fresh sandbox: TEST_DIR/MOVE_DIR plus NUM_FILES one-byte .txt files
        if os.path.exists(TEST_DIR):
            shutil.rmtree(TEST_DIR)
        os.makedirs(TEST_DIR)
        os.chdir(TEST_DIR)
        os.makedirs("%s" % MOVE_DIR)
        for index in range(NUM_FILES):
            with open(f"test{index}.txt", 'w+') as handle:
                handle.write("a")
    def test_move(self):
        subprocess.call(["../rmv", MOVE_DIR])
        moved = len(os.listdir(MOVE_DIR))
        self.assertEqual(moved, ceil(NUM_FILES / 2))
    def tearDown(self):
        os.chdir("..")
        shutil.rmtree(TEST_DIR)
class TestPercent(TestDefault):
    """rmv -p 33: roughly a third of the files should be moved."""
    def test_move(self):
        subprocess.call(["../rmv", "-p 33", MOVE_DIR])
        moved = len(os.listdir(MOVE_DIR))
        self.assertEqual(moved, ceil(NUM_FILES * .33))
class TestGlob(TestDefault):
    """rmv -g*.txt: only .txt files should count toward the moved half."""
    def test_move(self):
        # add NUM_FILES .dat files that the glob must ignore
        for index in range(NUM_FILES):
            with open(f"test{index}.dat", 'w+') as handle:
                handle.write("test")
        subprocess.call(["../rmv", '-g*.txt', MOVE_DIR])
        moved = len(os.listdir(MOVE_DIR))
        self.assertEqual(moved, ceil(NUM_FILES / 2))
class TestSource(TestDefault):
    """Checks passing an explicit source directory distinct from the cwd."""

    def test_move(self):
        # Run rmv from a nested directory, naming the parent as the source.
        os.makedirs("nesteddir")
        os.chdir("nesteddir")
        subprocess.call(["../../rmv", "..", "../%s" % MOVE_DIR])
        os.chdir("..")
        self.assertEqual(len(os.listdir(MOVE_DIR)), ceil(NUM_FILES / 2))
if __name__ == '__main__':
unittest.main()
| 27.618182 | 72 | 0.597103 | 1,323 | 0.870968 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.108624 |
8031820bcdd231e70b0ca877db09b4495f4d4e66 | 2,155 | py | Python | src/nuspacesim/data/RenoNu2TauTables/read_reno_nu2tautables.py | NuSpaceSim/nuSpaceSim | 50d3878d37fc66ba3b275b9a71b6421eba5cdeb9 | [
"BSD-3-Clause-Clear"
] | 7 | 2021-12-07T16:09:30.000Z | 2022-02-18T19:48:35.000Z | src/nuspacesim/data/RenoNu2TauTables/read_reno_nu2tautables.py | NuSpaceSim/nuSpaceSim | 50d3878d37fc66ba3b275b9a71b6421eba5cdeb9 | [
"BSD-3-Clause-Clear"
] | 18 | 2021-10-12T20:04:46.000Z | 2022-03-31T19:51:11.000Z | src/nuspacesim/data/RenoNu2TauTables/read_reno_nu2tautables.py | NuSpaceSim/nuSpaceSim | 50d3878d37fc66ba3b275b9a71b6421eba5cdeb9 | [
"BSD-3-Clause-Clear"
] | null | null | null | import math
import h5py
import numpy as np
def extract_pexit_data(filename):
    """Parse a whitespace-delimited tau exit-probability table.

    Each data line is expected to hold: beta angle [deg], neutrino energy,
    ..., exit probability (last column).

    :param filename: path to the table file.
    :return: three parallel lists: beta [rad], log10(energy), log10(P_exit).
    """
    # Context manager guarantees the handle is closed even if a line fails to
    # parse (the original left the file open on such an error).
    with open(filename, "r") as infile:
        data = [line.split() for line in infile]
    b = [(math.pi * float(lne[0]) / 180.0) for lne in data]
    le = [math.log10(float(lne[1])) for lne in data]
    p = [math.log10(float(lne[-1])) for lne in data]
    return b, le, p
def extra_taudist_data(filename):
    """Parse a tau energy-distribution CDF table.

    The first column of each line is the tau energy fraction; the remaining
    columns are CDF values, one per beta angle in the hard-coded list below.

    :param filename: path to the whitespace-delimited table file.
    :return: (energy fractions, beta angles [rad], CDF matrix) as numpy data.
    """
    # Beta angles (degrees) corresponding to the table's value columns.
    bdeg = np.array([1.0, 3.0, 5.0, 7.0, 10.0, 12.0, 15.0, 17.0, 20.0, 25.0])
    # Context manager closes the file even on a parse error (the original
    # leaked the handle in that case).
    with open(filename, "r") as infile:
        data = [line.split() for line in infile]
    brad = math.pi * bdeg / 180.0
    z = np.array([float(lne[0]) for lne in data])
    # Drop the energy-fraction column so only CDF values remain per row.
    for lne in data:
        del lne[0]
    cv = np.array(data, float)
    return z, brad, cv
def main():
    """Convert the ASCII nu2tau tables into a single HDF5 file.

    Writes 'RenoNu2TauTables/nu2taudata.hdf5' containing the exit-probability
    grid plus one tau-energy-distribution group per neutrino energy.
    """
    out = h5py.File("RenoNu2TauTables/nu2taudata.hdf5", "w")
    pexitgrp = out.create_group("pexitdata")

    blist, lelist, plist = extract_pexit_data("RenoNu2TauTables/multi-efix.26")
    beta = np.array(blist)
    logenergy = np.array(lelist)
    pexitval = np.array(plist)

    # The flat table is a dense (energy x beta) grid; recover the axes and
    # reshape the values accordingly.
    buniq = np.unique(beta)
    leuniq = np.unique(logenergy)
    pexitarr = pexitval.reshape((leuniq.size, buniq.size))
    pexitgrp.create_dataset("BetaRad", data=buniq, dtype="f")
    pexitgrp.create_dataset("logNuEnergy", data=leuniq, dtype="f")
    pexitgrp.create_dataset("logPexit", data=pexitarr, dtype="f")

    for lognuenergy in np.arange(7.0, 11.0, 0.25):
        # Encode the energy as whole/fractional parts, e.g. 7.25 -> e07_25.
        whole = math.floor(lognuenergy)
        frac = (lognuenergy - whole) * 100
        mygrpstring = "TauEdist_grp_e{:02.0f}_{:02.0f}".format(whole, frac)
        tedistgrp = out.create_group(mygrpstring)
        myfilestring = (
            "RenoNu2TauTables/nu2tau-angleC-e{:02.0f}-{:02.0f}smx.dat".format(
                whole, frac
            )
        )
        tauEfrac, tdbeta, cdfvalues = extra_taudist_data(myfilestring)
        tedistgrp.create_dataset("TauEFrac", data=tauEfrac, dtype="f")
        tedistgrp.create_dataset("BetaRad", data=tdbeta, dtype="f")
        tedistgrp.create_dataset("TauEDistCDF", data=cdfvalues, dtype="f")

    # Close explicitly so all datasets are flushed to disk (the original
    # never closed the HDF5 file).
    out.close()
if __name__ == "__main__":
main()
| 33.153846 | 86 | 0.632947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.124826 |
8032277d46ee175709fb44ad0a48ddd9e2bd0bf3 | 4,443 | py | Python | makePredictions.py | psyxusheng/csedm-Data-Challenge | d746fbd21421bf7c1e5500567dbd1f32a48307b3 | [
"MIT"
] | null | null | null | makePredictions.py | psyxusheng/csedm-Data-Challenge | d746fbd21421bf7c1e5500567dbd1f32a48307b3 | [
"MIT"
] | null | null | null | makePredictions.py | psyxusheng/csedm-Data-Challenge | d746fbd21421bf7c1e5500567dbd1f32a48307b3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import csv
import tensorflow as tf
from config import Config
from DataFeeder import DataFeeder,TestData
from model import DKT
from sklearn.metrics import f1_score,precision_score,recall_score
# Metric functions applied, in this order, to the pooled test predictions.
indices = [precision_score,recall_score,f1_score]
def make_prediction(folderName,index,max_iters = 200,target_key = 'FirstCorrect'):
    """Train a DKT model on one cross-validation fold and predict on its test set.

    Parameters
    ----------
    folderName : str
        Fold directory containing 'Training.csv' and 'Test.csv'.
    index : int
        Fold number; appended to every output row to tag its fold.
    max_iters : int
        Number of full-batch training iterations.
    target_key : str
        Name of the target whose probabilities/predictions are reported.

    Returns
    -------
    tuple
        (result, columns): result is a list of rows (original test-row values
        plus predicted probability, predicted label and fold index); columns
        are the column names of the last test DataFrame processed.
    """
    # Drop any graph left over from a previous fold so variables don't collide.
    tf.reset_default_graph()
    cfg = Config(dataFile = '%s/Training.csv'%folderName)
    cfg.load()
    DF_train = DataFeeder(cfg)
    # problem vectors cfg.probVecs
    # Feature spec entries: [name, kind, [vocab size, embedding dim], flag].
    # NOTE(review): the meaning of the trailing boolean is defined by DKT;
    # confirm against the model implementation.
    features = [['ProblemID','inp',[cfg.numP,8],False],
                ['FirstCorrect','inp',[2,8],True],
                ['EverCorrect','inp',[2,8],True],
                ['UsedHint','inp',[2,8],True]]
    targets = [['FirstCorrect',2 , 1. , [1., 1.2]]]
    # Two graphs share the architecture: one with dropout for training and a
    # deterministic one (keep_prob = 1) for evaluation.
    model4train = DKT(features = features,
                 targets = targets,
                 keep_prob = 0.1,
                 num_items = cfg.numP,
                 rnn_units = [32,32],
                 training = True,
                 lr_decay = [1e-3,0.9,50])
    model4test = DKT(features = features,
                 targets = targets,
                 keep_prob = 1.,
                 num_items = cfg.numP,
                 rnn_units = [32,32],
                 training = False,
                 lr_decay = [5*1e-2,0.9,100])
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    print('training on %s'%folderName)
    for i in range(1,max_iters+1):
        # Full-batch training step; 'targets' here shadows the spec list above.
        inputs,targets,bu_masks = DF_train.next_batch(batch_size = DF_train.size,
                                                      cum = True)
        feed_data = model4train.zip_data(inputs,model4train.input_holders)
        feed_data_t = model4train.zip_data(targets,model4train.target_holders)
        feed_data.update(feed_data_t)
        _,predicts,costs = session.run([model4train.trainop,
                                     model4train.predicts,
                                     model4train.costs] ,
                                     feed_dict=feed_data)
        # Only true on the final iteration: report the last training cost.
        if i%max_iters == 0:
            for name,values in predicts.items():
                # y_pred = values[bu_masks]
                # y_true = targets[name][bu_masks]
                # indices = [func(y_true,y_pred) for func in evalue_indices]
                print('final cost',round(costs[target_key],3))
    cfg_test = Config(dataFile = '%s/Test.csv'%folderName)
    cfg_test.load()
    TD = TestData(cfg_test)
    result = []
    predictions = []
    groundtruth = []
    for data,(inputs,targets,seqIndices) in TD.export():
        feed_data = model4test.zip_data(inputs,model4test.input_holders)
        predicts,probablities = session.run([model4test.predicts,
                                model4test.probablities],feed_dict = feed_data)
        # Probability of the 'correct' class (last axis index 1) at each step
        # of the sequence, for batch element 0.
        # NOTE(review): assumes batch size 1 per export() item — confirm
        # against TestData.export().
        probs_on_correct = probablities[target_key][0,np.arange(inputs['lengths'][0]),seqIndices,1]
        y_pred = predicts[target_key][0,np.arange(inputs['lengths'][0]),seqIndices]
        y_true = targets[target_key][0,:]
        predictions.append(y_pred)
        groundtruth.append(y_true)
        for i in range(data.shape[0]):
            # Original test row + [probability, predicted label, fold index].
            raw_data = list(data.iloc[i,:].values)
            raw_data +=[float(probs_on_correct[i]) , int(y_pred[i]) , index]
            result.append(raw_data)
    y_true = np.concatenate(groundtruth,axis=0)
    y_pred = np.concatenate(predictions,axis=0)
    # Reuses the name 'index' for [precision, recall, f1] over the whole fold.
    index = [round(func(y_true,y_pred),3) for func in indices]
    print(' '*4,'testing',index)
    return result,list(data.columns)
def main(datafolder):
    """Run 10-fold training/prediction and dump all rows to cv_predict.csv.

    :param datafolder: directory containing fold0 .. fold9 sub-directories.
    :return: True on completion.
    """
    total_predicts = []
    labels = []
    for i in range(10):
        predicts, labels = make_prediction(folderName=datafolder + '/fold%d' % i,
                                           index=i,
                                           max_iters=400)
        total_predicts.extend(predicts)
    # 'with' guarantees the CSV handle is closed even if a write fails
    # (the original leaked the file object on error).
    with open('cv_predict.csv', 'w', newline='') as fobj:
        writer = csv.writer(fobj)
        writer.writerow(labels + ['pCorrectProblem', 'prediction', 'fold'])
        writer.writerows(total_predicts)
    return True
if __name__=='__main__':
dataFolder = r'C:\Users\G7\Desktop\itemRL\DataChellenge\CV'
main(dataFolder)
| 41.138889 | 100 | 0.55458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 530 | 0.119289 |
80323c5cb03f15cf7a86b6b63c25945da75b4b63 | 1,579 | py | Python | bianalyzer/relevance/similarity_matrix.py | luntos/bianalyzer | ce6c1efdf192c0e5e7ed648d6e9dd85be3c7b14b | [
"MIT"
] | 4 | 2016-02-10T22:44:37.000Z | 2019-02-26T04:57:11.000Z | bianalyzer/relevance/similarity_matrix.py | luntos/bianalyzer | ce6c1efdf192c0e5e7ed648d6e9dd85be3c7b14b | [
"MIT"
] | null | null | null | bianalyzer/relevance/similarity_matrix.py | luntos/bianalyzer | ce6c1efdf192c0e5e7ed648d6e9dd85be3c7b14b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ..helpers import construct_similarity_matrix_via_profiles
class SimilarityMatrix:
def __init__(self, keywords, matrix):
self.keywords = keywords
self.matrix = matrix
def construct_similarity_matrix(relevance_matrix, relevance_threshold=0.2):
"""
Constructs keyword similarity matrix by the given relevance_matrix
NOTE: final similarity matrix may contain not all the keywords (only those that are highly relevant to
at least one of the texts)
:param relevance_matrix: instance of SimilarityMatrix
:param relevance_threshold: a value in range [0, 1)
:return: instance of a class SimilarityMatrix
"""
# create relevance profiles
relevance_profiles = []
keywords = relevance_matrix.keywords
max_score = relevance_matrix.max_relevance_score
# print 'max score: %s' % max_score
real_threshold = relevance_threshold * max_score
relevant_keywords = []
for (i, keyword) in enumerate(keywords):
keyword_row = relevance_matrix.matrix[i]
relevance_profile = set([i for i, val in enumerate(keyword_row) if val >= real_threshold])
if len(relevance_profile) > 0:
# print 'keyword: %s, relevance profile size: %s' % (keyword, len(relevance_profile))
relevant_keywords.append(keyword)
relevance_profiles.append(relevance_profile)
keyword_similarity_matrix = construct_similarity_matrix_via_profiles(relevant_keywords, relevance_profiles)
return SimilarityMatrix(relevant_keywords, keyword_similarity_matrix)
| 41.552632 | 111 | 0.733376 | 127 | 0.080431 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.350855 |
8032858404b723ecb11e6d7ac5febb5da3de0fd6 | 1,452 | py | Python | src/block/mcmc.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | src/block/mcmc.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | src/block/mcmc.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | # TODO write utilities for running MCMC stuff
import networkx as nx
from graph_tool.inference import minimize_blockmodel_dl
from graph_tool import load_graph
import numpy as np
import pandas as pd
import os
from src.graph import MetaGraph
def run_minimize_blockmodel(mg, temp_loc=None, weight_model=None):
meta = mg.meta.copy()
meta = pd.DataFrame(mg.meta["neuron_name"])
mg = MetaGraph(mg.adj, meta)
if temp_loc is None:
temp_loc = f"maggot_models/data/interim/temp-{np.random.randint(1e8)}.graphml"
# save to temp
nx.write_graphml(mg.g, temp_loc)
# load into graph-tool from temp
g = load_graph(temp_loc, fmt="graphml")
os.remove(temp_loc)
total_degrees = g.get_total_degrees(g.get_vertices())
remove_verts = np.where(total_degrees == 0)[0]
g.remove_vertex(remove_verts)
if weight_model is not None:
recs = [g.ep.weight]
rec_types = [weight_model]
else:
recs = []
rec_types = []
state_args = dict(recs=recs, rec_types=rec_types)
min_state = minimize_blockmodel_dl(g, verbose=False, state_args=state_args)
blocks = list(min_state.get_blocks())
verts = g.get_vertices()
block_map = {}
for v, b in zip(verts, blocks):
cell_id = int(g.vertex_properties["_graphml_vertex_id"][v])
block_map[cell_id] = int(b)
block_series = pd.Series(block_map)
block_series.name = "block_label"
return block_series
| 29.632653 | 86 | 0.694215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.146694 |
803369c9001e4847c771fed5ca6b7aaff0451aac | 2,832 | py | Python | reo/migrations/0064_auto_20200616_1708.py | GUI/REopt_Lite_API | f2ade81b67c526cbe778c7bc584e3e1d616c1efc | [
"BSD-3-Clause"
] | 41 | 2020-02-21T08:25:17.000Z | 2022-01-14T23:06:42.000Z | reo/migrations/0064_auto_20200616_1708.py | GUI/REopt_Lite_API | f2ade81b67c526cbe778c7bc584e3e1d616c1efc | [
"BSD-3-Clause"
] | 167 | 2020-02-17T17:26:47.000Z | 2022-01-20T20:36:54.000Z | reo/migrations/0064_auto_20200616_1708.py | GUI/REopt_Lite_API | f2ade81b67c526cbe778c7bc584e3e1d616c1efc | [
"BSD-3-Clause"
] | 31 | 2020-02-20T00:22:51.000Z | 2021-12-10T05:48:08.000Z | # Generated by Django 2.2.10 on 2020-06-16 17:08
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds twelve Julia profiling-time columns to ProfileModel and widens
    LoadProfileModel.doe_reference_name into a Postgres text array.
    """

    dependencies = [
        ('reo', '0063_auto_20200521_1528'),
    ]

    # The twelve timing columns are identical nullable floats, so they are
    # generated from one template.  The 'constriants' spelling is kept as-is
    # because it names the real database column.
    operations = [
        migrations.AddField(
            model_name='profilemodel',
            name=timing_column,
            field=models.FloatField(blank=True, null=True),
        )
        for timing_column in (
            'julia_input_construction_seconds',
            'julia_input_construction_seconds_bau',
            'julia_reopt_constriants_seconds',
            'julia_reopt_constriants_seconds_bau',
            'julia_reopt_optimize_seconds',
            'julia_reopt_optimize_seconds_bau',
            'julia_reopt_postprocess_seconds',
            'julia_reopt_postprocess_seconds_bau',
            'julia_reopt_preamble_seconds',
            'julia_reopt_preamble_seconds_bau',
            'julia_reopt_variables_seconds',
            'julia_reopt_variables_seconds_bau',
        )
    ] + [
        migrations.AlterField(
            model_name='loadprofilemodel',
            name='doe_reference_name',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(blank=True, null=True), default=list, size=None),
        ),
    ]
| 35.4 | 137 | 0.607345 | 2,700 | 0.95339 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.243644 |
80345a58636aa3864d7f2094b4e668ce2c2cd81a | 2,378 | py | Python | solo/losses/simclr.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 693 | 2021-05-31T15:48:32.000Z | 2022-03-31T17:12:46.000Z | solo/losses/simclr.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 151 | 2021-06-15T00:22:57.000Z | 2022-03-27T15:17:02.000Z | solo/losses/simclr.py | xwyzsn/solo-learn | 16d021d8053439a3de205337ab2a11d191500b09 | [
"MIT"
] | 79 | 2021-06-02T10:31:15.000Z | 2022-03-25T01:25:09.000Z | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import torch
import torch.nn.functional as F
from solo.utils.misc import gather, get_rank
def simclr_loss_func(
    z: torch.Tensor, indexes: torch.Tensor, temperature: float = 0.1
) -> torch.Tensor:
    """Computes SimCLR's loss given batch of projected features z
    from different views, a positive boolean mask of all positives and
    a negative boolean mask of all negatives.

    Args:
        z (torch.Tensor): (N*views) x D Tensor containing projected features from the views.
        indexes (torch.Tensor): unique identifiers for each crop (unsupervised)
            or targets of each crop (supervised).
        temperature (float): temperature scaling the similarities. Defaults to 0.1.

    Return:
        torch.Tensor: SimCLR loss.
    """

    z = F.normalize(z, dim=-1)
    # Collect features from all processes so each local sample is contrasted
    # against the full global batch.
    gathered_z = gather(z)

    # Exponentiated cosine similarities between local and gathered features.
    sim = torch.exp(torch.einsum("if, jf -> ij", z, gathered_z) / temperature)

    gathered_indexes = gather(indexes)

    indexes = indexes.unsqueeze(0)
    gathered_indexes = gathered_indexes.unsqueeze(0)
    # positives: crops sharing the same identifier
    pos_mask = indexes.t() == gathered_indexes
    # Zero the diagonal within this rank's slice so a sample is never its own
    # positive.  NOTE(review): assumes gather() concatenates rank outputs in
    # rank order, as the z.size(0) * get_rank() offset implies — confirm.
    pos_mask[:, z.size(0) * get_rank() :].fill_diagonal_(0)
    # negatives: crops with different identifiers
    neg_mask = indexes.t() != gathered_indexes

    pos = torch.sum(sim * pos_mask, 1)
    neg = torch.sum(sim * neg_mask, 1)
    loss = -(torch.mean(torch.log(pos / (pos + neg))))
    return loss
| 39.633333 | 92 | 0.722876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,584 | 0.666106 |
803504701a3cf401c13dc50ffb64243deaa7a721 | 1,966 | py | Python | shop/migrations/0001_initial.py | chidibede/Django-Ecommerce-Site | c3a139ccf6e67ea90ab3879afcb16528be008548 | [
"MIT"
] | null | null | null | shop/migrations/0001_initial.py | chidibede/Django-Ecommerce-Site | c3a139ccf6e67ea90ab3879afcb16528be008548 | [
"MIT"
] | null | null | null | shop/migrations/0001_initial.py | chidibede/Django-Ecommerce-Site | c3a139ccf6e67ea90ab3879afcb16528be008548 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-06-08 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the three initial product tables.  All three share the same
    column layout, so the CreateModel operations are generated from one
    template (a fresh field list is built per model).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name=product_model,
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='product_images')),
                ('name', models.CharField(max_length=200)),
                ('category', models.CharField(max_length=300)),
                ('slug', models.SlugField()),
                ('sales_price', models.IntegerField()),
                ('original_price', models.IntegerField()),
            ],
        )
        for product_model in ('Adult_Products', 'Essential_Oils', 'Smart_Watches')
    ]
| 38.54902 | 114 | 0.544761 | 1,875 | 0.953713 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.171923 |
803911ad68063ce4c7a23b2522b750059f50235b | 21,773 | py | Python | pin_kit/extras/pinplay/PinPoints/scripts/regions.py | sawansib/Sniper | 45ec1eeb09b81a7250bc1a1aaa452f16b2b7f497 | [
"MIT"
] | 1 | 2021-04-22T05:27:08.000Z | 2021-04-22T05:27:08.000Z | pin_kit/extras/pinplay/PinPoints/scripts/regions.py | sawansib/SNIPER | 45ec1eeb09b81a7250bc1a1aaa452f16b2b7f497 | [
"MIT"
] | null | null | null | pin_kit/extras/pinplay/PinPoints/scripts/regions.py | sawansib/SNIPER | 45ec1eeb09b81a7250bc1a1aaa452f16b2b7f497 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# BEGIN_LEGAL
# BSD License
#
# Copyright (c)2014 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer. Redistributions
# in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution. Neither the name of
# the Intel Corporation nor the names of its contributors may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# END_LEGAL
#
#
# @ORIGINAL_AUTHORS: T. Mack Stallcup, Cristiano Pereira, Harish Patil, Chuck Yount
#
#
# Read in a file of frequency vectors (BBV or LDV) and execute one of several
# actions on it. Default is to generate a regions CSV file from a BBV file.
# Other actions include:
# normalizing and projecting FV file to a lower dimension
#
# $Id: regions.py,v 1.11.1.9 2014/06/09 23:30:44 tmstall Exp tmstall $
import datetime
import glob
import optparse
import os
import random
import re
import sys
import cmd_options
import msg
import util
def GetOptions():
    """
    Get users command line options/args and check to make sure they are correct.

    Validates that the FV (frequency vector) file argument exists and looks
    like a BBV file (contains a 'T:' slice line).  When generating a regions
    CSV (the default action), also validates and opens the Simpoint regions
    file and the weights file.

    @return List of options and 3 file pointers: fp_bbv, fp_simp, fp_weight
    """

    version = '$Revision: 1.11.1.9 $'; version = version.replace('$Revision: ', '')
    ver = version.replace(' $', '')
    us = '%prog [options] FVfile'
    desc = 'Implements several different actions to process FV (Frequency Vector) files ' \
           'such as BBV and LDV files. ' \
           'All actions requires a FV file as an argument, while some require additional ' \
           'options. ' \
           ' '\
           '--------------------------------------------'\
           ' '\
           'Default action is to generate a regions CSV file (--csv_region), which requires additional '\
           'options --region_file and --weight_file. '

    parser = optparse.OptionParser(usage=us, version=ver, description=desc)

    # Command line options to control script behavior.
    #
    # import pdb; pdb.set_trace()
    cmd_options.csv_region(parser, '')
    cmd_options.focus_thread(parser, '')
    # cmd_options.bbv_file(parser)  # Currently, don't use this option as FV file is required
    cmd_options.project_bbv(parser, '')
    cmd_options.region_file(parser, '')
    cmd_options.weight_file(parser, '')

    # Parse command line options and get any arguments.
    #
    (options, args) = parser.parse_args()

    # If user does not chose an action to perform, then run the
    # default: region CSV generation
    #
    if not options.project_bbv:
        options.csv_region = True

    # Must at least define a FV file.
    #
    # NOTE(review): this checks 'options.bbv' with hasattr but then reads
    # 'options.bbv_file'; since the bbv_file cmd_option is commented out above,
    # the attribute likely never exists and the else branch always runs.
    if hasattr(options, 'bbv') and options.bbv_file != '':
        bbv_file = options.bbv_file
    else:
        if len(args) < 1:
            msg.PrintAndExit('Must have at least a FVfile as an argument.\n'
                'Use -h to get help')
        bbv_file = args[0]

    # Check to make sure valid FV file exists.
    #
    # import pdb; pdb.set_trace()
    err_msg = lambda string: msg.PrintAndExit('This is not a valid file, ' + string + \
                '\nUse -h for help.')
    bbv_str = "basic block vector file: "
    if hasattr(options, 'bbv_file') and options.bbv_file == '':
        bbv_file = args[0]
    if not os.path.isfile(bbv_file):
        err_msg(bbv_str + bbv_file)

    # BBV file must have at least one line which starts with 'T:'.
    #
    fp_bbv = util.OpenCompressFile(bbv_file)
    if fp_bbv == None:
        err_msg(bbv_str + bbv_file)
    line = fp_bbv.readline()
    while not line.startswith('T:') and line != '':
        line = fp_bbv.readline()
    if not line.startswith('T:'):
        # NOTE(review): 'sim_str' and 'simp_file' are not defined yet on this
        # path, so this call would raise NameError; it looks like it should be
        # err_msg(bbv_str + bbv_file) — confirm.
        err_msg(sim_str + simp_file)
    fp_bbv.seek(0,0)

    # If required, look for additional files.
    #
    fp_simp = fp_weight = None
    if options.csv_region:
        sim_str = "simpoints file: "
        weight_str = "weights file: "
        simp_file = options.region_file
        weight_file = options.weight_file
        if not os.path.isfile(simp_file):
            err_msg(sim_str + simp_file)
        if not os.path.isfile(weight_file):
            err_msg(weight_str + weight_file)

        # Simpoints file must start with an integer.
        #
        fp_simp = util.OpenCompressFile(simp_file)
        if fp_simp == None:
            err_msg(sim_str + simp_file)
        line = fp_simp.readline()
        l_list = line.split()
        if not l_list[0].isdigit():
            err_msg(sim_str + simp_file)

        # Weight file must either have a floating point number < 1.0 as the first
        # value in the file or the first line must be two integers. (The first
        # integer are assumed to be '1', i.e. a slice with weight 1. Should never get
        # a weight > 1.)
        #
        fp_weight = util.OpenCompressFile(weight_file)
        if fp_weight == None:
            err_msg(weight_str + weight_file)
        line = fp_weight.readline()
        l_list = line.split()
        if '.' not in l_list[0] and not re.search('\d\s\d', line):
            err_msg(weight_str + weight_file)
        # Rewind both files so callers can read them from the start.
        fp_simp.seek(0,0)
        fp_weight.seek(0,0)

    return (options, fp_bbv, fp_simp, fp_weight)
def GetSlice(fp):
    """
    Read the next frequency-vector slice (one 'T:' line) from the file.

    A slice line starts with 'T' followed by a sequence of
    ':' dimension ':' count token pairs (whitespace ignored).  Lines before
    the next 'T:' line are skipped.  When the trailing basic-block table
    ('Block id:' lines) is reached, the file position is rewound so that
    GetBlockIDs() can re-read it, and an empty list is returned.

    @return list of (dimension, count) tuples for the slice; [] at end of slices
    """

    line = fp.readline()
    # Advance to the next slice line, stopping at EOF or the BB table.
    while line != '' and not line.startswith('T:'):
        if line.startswith('Block id:'):
            # Rewind past this line so the caller can consume the BB table.
            fp.seek(-len(line), os.SEEK_CUR)
            return []
        line = fp.readline()
    if line == '':
        return []

    pairs = re.findall(':\s*(\d+)\s*:\s*(\d+)\s*', line)
    return [(int(dim), int(count)) for dim, count in pairs]
def GetBlockIDs(fp):
    """
    Read the basic-block table stored at the end of a BBV frequency file.

    Each table line looks like:

       Block id: 2233 0x69297ff1:0x69297ff5 static instructions: 2 block count: 1 block size: 5

    The 'Block id' and 'static instructions' values are extracted; block
    numbers are converted from the file's 1-based numbering to 0-based.

    @return list of (block number, static instruction count) tuples; [] if no table
    """

    line = fp.readline()
    # Skip forward to the first 'Block id:' line, if any.
    while line != '' and not line.startswith('Block id:'):
        line = fp.readline()
    if line == '':
        return []

    block_id = []
    while line.startswith('Block id:'):
        bb = int(line.split('Block id:')[1].split()[0]) - 1
        icount = int(line.split('static instructions:')[1].split()[0])
        block_id.append((bb, icount))
        line = fp.readline()
    return block_id
############################################################################
#
# Functions for generating regions CSV files
#
############################################################################
def GetWeights(fp):
    """
    Get the regions and weights from a weights file.

    Each data line holds a weight followed by a region number, e.g.
    '0.25000 3'.  A weight of exactly 1 may appear without a decimal point.
    Lines that do not parse as 'number number' are skipped, matching the
    old regex behavior on comments/garbage.

    @return lists of weights (inserted at index 'region') and regions
    """

    weight_list = []
    weight_regions = []
    for line in fp.readlines():
        fields = line.split()
        if len(fields) < 2:
            continue
        # Parse 'weight region' by splitting on whitespace.  The previous
        # regexes ('(0\.\d+).*(\d+)' and '(\d)\s(\d)') captured only the
        # LAST digit of the region number, so any region >= 10 was mis-read.
        try:
            weight = float(fields[0])
            region = int(fields[1])
        except ValueError:
            continue
        weight_list.insert(region, weight)
        weight_regions.append(region)

    return weight_list, weight_regions
def GetSimpoints(fp):
    """
    Get the regions and slices from the Simpoint file.

    Each data line holds a slice number followed by a region number, e.g.
    '117 3'.  Lines that do not parse as two integers are skipped.

    @return lists of slices (inserted at index 'region') and regions
    """

    slice_list = []
    simp_regions = []
    for line in fp.readlines():
        fields = line.split()
        if len(fields) < 2:
            continue
        # Parse 'slice region' by splitting on whitespace.  The previous
        # regex '(\d+).*(\d+)' captured only the LAST digit of the region
        # number, so any region >= 10 was mis-read.
        try:
            slice_num = int(fields[0])
            region = int(fields[1])
        except ValueError:
            continue
        slice_list.insert(region, slice_num)
        simp_regions.append(region)

    return slice_list, simp_regions
def GetRegionBBV(fp):
    """
    Read all the frequency vector slices and the basic block id info from a
    basic block vector file.  Put the data into a set of lists which are used
    in generating CSV regions.

    Consumes the whole file: first every 'T:' slice line via GetSlice(), then
    the trailing basic-block table via GetBlockIDs().

    @return cumulative_icount, all_bb, bb_freq, bb_num_instr, region_bbv
    """

    # Dictionary which contains the number of instructions in each BB.
    # NOTE(review): the key is actually the whole (bb number, count) tuple
    # yielded by GetSlice(), not just the BB number — confirm whether that
    # is intended by downstream consumers.
    bb_num_instr = {}

    # Dictionary which contains the number of times a BB was executed.
    # NOTE(review): keyed on the same (bb number, count) tuple as above.
    bb_freq = {}

    # Currently not set by the function. May use in the future for calculating
    # coverage.
    #
    # List of BB vectors for each representative region.  Each element is
    # a dictionary keyed on BB number with the icount of the block in that
    # specific slice.
    #
    region_bbv = []

    # Set of all BB found in the BBV file.  Each element
    # is a tuple with the BB number and # of instr in BB.
    #
    all_bb = []

    # List of the cumulative sum of instructions in the slices.  There is one
    # entry for each slice in the BBV file which contains the total icount up
    # to the end of the slice.
    #
    cumulative_icount = []

    # Cumulative sum of instructions so far
    #
    run_sum = 0

    # Get each slice & generate some data on it.
    #
    while True:
        fv = GetSlice(fp)
        if fv == []:
            break
        # print fv

        # Get icount for BB in slice and record the cumulative icount.
        # (Note: 'sum' shadows the builtin of the same name within this loop.)
        sum = 0
        for bb in fv:
            count = bb[1]
            sum += count

            # Add the number instructions for the current BB to total icount for
            # this specific BB (bb_num_instr).
            #
            bb_num_instr[bb] = bb_num_instr.get(bb, 0) + count

            # Increment the number of times this BB number has been encountered
            #
            bb_freq[bb] = bb_freq.get(bb, 0) + 1
        if sum != 0:
            run_sum += sum
            cumulative_icount += [run_sum]
    # import pdb; pdb.set_trace()

    # Read the basic block information at the end of the file if it exists.
    #
    # import pdb; pdb.set_trace()
    all_bb = GetBlockIDs(fp)
    # if all_bb != []:
    #     print 'Block ids'
    #     print all_bb

    # The list 'all_bb' should contain one entry for each basic block in the
    # application (and the corresponding icount).  Check to see if there are
    # any missing BB entries in the list 'all_bb'.  If there are, then add them
    # to the list with an icount of 0.  Sort the final list so the icount can
    # be accessed by BB number in constant time.
    #
    # import pdb; pdb.set_trace()
    if all_bb != []:
        all_bb.sort(key=lambda bb: bb[0])
        length = len(all_bb)
        max_bb_num = all_bb[length-1][0]  # Last list entry has the total number of BB
        if max_bb_num+1 > length:

            # Missing at least one BB entry in the list.
            #
            array_index = 0  # Used to access the next entry in the list
            count = 0  # Used to loop thru the list
            while count <= length:
                if all_bb[array_index][0] != count:

                    # Missing this BB entry in the list.  Add the missing BB tuple
                    # with icount = 0
                    # NOTE(review): appends (array_index, 0); since a gap means
                    # the missing block number equals 'count', (count, 0) may
                    # have been intended — confirm.
                    all_bb.append((array_index, 0))
                    count += 1  # Skip the 'missing' entry
                array_index += 1
                count += 1
            all_bb.sort(key=lambda bb: bb[0])  # Sort once missing entries are added

    # import pdb; pdb.set_trace()
    return cumulative_icount, all_bb, bb_freq, bb_num_instr, region_bbv
def CheckRegions(simp_regions, weight_regions):
    """
    Check to make sure the simpoint and weight files contain the same regions.

    Both the region counts and the sets of region numbers must match; on a
    mismatch, prints the two region lists and exits the process with -1.

    @return no return value
    """

    if len(simp_regions) != len(weight_regions) or \
       set(simp_regions) != set(weight_regions):
        msg.PrintMsg('ERROR: Regions in these two files are not identical')
        msg.PrintMsg('   Simpoint regions: ' + str(simp_regions))
        msg.PrintMsg('   Weight regions:   ' + str(weight_regions))
        # NOTE(review): cleanup() is not defined in this chunk of the file;
        # confirm it exists elsewhere, otherwise this path raises NameError
        # before sys.exit() runs.
        cleanup()
        sys.exit(-1)
def GenRegionCSV(options, fp_bbv, fp_simp, fp_weight):
    """
    Read in three files (BBV, weights, simpoints) and print to stdout
    a regions CSV file which defines the representative regions.

    @param options: parsed command line options (reads options.focus_thread)
    @param fp_bbv: open basic-block-vector file
    @param fp_simp: open simpoints file
    @param fp_weight: open weights file

    @return no return value
    """
    # Read data from weights, simpoints and BBV files.
    # Error check the regions.
    #
    weight_list, weight_regions = GetWeights(fp_weight)
    slice_list, simp_regions = GetSimpoints(fp_simp)
    cumulative_icount, all_bb, bb_freq, bb_num_instr, region_bbv = GetRegionBBV(fp_bbv)
    CheckRegions(simp_regions, weight_regions)
    total_num_slices = len(cumulative_icount)
    # Grand total instruction count is the last cumulative entry.
    total_instr = cumulative_icount[len(cumulative_icount)-1]
    # import locale
    # locale.setlocale(locale.LC_ALL, "")
    # total_instr = locale.format('%d', total_instr, True)
    # total_bb_icount = locale.format('%d', total_bb_icount, True)
    # Print header information: echo the command line, then the CSV header.
    # NOTE: the loop variable 'string' shadows the stdlib module name;
    # harmless here since the module is not used in this function.
    #
    msg.PrintMsgNoCR('# Regions based on: ')
    for string in sys.argv:
        msg.PrintMsgNoCR(string + ' '),
    msg.PrintMsg('')
    msg.PrintMsg('# comment,thread-id,region-id,simulation-region-start-icount,simulation-region-end-icount,region-weight')
    # msg.PrintMsg('')
    # Print region information, one comment line plus one CSV line per region.
    #
    # import pdb; pdb.set_trace()
    if options.focus_thread != -1:
        tid = int(options.focus_thread)
    else:
        tid = 0
    total_icount = 0
    region = 1  # First region is always numbered 1
    for slice_num, weight in zip(slice_list, weight_list):
        if slice_num == 0:
            # If this is the first slice, set the initial icount to 0
            #
            start_icount = 0
        else:
            # Use cumulative icount of previous slice to get the initial
            # icount of this slice.
            #
            start_icount = cumulative_icount[slice_num-1]+1
        end_icount = cumulative_icount[slice_num]
        length = end_icount - start_icount + 1
        total_icount += length
        msg.PrintMsg('# Region = %d Slice = %d Icount = %d Length = %d Weight = %.5f' % \
            (region, slice_num, start_icount, length, weight))
        msg.PrintMsg('Cluster %d from slice %d,%d,%d,%d,%d,%.5f\n' % \
            (region-1, slice_num, tid, region, start_icount, end_icount, weight))
        region +=1
    # Currently does nothing as 'region_bbv' is always null (at least for now.)
    #
    # Get a set which contains BBs of all representative regions
    #
    all_region_bb = set()
    for bbv in region_bbv:
        region_bb = 0
        for bb in bbv:
            all_region_bb.add(bb)
            # NOTE: 'bb' is rebound here from a BB number to the id element
            # of the (id, icount) tuple; intentional but subtle.
            bb, icount = all_bb[bb-1]
            region_bb += int(icount)
        # Python 2 print statement.
        print 'Trace coverage: %.4f' % (float(region_bb)/total_instr)
    # Get total number of instructions for BBs in representative regions
    #
    region_bb_icount = 0
    for num in all_region_bb:
        bb, icount = all_bb[num-1]
        region_bb_icount += int(icount)
    # Print summary statistics
    #
    # import pdb; pdb.set_trace()
    msg.PrintMsg('# Total instructions in %d regions = %d' % (len(simp_regions), total_icount))
    msg.PrintMsg('# Total instructions in workload = %d' % cumulative_icount[total_num_slices-1])
    msg.PrintMsg('# Total slices in workload = %d' % total_num_slices)
    # msg.PrintMsg('# Overall dynamic coverage of workload by these regions = %.4f' \
    #     % (float(region_bb_icount)/total_bb_icount))
############################################################################
#
# Functions for normalization and projection
#
############################################################################
def GetDimRandomVector(proj_matrix, proj_dim, dim):
    """
    Get the random vector for dimension 'dim'. If it's already in 'proj_matrix',
    then just return it. Otherwise, generate a new random vector of length
    'proj_dim' with values between -1 and 1 and memoize it in 'proj_matrix'.

    @param proj_matrix: dict mapping FV dimension -> projection vector (mutated)
    @param proj_dim: length of each projection vector
    @param dim: frequency-vector dimension being projected

    @return list of length 'proj_dim' which contains vector of random values
    """
    # Use the membership operator instead of dict.has_key(), which is
    # deprecated in Python 2 and removed in Python 3.
    if dim in proj_matrix:
        # print 'Using random vector: %4d' % dim
        vector = proj_matrix[dim]
    else:
        # print 'Generating random vector: %4d' % dim
        random.seed()  # Use default source for seed
        vector = [random.uniform(-1, 1) for _ in range(proj_dim)]
        proj_matrix[dim] = vector
    return vector
def ProjectFVFile(fp, proj_dim=15):
    """
    Read all the slices in a frequency vector file, normalize them and use a
    random projection matrix to project them onto a result matrix with
    dimensions: num_slices x proj_dim.

    @param fp: open frequency-vector file
    @param proj_dim: number of dimensions each slice is projected onto

    @return list of lists which contains the result matrix
    """
    # Random projection matrix.  Keys are the FV dimension (NOT the slice
    # number); each value is a list of 'proj_dim' random values.
    proj_matrix = {}
    # Result matrix: one projected vector per slice.
    result_matrix = []
    while True:
        fv = GetSlice(fp)
        if fv == []:
            break
        # Total count across the slice, used to normalize each dimension.
        total = 0
        for block in fv:
            total += block[1]
        # Start this slice's projected vector at zero.
        projected = [0] * proj_dim
        # Project each element using the "dimension of the element", not
        # the element's position in the slice list.
        for block in fv:
            dim = block[0]
            weight = float(block[1]) / total  # Normalize freq count
            rand_vec = GetDimRandomVector(proj_matrix, proj_dim, dim)
            for idx in range(proj_dim):
                projected[idx] += weight * rand_vec[idx]
        result_matrix.append(projected)
    return result_matrix
def PrintFloatMatrix(matrix):
    """
    Print a matrix composed of a list of list of floating point values.

    Each inner list (one slice) is printed on its own line, one '%6.3f'
    formatted value per element.  NOTE: uses Python 2 print-statement
    syntax; the trailing comma suppresses the newline between values.

    @return no return value.
    """
    index = 0
    while index < len(matrix):
        # NOTE: 'slice' shadows the builtin name; harmless locally.
        slice = matrix[index]
        for block in slice:
            # print '%6.8f' % block,
            print '%6.3f' % block,
        print
        index += 1
def cleanup():
    """
    Close all open files and any other cleanup required.

    The BBV handle is always open; the simpoints/weights handles may be
    None (e.g. when only projecting BBVs) and are closed conditionally.

    @return no return value
    """
    fp_bbv.close()
    for handle in (fp_simp, fp_weight):
        if handle:
            handle.close()
############################################################################
# Script entry point: parse command-line options and open the input files.
options, fp_bbv, fp_simp, fp_weight = GetOptions()
if options.project_bbv:
    # Projection mode: print the projected BBV matrix to stdout.
    result_matrix = ProjectFVFile(fp_bbv)
    PrintFloatMatrix(result_matrix)
else:
    # Default mode: emit the regions CSV describing representative regions.
    GenRegionCSV(options, fp_bbv, fp_simp, fp_weight)
cleanup()
sys.exit(0)
| 33.809006 | 123 | 0.606715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11,606 | 0.533046 |
803a46dade15dfe7d529009beb897901bfbdb1e7 | 2,918 | py | Python | pydy/codegen/code.py | jcrist/pydy | ec139f0dcbeffba8242636b727b3be02091792b0 | [
"BSD-3-Clause"
] | 1 | 2019-06-27T05:30:36.000Z | 2019-06-27T05:30:36.000Z | pydy/codegen/code.py | jcrist/pydy | ec139f0dcbeffba8242636b727b3be02091792b0 | [
"BSD-3-Clause"
] | null | null | null | pydy/codegen/code.py | jcrist/pydy | ec139f0dcbeffba8242636b727b3be02091792b0 | [
"BSD-3-Clause"
] | 1 | 2019-06-27T05:29:50.000Z | 2019-06-27T05:29:50.000Z | #!/usr/bin/env python
"""This module remains for backwards compatibility reasons and will be
removed in PyDy 0.4.0."""
import warnings
from .ode_function_generators import generate_ode_function as new_gen_ode_func
# Emit a one-time DeprecationWarning at import, pointing users at the
# replacement locations for the symbols that used to live here.
# (The 'codgen' misspelling is preserved from the original message text.)
with warnings.catch_warnings():
    warnings.simplefilter('once')
    warnings.warn("This module, 'pydy.codgen.code', is deprecated. The "
                  "function 'generate_ode_function' can be found in the "
                  "'pydy.codegen.ode_function_generator' module. "
                  "'CythonGenerator' has been removed, use "
                  "'pydy.codegen.cython_code.CythonMatrixGenerator' "
                  "instead.",
                  DeprecationWarning)
class CythonGenerator(object):
    """Deprecated placeholder kept for backwards compatibility.

    Instantiating it only emits a one-time DeprecationWarning; it has no
    other behavior.  Use pydy.codegen.cython_code.CythonMatrixGenerator
    instead.
    """

    def __init__(self, *args, **kwargs):
        # Accept (and ignore) any arguments the removed class used to take.
        with warnings.catch_warnings():
            warnings.simplefilter('once')
            removal_msg = ("'CythonGenerator' has been removed, use "
                           "'pydy.codegen.cython_code.CythonMatrixGenerator' "
                           "instead.")
            warnings.warn(removal_msg, DeprecationWarning)
def generate_ode_function(mass_matrix, forcing_vector, constants,
                          coordinates, speeds, specified=None,
                          generator='lambdify'):
    """Returns a numerical function which can evaluate the right hand side
    of the first order ordinary differential equations from a system
    described by:

    M(constants, coordinates) x' = F(constants, coordinates, speeds, specified)

    Parameters
    ----------
    mass_matrix : sympy.Matrix, shape(n,n)
        The symbolic mass matrix of the system. The rows should correspond
        to the coordinates and speeds.
    forcing_vector : sympy.Matrix, shape(n,1)
        The symbolic forcing vector of the system.
    constants : list of sympy.Symbol
        The constants in the equations of motion.
    coordinates : list of sympy.Function
        The generalized coordinates of the system.
    speeds : list of sympy.Function
        The generalized speeds of the system.
    specified : list of sympy.Function
        The specified quantities of the system.
    generator : string, {'lambdify'|'theano'|'cython'}, optional
        The method used for generating the numeric right hand side.

    Returns
    -------
    evaluate_ode_function : function
        A function which evaluates the derivaties of the states.

    """
    # This thin wrapper only exists for backwards compatibility: warn once,
    # then delegate to the new generator with its reordered argument list.
    with warnings.catch_warnings():
        warnings.simplefilter('once')
        # Fixed doubled word ("the the") in the original warning message.
        warnings.warn("This function is deprecated and will be removed in "
                      "PyDy 0.4.0. Use the new 'generate_ode_function' "
                      "in 'pydy.codegen.ode_function_generator'",
                      DeprecationWarning)
    return new_gen_ode_func(forcing_vector, coordinates, speeds, constants,
                            mass_matrix=mass_matrix, specifieds=specified,
                            generator=generator)
| 39.972603 | 79 | 0.643934 | 358 | 0.122687 | 0 | 0 | 0 | 0 | 0 | 0 | 1,796 | 0.61549 |
803aabf6aa2864fa437dfdfe7d60ccff3ba16ead | 12,795 | py | Python | mle_hyperopt/utils/comms.py | RobertTLange/mle-hyperopt | 692fee1e8e3d761962307c0894b308a00fa41d9c | [
"MIT"
] | 3 | 2021-10-31T14:02:37.000Z | 2021-11-03T11:22:19.000Z | mle_hyperopt/utils/comms.py | RobertTLange/mle-hyperopt | 692fee1e8e3d761962307c0894b308a00fa41d9c | [
"MIT"
] | null | null | null | mle_hyperopt/utils/comms.py | RobertTLange/mle-hyperopt | 692fee1e8e3d761962307c0894b308a00fa41d9c | [
"MIT"
] | 1 | 2021-10-30T17:45:26.000Z | 2021-10-30T17:45:26.000Z | from datetime import datetime
import numpy as np
from rich.console import Console
from rich.table import Table
from rich import box
from rich.align import Align
from typing import List, Optional, Union
# Fixed character width used by every rich Console created in this module.
console_width = 80
def welcome_message(
    space_data: List[dict],
    search_type: str,
    fixed_params: Optional[dict] = None,
) -> None:
    """Print the search-space configuration table at startup.

    Args:
        space_data (List[dict]): List of search variable descriptions.
        search_type (str): Name of the search strategy.
        fixed_params (Optional[dict], optional): Fixed parameter names
            and values. Defaults to None.
    """
    table = Table(show_footer=False)
    table.add_column(":sunflower: Variable", no_wrap=True)
    table.add_column("Type")
    table.add_column("Search Range :left_right_arrow:")
    table.title = "MLE-Hyperopt " + search_type + " Hyperspace :rocket:"
    # One row per searched variable.
    for variable in space_data:
        table.add_row(*variable.values())
    # Fixed parameters get a literal "fixed" type column.
    if fixed_params is not None:
        for name, value in fixed_params.items():
            table.add_row(name, "fixed", str(value))
    # Style the search-range column and the table as a whole.
    range_col = table.columns[2]
    range_col.justify = "left"
    range_col.header_style = "bold red"
    range_col.style = "red"
    table.row_styles = ["none"]
    table.box = box.SIMPLE
    Console(width=console_width).print(Align.center(table))
def update_message(
    total_eval_id: int,
    best_eval_id: List[int],
    best_config: List[dict],
    best_eval: List[Union[float, np.ndarray]],
    best_ckpt: Optional[List[str]],
    best_batch_eval_id: List[int],
    best_batch_config: List[dict],
    best_batch_eval: List[Union[float, np.ndarray]],
    best_batch_ckpt: Optional[List[str]],
) -> None:
    """Print current best performing configurations.

    NOTE: rounds float entries of the config dicts in place.

    Args:
        total_eval_id (int): Number of total evaluations so far.
        best_eval_id (List[int]): ID of top-k performing evaluations.
        best_config (List[dict]): Top-k performing parameter configurations.
        best_eval (List[Union[float, np.ndarray]]): Top-k performance values.
        best_ckpt (Optional[List[str]]): Top-k checkpoint paths.
        best_batch_eval_id (List[int]): Top-k performing evaluations in batch.
        best_batch_config (List[dict]): Top-k performing configurations in batch.
        best_batch_eval (List[Union[float, np.ndarray]]):
            Top-k performance values in batch.
        best_batch_ckpt (Optional[List[str]]): Top-k checkpoint paths in batch.
    """
    time_t = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
    console = Console(width=console_width)
    table = Table(show_header=True)
    table.add_column(f":inbox_tray: Total: {total_eval_id}", style="dim")
    table.add_column("ID")
    table.add_column("Obj. :chart_with_downwards_trend:")
    table.add_column(f"Configuration :bookmark: - {time_t}")
    print()
    # Add row(s) for best config(s) over the entire search.
    for i in range(len(best_eval_id)):
        best_e = np.round_(best_eval[i], 3)
        for k, v in best_config[i].items():
            if type(v) == float:
                best_config[i][k] = np.round_(v, 3)
        best_c = dict(best_config[i])
        if best_ckpt is not None:
            best_c["ckpt"] = best_ckpt[i]
        table.add_row(
            "Best Overall", str(best_eval_id[i]), str(best_e), str(best_c)[1:-1]
        )
    # Add row(s) for best config(s) in batch.
    # BUG FIX: iterate over the *batch* lists here. The original looped over
    # range(len(best_eval_id)) while indexing the batch lists, which raises
    # IndexError whenever the overall top-k list is longer than the batch's.
    for i in range(len(best_batch_eval_id)):
        best_batch_e = np.round_(best_batch_eval[i], 3)
        for k, v in best_batch_config[i].items():
            if type(v) == float:
                best_batch_config[i][k] = np.round_(v, 3)
        best_b_c = dict(best_batch_config[i])
        if best_batch_ckpt is not None:
            best_b_c["ckpt"] = best_batch_ckpt[i]
        table.add_row(
            "Best in Batch",
            str(best_batch_eval_id[i]),
            str(best_batch_e),
            str(best_b_c)[1:-1],
        )
    console.print(Align.center(table))
def ranking_message(
    best_eval_ids: List[int],
    best_configs: List[dict],
    best_evals: List[Union[float, np.ndarray]],
) -> None:
    """Print a ranked table of the top-k performing configurations.

    Scalar arguments are promoted to one-element lists, so a single
    (id, config, eval) triple may also be passed.  NOTE: rounds float
    config entries and list-ifies array-valued objectives in place.

    Args:
        best_eval_ids (List[int]): IDs of top-k performing evaluations.
        best_configs (List[dict]): Top-k performing parameter configurations.
        best_evals (List[Union[float, np.ndarray]]): Top-k performance values.
    """
    # Promote scalar inputs to lists so the loop below is uniform.
    if type(best_eval_ids) in [int, np.int64]:
        best_eval_ids = [best_eval_ids]
    if type(best_configs) == dict:
        best_configs = [best_configs]
    if type(best_evals) in [float, int]:
        best_evals = [best_evals]
    table = Table(show_header=True)
    table.add_column(f":1st_place_medal: Rank", style="dim")
    table.add_column("ID")
    table.add_column("Obj. :chart_with_downwards_trend:")
    table.add_column("Configuration :bookmark:")
    for rank in range(len(best_configs)):
        # Round objective values for prettier printing; array-valued
        # objectives are converted to plain lists in place.
        if type(best_evals[rank]) == np.ndarray:
            best_evals[rank] = best_evals[rank].tolist()
            score = [round(value, 3) for value in best_evals[rank]]
        else:
            score = round(best_evals[rank], 3)
        # Round float-valued config entries in place as well.
        config = best_configs[rank]
        for key in config:
            if type(config[key]) == float:
                config[key] = round(config[key], 3)
        table.add_row(
            f"{rank+1}",
            str(best_eval_ids[rank]),
            str(score),
            str(config)[1:-1],
        )
    Console(width=console_width).print(Align.center(table))
def print_grid_hello(num_total_configs: int, num_dims_grid: int) -> None:
    """Hello message specific to grid search.

    Args:
        num_total_configs (int): Number of total configurations in grid.
        num_dims_grid (int): Number of variables to search over.
    """
    Console(width=console_width).log(
        f"Start running {num_dims_grid}D grid with "
        f"{num_total_configs} total configurations."
    )
def print_halving_hello(
    num_sh_batches: int,
    evals_per_batch: List[int],
    iters_per_batch: List[int],
    halving_coeff: int,
    num_total_iters: int,
) -> None:
    """Hello message specific to Successive Halving search.

    Args:
        num_sh_batches (int): Total number of SH batches.
        evals_per_batch (List[int]): Number of evaluations per batch.
        iters_per_batch (List[int]): Number of iterations per batch.
        halving_coeff (int): Halving coefficient.
        num_total_iters (int): Total evaluations at the end of the search.
    """
    log = Console(width=console_width).log
    log(f"Start running {num_sh_batches} batches of Successive Halving.")
    log(f"➞ Configurations per batch: {evals_per_batch}")
    log(f"➞ Iterations per batch: {iters_per_batch}")
    log(f"➞ Halving coefficient: {halving_coeff}")
    log(f"➞ Total Number of Iterations: {num_total_iters}")
    log(
        f"➞ Batch No. 1/{num_sh_batches}: {evals_per_batch[0]} configs for"
        f" {iters_per_batch[0]} iters."
    )
def print_halving_update(
    sh_counter: int,
    num_sh_batches: int,
    evals_per_batch: List[int],
    iters_per_batch: List[int],
    num_total_iters: int,
) -> None:
    """Update message specific to Successive Halving search.

    Args:
        sh_counter (int): Number of completed SH batches.
        num_sh_batches (int): Total number of SH batches.
        evals_per_batch (List[int]): Number of evaluations per batch.
        iters_per_batch (List[int]): Number of iterations per batch.
        num_total_iters (int): Total evaluations at the end of the search.
    """
    log = Console(width=console_width).log
    # Iterations consumed so far = sum over completed batches of
    # (configs in batch) * (iterations per config).
    configs_done = np.array(evals_per_batch)[:sh_counter]
    iters_done = np.array(iters_per_batch)[:sh_counter]
    done_iters = np.sum(configs_done * iters_done)
    log(
        f"Completed {sh_counter}/{num_sh_batches} batches of SH ➢"
        f" {done_iters}/{num_total_iters} iters."
    )
    if sh_counter < num_sh_batches:
        log(
            f"➞ Next - Batch No. {sh_counter+1}/{num_sh_batches}:"
            f" {evals_per_batch[sh_counter]} configs for"
            f" {iters_per_batch[sh_counter]} iters."
        )
def print_hyperband_hello(
    num_hb_loops: int,
    sh_num_arms: List[int],
    sh_budgets: List[int],
    num_hb_batches: int,
    evals_per_batch: List[int],
) -> None:
    """Hello message specific to Hyperband search.

    Args:
        num_hb_loops (int): Number of total SH loops in hyperband.
        sh_num_arms (List[int]): Active bandit arms in each SH loop.
        sh_budgets (List[int]): Iteration budget for each SH loop.
        num_hb_batches (int): Total number of job batches.
        evals_per_batch (List[int]): Number of jobs in each batch.
    """
    log = Console(width=console_width).log
    log(f"Start running {num_hb_batches} batches of Hyperband evaluations.")
    log(f"➞ Evals per batch: {evals_per_batch}")
    log(f"➞ Total SH loops: {num_hb_loops} | Arms per loop: {sh_num_arms}")
    log(f"➞ Min. budget per loop: {sh_budgets}")
    log(
        f"➞ Start Loop No. 1/{num_hb_loops}: {sh_num_arms[0]} arms &"
        f" {sh_budgets[0]} min budget."
    )
def print_hyperband_update(
    hb_counter: int,
    num_hb_loops: int,
    sh_num_arms: List[int],
    sh_budgets: List[int],
    num_hb_batches: int,
    hb_batch_counter: int,
    evals_per_batch: List[int],
) -> None:
    """Update message specific to Hyperband search.

    Args:
        hb_counter (int): Number of completed SH loops in hyperband.
        num_hb_loops (int): Number of total SH loops in hyperband.
        sh_num_arms (List[int]): Active bandit arms in each SH loop.
        sh_budgets (List[int]): Iteration budget for each SH loop.
        num_hb_batches (int): Total number of job batches.
        hb_batch_counter (int): Number of completed job batches.
        evals_per_batch (List[int]): Number of jobs in each batch.
    """
    log = Console(width=console_width).log
    log(
        f"Completed {hb_batch_counter}/{num_hb_batches} of Hyperband evaluation"
        " batches."
    )
    log(f"➞ Done with {hb_counter}/{num_hb_loops} loops of SH.")
    if hb_counter < num_hb_loops:
        log(
            f"➞ Active Loop No. {hb_counter + 1}/{num_hb_loops}:"
            f" {sh_num_arms[hb_counter]} arms & {sh_budgets[hb_counter]} min"
            " budget."
        )
        log(f"➞ Next batch of SH: {evals_per_batch[hb_batch_counter]} evals.")
def print_pbt_hello(
    num_workers: int,
    steps_until_ready: int,
    explore_type: str,
    exploit_type: str,
) -> None:
    """Hello message specific to Population-Based Training.

    Args:
        num_workers (int): Number of synchronous PBT workers.
        steps_until_ready (int): Steps (e.g. SGD) between PBT iterations.
        explore_type (str): Exploration strategy name.
        exploit_type (str): Exploitation strategy name.
    """
    log = Console(width=console_width).log
    log(f"Start running PBT w. {num_workers} workers.")
    log(f"➞ Steps until ready: {steps_until_ready}")
    log(f"➞ Exploration strategy: {explore_type}")
    log(f"➞ Exploitation strategy: {exploit_type}")
def print_pbt_update(
    step_counter: int, num_total_steps: int, copy_info: dict
) -> None:
    """Update message specific to Population-Based Training.

    Args:
        step_counter (int): Number of completed PBT batches.
        num_total_steps (int): Number of total steps (e.g. SGD intervals).
        copy_info (dict): Per-worker info on who exploited/explored;
            assumed to be keyed by consecutive worker ids 0..n-1.
    """
    console = Console(width=console_width)
    console.log(f"Completed {step_counter} batches of PBT.")
    console.log(f"➞ Number of total steps: {num_total_steps}")
    for w_id in range(len(copy_info)):
        info = copy_info[w_id]
        if w_id != info["copy_id"]:
            # This worker copied (exploited) another worker's parameters.
            console.log(
                f"➞ 👨🚒 W{w_id} (P:"
                f" {round(info['old_performance'], 3)}) exploits"
                f" W{info['copy_id']} (P:"
                f" {round(info['copy_performance'], 3)})"
            )
            console.log(f"-- E/E Params: {info['copy_params']}")
        else:
            # This worker kept its own trajectory.
            console.log(
                f"➞ 👨🚒 W{w_id} (P:"
                f" {round(info['old_performance'], 3)}) continues"
                " own trajectory."
            )
            console.log(f"-- Old Params: {info['copy_params']}")
| 36.452991 | 81 | 0.637124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,480 | 0.504241 |
803c9afaa645ec27e821bad4d70d8355430146c8 | 4,009 | py | Python | nn_ood/data/lastnames.py | dtch1997/SCOD | f79df5097989b4bfc1c7f4cb8f51c86f708f974c | [
"MIT"
] | 10 | 2021-05-13T03:52:18.000Z | 2022-03-23T19:34:35.000Z | nn_ood/data/lastnames.py | dtch1997/SCOD | f79df5097989b4bfc1c7f4cb8f51c86f708f974c | [
"MIT"
] | null | null | null | nn_ood/data/lastnames.py | dtch1997/SCOD | f79df5097989b4bfc1c7f4cb8f51c86f708f974c | [
"MIT"
] | 4 | 2021-05-30T09:12:50.000Z | 2021-11-09T23:56:11.000Z | from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import torch
import unicodedata
import string
import random
import numpy as np
def findFiles(path):
    """Return the list of file paths matching the glob pattern *path*."""
    return glob.glob(path)
# Alphabet used for one-hot encoding: ASCII letters plus the punctuation
# marks that may appear in surnames.
all_letters = string.ascii_letters + " .,;'"
# Size of the one-hot vector for a single character.
n_letters = len(all_letters)
# Turn a Unicode string to plain ASCII.
# (Approach from https://stackoverflow.com/a/518232/2809427)
def unicodeToAscii(s):
    """Strip accents from *s* and drop characters not in all_letters."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [c for c in decomposed
            if unicodedata.category(c) != 'Mn' and c in all_letters]
    return ''.join(kept)
# Build the category_lines dictionary, a list of names per language
category_lines = {}  # language name -> list of ASCII surnames
all_categories = []  # language names, in file-discovery order
# Read a file and split into lines
def readLines(filename):
    """Read *filename* and return its lines converted to plain ASCII."""
    # Use a context manager so the handle is closed deterministically
    # (the original leaked it until garbage collection).
    with open(filename, encoding='utf-8') as f:
        raw = f.read()
    return [unicodeToAscii(line) for line in raw.strip().split('\n')]
# Populate category_lines/all_categories from the surname dataset at
# import time.  NOTE(review): the dataset path is hard-coded to one
# machine -- consider making it configurable.
for filename in findFiles('/home/apoorva/datasets/names/*.txt'):
    # Category name is the file's base name, e.g. "French.txt" -> "French".
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines
n_categories = len(all_categories)
######################################################################
# Turning Names into Tensors
# --------------------------
#
# Now that we have all the names organized, we need to turn them into
# Tensors to make any use of them.
#
# To represent a single letter, we use a "one-hot vector" of size
# ``<1 x n_letters>``. A one-hot vector is filled with 0s except for a 1
# at index of the current letter, e.g. ``"b" = <0 1 0 0 0 ...>``.
#
# To make a word we join a bunch of those into a 2D matrix
# ``<line_length x 1 x n_letters>``.
#
# That extra 1 dimension is because PyTorch assumes everything is in
# batches - we're just using a batch size of 1 here.
#
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
    """Return the position of *letter* in all_letters (-1 if absent)."""
    position = all_letters.find(letter)
    return position
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
    """One-hot encode a single letter as a <1 x n_letters> tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0][letterToIndex(letter)] = 1
    return one_hot
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
    """Encode *line* as a stack of one-hot rows, one per character."""
    encoded = torch.zeros(len(line), 1, n_letters)
    for position, letter in enumerate(line):
        encoded[position][0][letterToIndex(letter)] = 1
    return encoded
def randomChoice(l):
    """Return a uniformly random element of the sequence *l*."""
    last_index = len(l) - 1
    return l[random.randint(0, last_index)]
def randomTrainingExample():
    """Sample a random (category, line, category_tensor, line_tensor) tuple."""
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    label_idx = all_categories.index(category)
    category_tensor = torch.tensor([label_idx], dtype=torch.long)
    return category, line, category_tensor, lineToTensor(line)
class LastNames(torch.utils.data.Dataset):
    """Dataset of surnames drawn from language-specific name lists.

    Samples are (one-hot line tensor, binary target tensor) pairs.  The
    'train' and 'val' splits draw from French/Dutch names; the 'ood'
    (out-of-distribution) split draws from Chinese/Japanese/Korean names.
    """

    def __init__(self, split, N=None):
        """
        Args:
            split: one of 'train', 'val', 'ood'.
            N: optional cap on the dataset length (at most 1000).
        """
        super().__init__()
        self.split = split
        if split == "train":
            self.categories = ['French', 'Dutch']
        elif split == "val":
            self.categories = ['French', 'Dutch']
        elif split == "ood":
            self.categories = ['Chinese', 'Japanese', 'Korean']
        self.K = len(self.categories)
        self.N = 1000
        if N is not None:
            self.N = min(N, 1000)

    def __len__(self):
        return self.N

    def __getitem__(self, i):
        # Samples are drawn at random each call, so the index *i* is ignored.
        target = np.random.choice(self.K)
        category = self.categories[target]
        line = randomChoice(category_lines[category])
        # NOTE(review): the label is collapsed modulo 2, so for the 'ood'
        # split distinct categories share a label -- presumably intended
        # for a binary classifier; confirm against the training code.
        target = torch.Tensor([target % 2])
        return lineToTensor(line), target

    def TensorToLine(self, line):
        """Decode a one-hot line tensor back into its string."""
        indices = np.argmax(line.detach().cpu().numpy(), axis=-1)
        # Original built the string via ``line_str.join(single_char)``,
        # which only worked because each joined piece is one character;
        # plain concatenation is the intent.
        return ''.join(all_letters[int(idx)] for idx in indices[:, 0])

    def TargetToCategory(self, target):
        """Map an integer class index back to its category name."""
        return self.categories[target]
803e4182cc11eec12d785bce525dec0268a1a586 | 749 | py | Python | sdk/identity/azure-identity/tests/test_imds_credential_async.py | anuchandy/azure-sdk-for-python | 589b9890554ebf261aa2184e8f1c6507f01a207c | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/tests/test_imds_credential_async.py | anuchandy/azure-sdk-for-python | 589b9890554ebf261aa2184e8f1c6507f01a207c | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/tests/test_imds_credential_async.py | anuchandy/azure-sdk-for-python | 589b9890554ebf261aa2184e8f1c6507f01a207c | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.identity.aio._credentials.managed_identity import ImdsCredential
import pytest
from helpers_async import AsyncMockTransport
@pytest.mark.asyncio
async def test_imds_close():
    """close() should shut the underlying transport down exactly once."""
    mock_transport = AsyncMockTransport()
    credential = ImdsCredential(transport=mock_transport)
    await credential.close()
    assert mock_transport.__aexit__.call_count == 1
@pytest.mark.asyncio
async def test_imds_context_manager():
    """Leaving the async context manager closes the transport exactly once."""
    mock_transport = AsyncMockTransport()
    async with ImdsCredential(transport=mock_transport):
        pass
    assert mock_transport.__aexit__.call_count == 1
| 24.16129 | 75 | 0.691589 | 0 | 0 | 0 | 0 | 456 | 0.608812 | 414 | 0.552737 | 147 | 0.196262 |
803e7af427386e573026718af646b210d5adc2f3 | 7,576 | py | Python | imutils/Stitcher.py | sunjxan/pyimagesearch | 6ba14f0fadb23364d9b320981c5984e4842be361 | [
"Apache-2.0"
] | null | null | null | imutils/Stitcher.py | sunjxan/pyimagesearch | 6ba14f0fadb23364d9b320981c5984e4842be361 | [
"Apache-2.0"
] | null | null | null | imutils/Stitcher.py | sunjxan/pyimagesearch | 6ba14f0fadb23364d9b320981c5984e4842be361 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
class Stitcher:
    """Panorama stitcher based on SIFT feature matching and homography.

    Images are stitched right-to-left: the rightmost image is warped onto
    its left neighbor repeatedly until a single panorama remains.
    """

    def stitch(self, images, ratio=.75, reprojThresh=4.0, showMatches=False):
        """Stitch a left-to-right ordered list of images into one panorama.

        Returns None when no images are given or any pairwise stitch fails
        (too few feature matches).
        """
        imagesCount = len(images)
        if imagesCount == 0:
            return
        if imagesCount == 1:
            return images[0]
        # Fold from the right: repeatedly merge the current result with the
        # image to its left.
        result = images[-1]
        for i in range(imagesCount - 1, 0, -1):
            result = self._stitch_two_images(imagesCount - i, images[i - 1], result, ratio, reprojThresh, showMatches)
            if result is None:
                return
        return result

    def _stitch_two_images(self, index, imageL, imageR, ratio, reprojThresh, showMatches):
        """Stitch imageR onto imageL; returns the merged image or None."""
        hL, wL = imageL.shape[:2]
        hR, wR = imageR.shape[:2]
        # SIFT keypoints and feature descriptors for both images.
        kpsL, featuresL = self.detectAndDescribe(imageL)
        kpsR, featuresR = self.detectAndDescribe(imageR)
        # Match features between the two images.
        M = self.matchKeypoints(kpsL, kpsR, featuresL, featuresR, ratio, reprojThresh)
        if M is None:
            return
        matches, H, status = M
        # Perspective-warp the right image into the left image's plane.
        result = cv2.warpPerspective(imageR, H, ((wL + wR), max(hL, hR)))
        # Then overlay the left image on top.
        result[0:hL, 0:wL] = imageL
        # Crop to the bounding rectangle of the stitched (non-black) content.
        gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
        cnts, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        maxContour = max(cnts, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(maxContour)
        result = result[y:y+h, x:x+w]
        # Optionally visualize the keypoint matches.
        if showMatches:
            vis = self.drawMatches(imageL, imageR, kpsL, kpsR, matches, status)
            cv2.imshow("Matches {}".format(index), vis)
            cv2.waitKey(1)
        return result

    def detectAndDescribe(self, image):
        """Return (keypoint coordinates, SIFT descriptors) for *image*."""
        # Detect keypoints and extract feature descriptors.
        descriptor = cv2.SIFT_create()
        kps, features = descriptor.detectAndCompute(image, None)
        # Extract the (x, y) coordinates from the keypoint objects.
        kps = np.array([kp.pt for kp in kps], dtype=np.float32)
        return kps, features

    def matchKeypoints(self, kpsL, kpsR, featuresL, featuresR, ratio, reprojThresh):
        """Match descriptors and estimate the right->left homography.

        Returns (matches, H, status) or None when too few matches exist.
        """
        # Brute-force descriptor matcher.
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        # k-NN matching: for each descriptor in featuresL find its 2 nearest
        # neighbors in featuresR, ordered nearest first.  Each match holds
        # queryIdx (index into featuresL), trainIdx (index into featuresR)
        # and distance (Euclidean distance between the descriptors).
        rawMatches = matcher.knnMatch(featuresL, featuresR, 2)
        matches = []
        # Filter out false-positive matches.
        for m in rawMatches:
            # Lowe's ratio test: keep only matches with a clearly unique
            # nearest neighbor.
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].queryIdx, m[0].trainIdx))
        # A homography needs at least 4 correspondences.
        # NOTE(review): this guard actually requires 5 matches (> 4, not
        # >= 4) -- possibly an intentional safety margin; confirm.
        if len(matches) > 4:
            # Build the two corresponding point sets.
            ptsL, ptsR = [], []
            for queryIdx, trainIdx in matches:
                ptsL.append(kpsL[queryIdx])
                ptsR.append(kpsR[trainIdx])
            ptsL = np.array(ptsL, dtype=np.float32)
            ptsR = np.array(ptsR, dtype=np.float32)
            # Estimate the homography between the two point sets with RANSAC.
            # Warping the left image onto the right would leave part of the
            # panorama outside the canvas, so H maps the RIGHT image's points
            # onto the LEFT image's plane instead.
            H, status = cv2.findHomography(ptsR, ptsL, cv2.RANSAC, reprojThresh)
            return matches, H, status
        return

    def drawMatches(self, imageL, imageR, kpsL, kpsR, matches, status):
        """Render the two images side by side with match lines drawn."""
        # Draw the keypoint correspondences between the two images.
        hL, wL = imageL.shape[:2]
        hR, wR = imageR.shape[:2]
        vis = np.zeros((max(hL, hR), wL + wR, 3), dtype=np.uint8)
        vis[0:hL, 0:wL] = imageL
        vis[0:hR, wL:] = imageR
        for ((queryIdx, trainIdx), s) in zip(matches, status):
            # Only draw matches that RANSAC marked as inliers.
            if s == 1:
                ptL = round(kpsL[queryIdx, 0].item()), round(kpsL[queryIdx, 1].item())
                ptR = round(kpsR[trainIdx, 0].item()) + wL, round(kpsR[trainIdx, 1].item())
                cv2.line(vis, ptL, ptR, (0, 255, 0), 1)
        return vis

    def removeBlackBorder(self, image, showAnimate=False, winname=None):
        """Crop *image* to the largest interior rectangle free of black border.

        Shrinks the content's bounding rectangle until it lies entirely
        inside the non-black region, then greedily grows each side back out
        as far as possible.  When showAnimate is True, intermediate masks
        are previewed in the window *winname*.
        """
        def drawAnimate(mask, time):
            # Preview the stitched result restricted to the current mask.
            cv2.imshow(winname, cv2.bitwise_and(image, image, mask=mask))
            cv2.waitKey(time)
        # Pad 10 black pixels on every side so erosion can proceed inward
        # from all four directions.
        image = cv2.copyMakeBorder(image, 10, 10, 10, 10, cv2.BORDER_CONSTANT, (0, 0, 0))
        # Get the contour of the non-black target region.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
        cnts, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        maxContour = max(cnts, key=cv2.contourArea)
        # Bounding rectangle of the target region, drawn filled into a mask.
        x, y, w, h = cv2.boundingRect(maxContour)
        boundingRect = np.zeros(thresh.shape, dtype=np.uint8)
        cv2.rectangle(boundingRect, (x, y), (x + w - 1, y + h - 1), 255, -1)
        if showAnimate:
            drawAnimate(boundingRect, 1000)
        # Subtract the target region from the rectangle mask; non-zero
        # pixels mark where the rectangle still covers black border.
        sub = cv2.subtract(boundingRect, thresh)
        # Erode the rectangle, shrinking 1 pixel per step, until it lies
        # completely inside the target region.
        while cv2.countNonZero(sub) > 0:
            boundingRect = cv2.erode(boundingRect, None)
            if showAnimate:
                drawAnimate(boundingRect, 30)
            sub = cv2.subtract(boundingRect, thresh)
        # Recover the contour of the shrunken interior rectangle.
        cnts, hier = cv2.findContours(boundingRect, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        maxContour = max(cnts, key=cv2.contourArea)
        nX, nY, nW, nH = cv2.boundingRect(maxContour)
        left = nX
        right = nX + nW
        top = nY
        bottom = nY + nH
        # 1. Grow the interior rectangle leftwards/rightwards, one column
        # at a time, keeping the largest rectangle still inside the region.
        while left > x:
            left = left - 1
            boundingRect[top:bottom, left] = 255
            if showAnimate:
                drawAnimate(boundingRect, 30)
            sub = cv2.subtract(boundingRect, thresh)
            if cv2.countNonZero(sub) > 0:
                # Overshot into black border: undo this column and stop.
                boundingRect[top:bottom, left] = 0
                if showAnimate:
                    drawAnimate(boundingRect, 30)
                left = left + 1
                break
        while right < x + w:
            right = right + 1
            boundingRect[top:bottom, right - 1] = 255
            if showAnimate:
                drawAnimate(boundingRect, 30)
            sub = cv2.subtract(boundingRect, thresh)
            if cv2.countNonZero(sub) > 0:
                boundingRect[top:bottom, right - 1] = 0
                if showAnimate:
                    drawAnimate(boundingRect, 30)
                right = right - 1
                break
        # 2. Grow the rectangle upwards/downwards the same way.
        while top > y:
            top = top - 1
            boundingRect[top, left:right] = 255
            if showAnimate:
                drawAnimate(boundingRect, 30)
            sub = cv2.subtract(boundingRect, thresh)
            if cv2.countNonZero(sub) > 0:
                boundingRect[top, left:right] = 0
                if showAnimate:
                    drawAnimate(boundingRect, 30)
                top = top + 1
                break
        while bottom < y + h:
            bottom = bottom + 1
            boundingRect[bottom - 1, left:right] = 255
            if showAnimate:
                drawAnimate(boundingRect, 30)
            sub = cv2.subtract(boundingRect, thresh)
            if cv2.countNonZero(sub) > 0:
                boundingRect[bottom - 1, left:right] = 0
                if showAnimate:
                    drawAnimate(boundingRect, 30)
                bottom = bottom - 1
                break
        # Return the stitched result with the black border cropped away.
        return image[top:bottom, left:right]
803f3e78ea2014f7c662ee3a5d6517f238a79624 | 4,339 | py | Python | tests/bugs/core_3365_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_3365_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_3365_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_3365
# title: Extend syntax for ALTER USER CURRENT_USER
# decription:
# Replaced old code: removed EDS from here as it is not needed at all:
# we can use here trivial "connect '$(DSN)' ..." instead.
# Non-privileged user is created in this test and then we check that
# he is able to change his personal data: password, firstname and any of
# TAGS key-value pair (avaliable in Srp only).
#
# Checked on 4.0.0.1635: OK, 3.773s; 3.0.5.33180: OK, 2.898s.
#
# tracker_id: CORE-3365
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
# isql output normalisation: collapse runs of whitespace and drop '=' so the
# column-separator rows do not affect the comparison.
substitutions_1 = [('[ \t]+', ' '), ('=', '')]

# No objects are needed up front; the test script creates everything itself.
init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# Scenario: create non-privileged user TMP$C3365 (Srp plugin, with tags),
# reconnect as that user and exercise ALTER CURRENT USER (password,
# firstname, tag add/replace/drop), checking sec$users / sec$user_attributes
# before and after the change.
test_script_1 = """
set bail on;
set count on;
-- Drop any old account with name = 'TMP$C3365' if it remains from prevoius run:
set term ^;
execute block as
begin
begin
execute statement 'drop user tmp$c3365 using plugin Srp' with autonomous transaction;
when any do begin end
end
begin
execute statement 'drop user tmp$c3365 using plugin Legacy_UserManager' with autonomous transaction;
when any do begin end
end
end^
set term ;^
commit;
set width usrname 10;
set width firstname 10;
set width sec_plugin 20;
set width sec_attr_key 20;
set width sec_attr_val 20;
set width sec_plugin 20;
recreate view v_usr_info as
select
su.sec$user_name as usrname
,su.sec$first_name as firstname
,su.sec$plugin as sec_plugin
,sa.sec$key as sec_attr_key
,sa.sec$value as sec_attr_val
from sec$users su left
join sec$user_attributes sa using(sec$user_name, sec$plugin)
where su.sec$user_name = upper('tmp$c3365');
commit;
grant select on v_usr_info to public;
commit;
create user tmp$c3365 password 'Ir0nM@n' firstname 'John'
using plugin Srp
tags (initname='Ozzy', surname='Osbourne', groupname='Black Sabbath', birthday = '03.12.1948')
;
commit;
select 'before altering' as msg, v.* from v_usr_info v;
commit;
connect '$(DSN)' user tmp$c3365 password 'Ir0nM@n';
alter current user
set password 'H1ghWaySt@r' firstname 'Ian'
using plugin Srp
tags (initname='Ian', surname='Gillan', groupname='Deep Purple', drop birthday)
;
commit;
connect '$(DSN)' user tmp$c3365 password 'H1ghWaySt@r';
commit;
select 'after altering' as msg, v.* from v_usr_info v;
commit;
connect '$(DSN)' user SYSDBA password 'masterkey';
drop user tmp$c3365 using plugin Srp;
commit;
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

# Expected (post-substitution) isql output: 4 tag rows before the ALTER,
# 3 after (BIRTHDAY dropped, others replaced).
expected_stdout_1 = """
MSG USRNAME FIRSTNAME SEC_PLUGIN SEC_ATTR_KEY SEC_ATTR_VAL
=============== ========== ========== ==================== ==================== ====================
before altering TMP$C3365 John Srp BIRTHDAY 03.12.1948
before altering TMP$C3365 John Srp GROUPNAME Black Sabbath
before altering TMP$C3365 John Srp INITNAME Ozzy
before altering TMP$C3365 John Srp SURNAME Osbourne
Records affected: 4
MSG USRNAME FIRSTNAME SEC_PLUGIN SEC_ATTR_KEY SEC_ATTR_VAL
============== ========== ========== ==================== ==================== ====================
after altering TMP$C3365 Ian Srp GROUPNAME Deep Purple
after altering TMP$C3365 Ian Srp INITNAME Ian
after altering TMP$C3365 Ian Srp SURNAME Gillan
Records affected: 3
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    """Run the ALTER USER CURRENT_USER script and compare normalised isql output."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| 33.898438 | 108 | 0.567181 | 0 | 0 | 0 | 0 | 183 | 0.042176 | 0 | 0 | 3,862 | 0.890067 |
803fc0980572bedb582462a274dae0d462a1eb72 | 241 | py | Python | sololearn/hovercraft.py | ehlodex/Python3 | 126c4662d1371ec6cbc1f257bd3de5c1dcdc86a6 | [
"MIT"
] | null | null | null | sololearn/hovercraft.py | ehlodex/Python3 | 126c4662d1371ec6cbc1f257bd3de5c1dcdc86a6 | [
"MIT"
] | null | null | null | sololearn/hovercraft.py | ehlodex/Python3 | 126c4662d1371ec6cbc1f257bd3de5c1dcdc86a6 | [
"MIT"
] | null | null | null | #!/usr/bin/env/ python3
"""SoloLearn > Code Coach > Hovercraft"""
sales = int(input('How many did you sell? ')) * 3
expense = 21
if sales > expense:
print('Profit')
elif sales < expense:
print('Loss')
else:
print('Broke Even')
| 18.538462 | 49 | 0.630705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.477178 |
8040774ec7da6d6e9a3a5ad4d793d25841c16e92 | 2,522 | py | Python | RSAMessengerDapp/User/views.py | slothmanxyz/RSAMessengerDapp | 3c5966196cac7749ea87ce0f42c47d159eb2ad14 | [
"MIT"
] | null | null | null | RSAMessengerDapp/User/views.py | slothmanxyz/RSAMessengerDapp | 3c5966196cac7749ea87ce0f42c47d159eb2ad14 | [
"MIT"
] | null | null | null | RSAMessengerDapp/User/views.py | slothmanxyz/RSAMessengerDapp | 3c5966196cac7749ea87ce0f42c47d159eb2ad14 | [
"MIT"
] | 1 | 2021-04-05T13:27:02.000Z | 2021-04-05T13:27:02.000Z | from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import AuthenticationForm
from web3 import Web3
from .forms import SignupForm
from Key.models import Key
#The views and templates in this app are placeholders. Will use the ones in the Pages app instead later.
#Currently deployed to local hardhat network only
provider = Web3.HTTPProvider('http://127.0.0.1:8545/')  # local Hardhat JSON-RPC endpoint
web3 = Web3(provider)  # module-level client shared by the views below
# Create your views here.
def home_view(request):
    """Render the public landing page; signed-in users go straight to the dashboard."""
    if request.user.is_authenticated:
        return redirect('dashboard')
    return render(request, 'User/home.html', {})
def dashboard_view(request):
    """Show the signed-in user's account details, ether balance and main keys."""
    if not request.user.is_authenticated:
        return redirect('home')
    account = request.user
    context = {
        'username': account.username,
        'address': account.address,
        # Chain balance is in wei; convert for display.
        'balance': web3.fromWei(web3.eth.get_balance(account.address), 'ether'),
        'keys': Key.objects.filter(user=account, is_main_key=True),
    }
    return render(request, 'User/dashboard.html', context)
def signup_view(request):
    """Handle user registration: show the form, create the account, log the user in."""
    context = {}
    # Check the HTTP method, not the POST dict: a POST with an empty body is
    # falsy and would otherwise be treated like a GET.
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            # Re-authenticate with the submitted credentials so login() gets a
            # backend-annotated user.
            user = authenticate(username=username, password=password)
            login(request, user)
            return redirect('home')
        # Invalid form: re-render with the bound form so errors are shown.
        context['signup_form'] = form
    else:
        context['signup_form'] = SignupForm()
    return render(request, 'User/signup.html', context)
def login_view(request):
    """Authenticate a user via Django's AuthenticationForm."""
    context = {}
    # Check the HTTP method, not the POST dict: a POST with an empty body is
    # falsy and would otherwise be treated like a GET.
    if request.method == 'POST':
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('home')
    else:
        form = AuthenticationForm()
    # Always expose the form (bound or fresh) so the template can render
    # errors; the original dropped it on the valid-form/failed-auth path.
    context['login_form'] = form
    return render(request, 'User/login.html', context)
def logout_request(request):
    """End the current session and return to the landing page."""
    logout(request)
    return redirect('home')
8041201eb138a5c9f79a0273b50738c537af71ad | 398 | py | Python | app/__init__.py | Paulvitalis200/Store-Manager-API | d61e91bff7fc242da2a93d1caf1012465c7c904a | [
"MIT"
] | null | null | null | app/__init__.py | Paulvitalis200/Store-Manager-API | d61e91bff7fc242da2a93d1caf1012465c7c904a | [
"MIT"
] | 4 | 2018-10-21T18:28:03.000Z | 2018-10-24T12:48:24.000Z | app/__init__.py | Paulstar200/Store-Manager-API | d61e91bff7fc242da2a93d1caf1012465c7c904a | [
"MIT"
] | null | null | null | from flask import Flask, Blueprint
from flask_jwt_extended import JWTManager
def create_app(config):
    """Application factory: build and configure the Flask app for `config`.

    `config` selects an entry from instance.config.app_config
    (e.g. 'development', 'production').
    """
    app = Flask(__name__)
    from instance.config import app_config
    app.config.from_object(app_config[config])
    app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
    from .api.V1 import productsale_api as psa
    app.register_blueprint(psa)
    # JWTManager registers itself on the app; the instance itself is unused,
    # so don't bind it to a dead local.
    JWTManager(app)
    return app
| 22.111111 | 54 | 0.738693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.08794 |
804199f0f26ad9829b5e2a973c124bc20bbb89c2 | 918 | py | Python | scripts/MSP_IMPROV_to_12_folds.py | cnut1648/Multimodal-Transformer | 8b86590b4d14dcd9e72ee2c9da9668a458780a16 | [
"MIT"
] | null | null | null | scripts/MSP_IMPROV_to_12_folds.py | cnut1648/Multimodal-Transformer | 8b86590b4d14dcd9e72ee2c9da9668a458780a16 | [
"MIT"
] | null | null | null | scripts/MSP_IMPROV_to_12_folds.py | cnut1648/Multimodal-Transformer | 8b86590b4d14dcd9e72ee2c9da9668a458780a16 | [
"MIT"
] | null | null | null | """
preprocess MSP IMPROV csv
run after MSP_IMPROV.py
"""
import os, torch, soundfile
from pathlib import Path
import librosa
import pandas as pd
# Emotion label mapping (kept for reference by downstream consumers).
ID2LABEL = {
    0: "neu", 1: "sad", 2: "ang", 3: "hap"
}

pwd = Path(__file__).parent
csv_dir = pwd / "../data/datasets/MSP-IMPROV"
out_dir = pwd / "../data/datasets/MSP-IMPROV_12fold"
os.makedirs(out_dir, exist_ok=True)

# Concatenate the six per-session CSVs.  pd.concat replaces the former
# per-frame DataFrame.append loop, which is deprecated and removed in
# pandas >= 2.0.
dataset = pd.concat(
    [pd.read_csv(csv_dir / f"post_session{sessionid}.csv") for sessionid in range(1, 7)]
)
dataset.reset_index(inplace=True)

# Split into 12 folds: one per (session, speaker-gender) pair, e.g. "M03".
for fold in range(1, 7):
    for gender in ["M", "F"]:
        partial = dataset[dataset["speaker"] == f"{gender}0{fold}"]
        assert len(partial) > 0, f"no rows for speaker {gender}0{fold}"
        partial.to_csv(
            str(out_dir / f"post_session{fold}{gender}.csv"), index=False
        )
print()
| 27 | 73 | 0.662309 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.308279 |
8043c3df7727468e10027ab3c916c11597ab2643 | 19,038 | py | Python | User/User.py | howiemac/evoke4 | 5d7af36c9fb23d94766d54c9c63436343959d3a8 | [
"BSD-3-Clause"
] | null | null | null | User/User.py | howiemac/evoke4 | 5d7af36c9fb23d94766d54c9c63436343959d3a8 | [
"BSD-3-Clause"
] | null | null | null | User/User.py | howiemac/evoke4 | 5d7af36c9fb23d94766d54c9c63436343959d3a8 | [
"BSD-3-Clause"
] | null | null | null | """ evoke base User object
IHM 2/2/2006 and thereafter
CJH 2012 and therafter
gives session-based user validation
The database users table must have an entry with uid==1 and id==guest.
This is used to indicate no valid login.
The database users table must have an entry with uid==2 . This is the
sys admin user.
Registration is verifed via email.
Where a user has a stage of "" (the default), this indicates that they
have not yet had their registration verified, and they will be unable
to login.
"""
import time
import re
import inspect
import crypt
import uuid
import hashlib
from base64 import urlsafe_b64encode as encode, urlsafe_b64decode as decode
from base import lib
from base.render import html
class User:
    # NOTE: per the module header, uid 1 must be 'guest' (no valid login) and
    # uid 2 the sys admin; an empty `stage` marks an unverified registration.

    def permitted(self,user):
        "permitted if own record or got edit permit"
        return self.uid==user.uid or user.can('edit user')

    @classmethod
    def hashed(self, pw, salt=None):
        "return a hashed password prepended with a salt, generated if not specified"
        # (despite the docstring, the salt is *appended*: "<sha512hex>:<salt>")
        salt = salt or uuid.uuid4().hex
        return hashlib.sha512(salt.encode()+pw.encode()).hexdigest()+':'+salt

    def check_password(self, pw):
        "fetch pw from database, split into salt and hash then compare against the pw supplied"
        # an empty stored pw is replaced by the hash of "" so the compare still works
        hashed = self.pw or self.hashed("")
        #salt, hash = hashed[:19], hashed[19:]
        hash,salt = hashed.split(':')
        # NOTE(review): a constant-time compare (hmac.compare_digest) would be
        # safer here -- confirm before changing.
        return self.hashed(pw, salt) == hashed
@classmethod
def fetch_user(cls,id):
    "return User object for given id, or return None if not found"
    users=cls.list(id=id)
    return users and users[0] or None

@classmethod
def fetch_user_by_email(cls,email):
    "return User object for given email, or return None if not found"
    users=cls.list(email=email)
    return users and users[0] or None

@classmethod
def fetch_if_valid(cls,id, pw):
    "authenticate password and id - return validated user instance"
    if id:
        user=cls.fetch_user(id)
        # print "VERIFIED",user.id,user.pw,id,pw, " mode:",getattr(user,'mode','NO MODE')
        # only accounts whose registration has been verified may log in
        if user and user.check_password(pw) and (user.stage=='verified'):
            return user #valid
    return None #invalid

@classmethod
def create(cls,req):
    "create a new user, using data from req"
    self=cls.new()
    self.store(req)#update and flush
    return self
def store(self,req):
    "update a user, using data from req"
    self.update(req)
    self.flush()
    return self

def remove(self,req):
    "delete an unverified user - called from Page_registrations.evo"
    # verified accounts are never deleted via this route
    if self.stage!='verified':
        self.delete()
        req.message='"%s" has been deleted' % self.id
    return self.view(req)
remove.permit='edit user'

def send_email(self,subject,body):
    "send an email to this user from the configured from-address"
    # (legacy Python 2 print statement - debug trace of the addresses used)
    print "email: ", self.Config.mailfrom,self.email
    lib.email(self.Config.mailfrom,self.email,subject,body)
###### permits ########################

def is_guest(self):
    "true for the guest account (uid 1)"
    return self.uid==1
as_guest=is_guest # this can be overridden elsewhere, to allow an "as_guest" mode, for non-guest users

def is_admin(self):
    "system admin?"
    return self.uid==2

def can(self,what):
    """
    permit checker - replacement for ob.allowed() which is no more (RIP...)
    - `what` can be a permit, in the form "task group"
    - `what` can be a method, in which case the permit of that method is checked, and the permitted() method of its class.
    - old form method permits (ie "group.task") are also supported
    - a user can have a master group, which gives unlimited access
    DO NOT CALL THIS METHOD FROM WITHIN A CLASS permitted METHOD or RECURSION WILL BE INFINITE!
    """
    # members of the master group can do anything
    if "master" in self.get_permits():
        return 1
    if inspect.ismethod(what):
        permit = getattr(what.im_func, 'permit', None)
        if permit=='guest':
            return 1 # ok regardless, if explicit guest permit
        if type(what).__name__=='instancemethod':
            # bound method: defer to the instance's own permitted() check
            if not (inspect.isclass(what.im_self) or what.im_self.permitted(self)):
                # print ">>>>>>>>>>>>> method",what,'NOT PERMITTED'
                return 0
        if not permit:
            return 1 #ok if permitted and no permit set
    else:
        permit=what
    # parse the permit string: "group.task" (legacy) or "task group"
    if permit.find('.')>-1: #retro compatibility
        group,task = permit.split(".",1)
    else:
        task,group = permit.split(" ",1)
    # print ">>>>>>>>>>>>> string",what,task,group,task in self.get_permits().get(group,[]),self.get_permits().get(group,[])
    return task in self.get_permits().get(group,[])

def get_permits(self):
    "returns the permits for a user, as a dictionary of {group:[tasks]}"
    # lazily built once per instance and cached on self.permits
    if not hasattr(self,"permits"):
        self.permits={}
        for k,v in ((i['group'],i['task']) for i in self.Permit.list(asObjects=False, user=self.uid)):
            if k in self.permits:
                self.permits[k].append(v)
            else:
                self.permits[k]=[v]
    return self.permits

def store_permits(self):
    "stores the permit dictionary (group:[tasks]}"
    # clear out existing permits for this user (only those in Config.permits, as other permits may be there also, and these should be retained)
    for group,tasks in self.Config.permits.items():
        self.list(asObjects=False,sql='delete from %s where user="%s" and `group`="%s" and task in %s' % (self.Permit.table,self.uid,group,lib.sql_list(tasks)))
    # store the new permits
    for group,tasks in self.permits.items():
        for task in tasks: # store the permit
            permit = self.Permit.new()
            permit.user = self.uid
            permit.group = group
            permit.task = task
            permit.flush()

def sorted_permit_items(self):
    "sorts Config.permits.items() so that master comes first"
    # (legacy Python 2 cmp-function form of sorted())
    return sorted(self.Config.permits.items(),(lambda x,y:(x[0]=='master' or x<y) and -1 or 0))

def create_permits(self):
    "creates permits"
    # marks the account verified and grants the configured default permits
    self.stage='verified'
    self.flush()
    self.permits=self.Config.default_permits #set opening permits
    self.store_permits()
###################### user validation ######################

def hook(self,req,ob,method,url):
    """req hook - to allow apps to add attributes to req
    This is called by dispatch.py, for req.user, immediately after calling req.user.refresh() - so
    req.user can also be modified reliably via this hook.
    """
    pass
refresh=hook # backwards compatibility (IHM 2014), in case refresh has been overridden by an app

@classmethod
def validate_user(cls,req):
    "hook method to allow <app>.User subclass to override the default validation and permit setting"
    req.user=cls.validated_user(req)
    req.user.get_permits()
    # print "req.user set to: ",req.user

@classmethod
def validated_user(cls, req):
    """login validation is now handled by Twisted.cred. If we have got this far
    then the password has been successfully checked and the users id is
    available as req.request.avatarId
    """
    user= cls.Session.fetch_user(req)
    # print "VALIDATED USER:",user.id
    # play around with cookies
    if user.uid>1 and req.get("evokeLogin"):
        #found a valid user in the request, so set the cookies
        forever = 10*365*24*3600 # 10 years on
        # req.set_cookie('evokeID',user.cookie_data(),expires=req.get("keepLogin") and forever or None)
        if req.get('evokePersist'): #user wants name remembered
            # print "REMEMBER ME"
            req.set_cookie('evokePersist',user.id,expires=forever)
        elif req.cookies.get('evokePersist')==user.id: #user no longer wants name remembered
            req.clear_cookie('evokePersist')
    return user

def login_failure(self,req):
    "checks login form entries for validity - this is called only for guest user, sometime after validate_user().."
    if '__user__' in req: #we must have logged in and failed login validation to get here
        user=self.fetch_user(req.__user__)
        if user and not user.stage:
            req.error='registration for "%s" has not yet been verified' % req.__user__
        else: # CJH: not good practice to distinguish which of username and password is valid, so....
            req.error="username or password is invalid - please try again - have you registered?"
        return 1
    return 0 #we have a guest and not a login failure
######################## form handlers #######################

def login(self,req):
    "show the login form"
    return self.login_form(req)
login.permit="guest"

def logout(self, req):
    "expire the user and password cookie"
    req.clear_cookie('evokeID')
    req.request.getSession().expire()
    if req.return_to:
        return req.redirect(req.return_to)
    req.message='%s has been logged out' % req.user.id
    return req.redirect(self.fetch_user('guest').url('login')) #use redirect to force clean new login

def register(self,req):
    "create new user record"
    # three Config.registration_method modes: 'admin' (admin creates users),
    # 'approve' (admin confirms by email link), 'self' (user confirms, default)
    if self.Config.registration_method=='admin': # registration by admin only
        if not req.user.can('edit user'):
            return self.error(req,'access denied - registration must be done by admin')
    if 'pass2' in req: #form must have been submitted, so process it
        uob=self.fetch_user(req.username)
        eob=self.fetch_user_by_email(req.email)
        # a retry re-uses an existing *unverified* account of the same name
        retry=(req.redo==req.username) and uob and (not uob.stage)
        if not req.username:
            req.error='please enter a username'
        elif uob and not retry:
            req.error='username "%s" is taken, please try another' % req.username
        elif not re.match('.*@.*' ,req.email):
            req.error='please enter a valid email address'
        elif eob and ((not retry) or (eob.uid!=uob.uid)):
            req.error='you already have a login for this email address'
        elif not req.pass1:
            req.error='please enter a password'
        elif req.pass2!=req.pass1:
            req.error='passwords do not match - please re-enter'
        else: #must be fine
            uob=uob or self.new()
            uob.id=req.username
            uob.pw=self.hashed(req.pass1) # hash the password
            uob.email=req.email
            uob.when=lib.DATE()
            uob.flush() #store the new user
            key=uob.verification_key()
            site=self.get_sitename(req)
            if self.Config.registration_method=='admin':
                # registration by admin only
                return uob.verify_manually(req)
            elif self.Config.registration_method=='approve':
                # registration with admin approval
                # (O/S : this should maybe give email confirmation to the new user when admin verifies them?)
                admin=self.get(2) #O/S we should allow a nominated other with 'user edit' permit to act as admin for this purpose....
                text="""
Hi %s
%s wants to register with us at %s, and gives the following introduction:
-----------------------
%s
-----------------------
To approve their registration, simply click the link below:
-----------------------
http://%s%s
-----------------------
""" % (admin.id,req.username,site,req.story,req.get_host(),(self.class_url('verify?key=%s') % key))
                lib.email(self.Config.mailfrom,admin.email,subject="%s registration verification" % site,text=text)#send the email
                return self.get(1).registration_requested(req)
            ################################################
            #else we assume that registration_method is 'self' (the default)
            # registration with self confirmation via email
            text="""
Hi %s
Thanks for registering with us at %s. We look forward to seeing you around the site.
To complete your registration, you need to confirm that you got this email. To do so, simply click the link below:
-----------------------
http://%s%s
-----------------------
If clicking the link doesn't work, just copy and paste the entire address into your browser. If you're still having problems, simply forward this email to %s and we'll do our best to help you.
Welcome to %s.
""" % (req.username,site,req.get_host(),(self.class_url('verify?key=%s') % key),self.Config.mailto,site)
            # (legacy Python 2 print statement - debug trace)
            print "!!!!!!!! REGISTRATION !!!!!!!!:%s:%s" % (req.username,key)
            lib.email(self.Config.mailfrom,req.email,subject="%s registration verification" % site,text=text)#send the email
            req.message='registration of "%s" accepted' % req.username
            return self.get(1).registered_form(req)
    return self.register_form(req)
register.permit="guest" #dodge the login validation
def verify(cls,req):
    "called from registration email to complete the registration process"
    try:
        #check key
        # prepare key - need to strip whitespace and make sure the length
        # is a multiple of 4
        key = req.key.strip()
        if len(key) % 4:
            key = key + ('=' * (4 - len(key)%4))
        req.key = key
        try:
            uid,id,pw=decode(req.key).split(',')
        except:
            uid,id,pw=decode(req.key+'=').split(',') # bodge it... some browsers dont return a trailing '='
        # print '>>>>>',uid,id,pw
        self=cls.get(int(uid))
        # the key embeds uid,id,pw (see verification_key); all must match
        if (self.id==id) and (self.pw==pw):
            if not self.stage: # not already verified, so ..
                req.__user__=id
                req.__pass__=pw
                self.create_permits()
                if self.Config.registration_method=='self':
                    self.validate_user(req) #create the login cookie
                    return req.redirect(self.url("view?message=%s" % lib.url_safe('your registration has been verified'))) #use redirect to force clean new login
                else:
                    return req.redirect(self.url("view?message=%s" % lib.url_safe('registration of "%s" has been verified' % id)))
    except:
        raise
    # NOTE(review): this passes the message where error() expects req, and
    # `self` is unbound when the key fails to decode -- confirm intent.
    return self.error('verification failure')
verify.permit='guest'
# old-style classmethod wrapping, done after setting the .permit attribute
verify=classmethod(verify)

def verify_manually(self,req):
    "manually verify a registration"
    if not self.stage:
        self.create_permits()
    req.message='registration for "%s" has been verified' % self.id
    return self.view(req)
verify_manually.permit='edit user'

def verification_key(self):
    "url-safe base64 key embedding uid, id and pw - used in verification emails"
    return encode("%s,%s,%s" % (self.uid,self.id,self.pw))

# TODO - password reset mechanism
def reminder(self,req):
    "send password reminder email"
    # disabled: the early return below makes the rest of this method dead code
    return ''
    #self.logout(req)
    # print "User.reminder"
    if 'id' in req or 'email' in req: #form must have been submitted, so process it
        # User.reminder req has id or email
        if not (req.id or req.email):
            req.error='please enter a registered username or email address'
        else:
            user=self.fetch_user(req.id) or self.fetch_user_by_email(req.email)
            # print "User.reminder user=", user, user.uid, user.email
            if not user:
                req.error='%s is not registered' % (req.id and "username" or "email address",)
            else: #must be fine!
                user.send_email('%s password reminder' % user.id,'your password for %s is: %s' % (req.get_host(),user.pw))
                req.message='your password has been emailed to you'
                return req.redirect(self.Page.get(1).url('view?message=%s' % lib.url_safe(req.message))) # redirect to check permissions
    return self.reminder_form(req)
reminder.permit="guest" #dodge the login validation
###### user admin ######################

def edit(self, req):
    "edit user details, including permits"
    if 'pass2' in req: #form must have been submitted, so process it
        if self.uid==req.user.uid:#ie if editing your own permissions
            req['user.edit']=1 #for safety - dont allow you to lose your own security access
        if 'pw' in req and not req.pw: #no password entered, so don't change it
            del req["pw"]
        if self.Config.user_email_required and not re.match('.*@.*' ,req.email):
            req.error='please enter a valid email address'
        elif self.Config.user_email_required and (self.email!=req.email) and self.fetch_user_by_email(req.email):
            req.error='you already have a login for this email address'
        elif req.pass2!=req.pass1:
            req.error='passwords do not match - please re-enter'
        else: #must be fine!
            if (self.uid>2) and req.user.can('edit user'): # if not admin user, and can edit users, then update permits
                # rebuild the permit dict from the "group.task" checkboxes in req
                self.permits={}
                for group,tasks in self.Config.permits.items():
                    for task in tasks:
                        if req.get(group+'.'+task):
                            if group in self.permits:
                                self.permits[group].append(task)
                            else:
                                self.permits[group]=[task]
                self.store_permits()
            if req.pass1:
                self.pw=self.hashed(req.pass1)
            self.store(req)
            req.message='details updated for "%s"' % self.id
            #following not needed for session-based login
            ## if self.uid==req.user.uid:
            # if self.pw!=req.user.pw:#user is altering own details, so fix the login
            # req.__user__=self.id
            # req.__pass__=self.pw
            # self.validate_user(req) #create the login cookie
            return self.finish_edit(req) #redirects appropriately
    return self.edit_form(req)
edit.permit='edit user'

def finish_edit(self,req):
    "returns to user menu (if allowed)"
    if req.user.can('edit user'):
        return self.redirect(req,'registrations')
    return self.redirect(req)
########## utilities ########

def get_HTML_title(self,ob,req):
    "HTML title - used by wrappers - uses req.title if it exists, otherwise ob.get_title() if it exists"
    return "%s %s" % (self.get_sitename(req),req.title or (hasattr(ob,"get_title") and ob.get_title()) or "",)

def get_sitename(self,req):
    "used in emails, HTML title etc."
    # configured sitename, falling back to the request's host header
    return self.Config.sitename or req.get_host()

########## landing places ##################

@classmethod
def welcome(self,req):
    "the welcome page, when no object/instance is specified in the URL"
    if req.return_to:
        return req.redirect(req.return_to)
    return req.redirect(self.Page.get(self.Config.default_page).url())
    # or use this if Page is not installed or in use:
    # return self.get(1).view(req)
def view(self,req):
    "guest gets the registrations listing; everyone else their own edit form"
    if self.uid==1:
        return self.registrations(req)
    return self.edit_form(req)
home=view

################# errors and messages ################

@classmethod
def error(self,req,errormsg=''):
    "set req.error and show the error form (falling back to the bare message)"
    req.error=errormsg or req.error or 'undefined error'
    try:
        return req.user.error_form(req)
    except:
        return req.error

@classmethod
def ok(self,req,msg=''):
    "set req.message and show the (shared) error/message form"
    req.message=msg or req.message or ''
    return req.user.error_form(req)
######################## forms #######################
# @html renders the matching .evo template; the method body just sets up req.

@html
def error_form(self,req):
    pass

@html
def login_form(self,req):
    req.title='login'

@html
def register_form(self,req):
    pass

@html
def registered_form(self,req):
    pass

@html
def registration_requested(self,req):
    pass

@html
def registrations(self,req):
    "listing of user registrations, allowing verification"
    req.items=self.list(orderby='uid desc')
registrations.permit='edit user'

@html
def reminder_form(self,req):
    pass

@html
def edit_form(self,req):
    pass
| 35.718574 | 193 | 0.643082 | 18,316 | 0.962076 | 0 | 0 | 3,263 | 0.171394 | 0 | 0 | 9,007 | 0.473106 |
804416e6e25ce3ed4c5dc637ff0b100cabf78eb4 | 242 | py | Python | richcomments/templatetags/richcomments.py | praekelt/django-richcomments | e1b2e123bf46135fd2bdf8fa810e4995e641db72 | [
"BSD-3-Clause"
] | 2 | 2015-01-22T19:16:06.000Z | 2015-04-28T19:12:45.000Z | richcomments/templatetags/richcomments.py | praekelt/django-richcomments | e1b2e123bf46135fd2bdf8fa810e4995e641db72 | [
"BSD-3-Clause"
] | null | null | null | richcomments/templatetags/richcomments.py | praekelt/django-richcomments | e1b2e123bf46135fd2bdf8fa810e4995e641db72 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from django.template.loader import render_to_string
# tag registry for this template-tag module (required by Django)
register = template.Library()

@register.simple_tag
def richcomments_static():
    """Render the static asset includes for rich comments from the bundled template."""
    return render_to_string('richcomments/templatetags/richcomments_static.html')
| 26.888889 | 81 | 0.834711 | 0 | 0 | 0 | 0 | 129 | 0.533058 | 0 | 0 | 52 | 0.214876 |
80445a8b0077e05e95163bce0920494788da568d | 1,203 | py | Python | rosys/pathplanning/obstacle_map_demo.py | zauberzeug/rosys | 10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8 | [
"MIT"
] | 1 | 2022-02-20T08:21:07.000Z | 2022-02-20T08:21:07.000Z | rosys/pathplanning/obstacle_map_demo.py | zauberzeug/rosys | 10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8 | [
"MIT"
] | 1 | 2022-03-08T12:46:09.000Z | 2022-03-08T12:46:09.000Z | rosys/pathplanning/obstacle_map_demo.py | zauberzeug/rosys | 10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from nicegui import ui
import pylab as pl
import numpy as np
import time
from rosys.world.pose import Pose
from rosys.world.spline import Spline
from grid import Grid
from robot_renderer import RobotRenderer
from obstacle_map import ObstacleMap
import plot_tools as pt
# build a demo grid and two rectangular obstacles
grid = Grid((30, 40, 36), (0.45, -0.05, 4.0, 3.0))
obstacles = [
    [0.5, 1.5, 1.4, 0.1],
    [2.7, 1.5, 1.8, 0.1],
]
robot_renderer = RobotRenderer.from_size(0.77, 1.21, 0.445)
t = time.time()
obstacle_map = ObstacleMap.from_list(grid, obstacles, robot_renderer)
# show how long the obstacle-map construction took
ui.label('%.3f ms' % ((time.time() - t) * 1000))
# start/end poses as [x, y, yaw]
start = [1.0, 0.5, 0]
end = [2.3, 0.9, np.deg2rad(90)]
spline = Spline.from_poses(
    Pose(x=start[0], y=start[1], yaw=start[2]),
    Pose(x=end[0], y=end[1], yaw=end[2]),
)
with ui.plot():
    pt.show_obstacle_map(obstacle_map)
    pl.autoscale(False)
    # red ('C3') if the pose/spline collides, green ('C2') otherwise
    pt.plot_robot(robot_renderer, start, 'C3' if obstacle_map.test(*start) else 'C2')
    pt.plot_robot(robot_renderer, end, 'C3' if obstacle_map.test(*end) else 'C2')
    pt.plot_spline(spline, 'C3' if obstacle_map.test_spline(spline) else 'C2')
with ui.plot():
    # distance-field slice for one orientation layer
    pl.imshow(obstacle_map.dist_stack[:, :, 9], cmap=pl.cm.gray)
ui.run()
| 28.642857 | 85 | 0.682461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.045719 |
8044c8aa6dbc56f89bc318d636c18413c449c80a | 599 | py | Python | code/modern-python/quadratic.py | dushyantkhosla/testing4ds | e6f69f7ff46225a491da00ac994e036633d0ca64 | [
"MIT"
] | null | null | null | code/modern-python/quadratic.py | dushyantkhosla/testing4ds | e6f69f7ff46225a491da00ac994e036633d0ca64 | [
"MIT"
] | null | null | null | code/modern-python/quadratic.py | dushyantkhosla/testing4ds | e6f69f7ff46225a491da00ac994e036633d0ca64 | [
"MIT"
] | null | null | null | from math import sqrt
from typing import Tuple
def quadratic(a: float, b: float, c: float) -> Tuple[float, float]:
    '''Return the two real roots of a*x**2 + b*x + c = 0.

    For example:
    >>> x1, x2 = quadratic(a=4, b=11, c=7)
    >>> x1
    -1.0
    >>> x2
    -1.75
    >>> 4*x1**2 + 11*x1 + 7
    0.0
    >>> 4*x2**2 + 11*x2 + 7
    0.0
    '''
    # Closed-form solution: (-b +/- sqrt(b^2 - 4ac)) / 2a.
    root = sqrt(b**2.0 - 4.0*a*c)
    denominator = 2.0 * a
    return (-b + root) / denominator, (-b - root) / denominator
| 23.96 | 67 | 0.42571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 336 | 0.560935 |
804522328bccd7ddf45bddcd59e540005384feed | 391 | py | Python | Python/973.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 1 | 2020-12-10T05:36:15.000Z | 2020-12-10T05:36:15.000Z | Python/973.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | null | null | null | Python/973.py | JWang169/LintCodeJava | b75b06fa1551f5e4d8a559ef64e1ac29db79c083 | [
"CNRI-Python"
] | 3 | 2020-04-06T05:55:08.000Z | 2021-08-29T14:26:54.000Z | import heapq
class Solution:
    """LeetCode 973: K Closest Points to Origin."""

    def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:
        """Return the K points closest to the origin.

        Uses heapq.nsmallest with the squared Euclidean distance as key:
        O(n log K) and no manual heap bookkeeping, versus pushing all n
        points and popping K.  Squaring avoids sqrt without changing the
        ordering.  (Order among equidistant points is unspecified.)
        """
        return heapq.nsmallest(K, points, key=lambda p: p[0] * p[0] + p[1] * p[1])
804795ddc70fcb743a2b2214a7d1fe74c8e9ad6c | 2,236 | py | Python | tests/test_sphnf.py | JohnEdChristensen/NiggliOptimize | e90b8c66e7b7e560c460502ee24991af775c625b | [
"MIT"
] | null | null | null | tests/test_sphnf.py | JohnEdChristensen/NiggliOptimize | e90b8c66e7b7e560c460502ee24991af775c625b | [
"MIT"
] | null | null | null | tests/test_sphnf.py | JohnEdChristensen/NiggliOptimize | e90b8c66e7b7e560c460502ee24991af775c625b | [
"MIT"
] | null | null | null | import pytest
import numpy as np
"""
def test_mono_39():
from pg_comp.base_mono import *
with open("tests/test_output/base_mono_1_200_n.out","r") as f:
n_500 = int(f.readline().strip())
srHNFs = []
for n in range(1,201):
temp = base_mono_37_39(n)
for t in temp:
if len(t) >0:
srHNFs.append(t)
assert len(srHNFs) == n_500
brute = []
with open("tests/test_output/base_mono_39_1_200_srHNFs.out","r") as f:
HNF = []
for line in f:
if len(line.strip().split()) == 0:
brute.append(HNF)
HNF = []
else:
HNF.append([int(i) for i in line.strip().split()])
for t in srHNFs:
assert t in brute
def test_mono_29():
from pg_comp.base_mono import *
with open("tests/test_output/base_mono_1_200_n.out","r") as f:
n_500 = int(f.readline().strip())
srHNFs = []
for n in range(1,201):
temp = base_mono_29_30(n)
for t in temp:
if len(t) >0:
srHNFs.append(t)
assert len(srHNFs) == n_500
brute = []
with open("tests/test_output/base_mono_29_1_200_srHNFs.out","r") as f:
HNF = []
for line in f:
if len(line.strip().split()) == 0:
brute.append(HNF)
HNF = []
else:
HNF.append([int(i) for i in line.strip().split()])
for t in srHNFs:
assert t in brute
def test_mono_28():
from pg_comp.base_mono import *
with open("tests/test_output/base_mono_1_200_n.out","r") as f:
n_500 = int(f.readline().strip())
srHNFs = []
for n in range(1,201):
temp = base_mono_28(n)
for t in temp:
if len(t) >0:
srHNFs.append(t)
assert len(srHNFs) == n_500
brute = []
with open("tests/test_output/base_mono_28_1_200_srHNFs.out","r") as f:
HNF = []
for line in f:
if len(line.strip().split()) == 0:
brute.append(HNF)
HNF = []
else:
HNF.append([int(i) for i in line.strip().split()])
for t in srHNFs:
assert t in brute
"""
| 27.268293 | 74 | 0.515206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,201 | 0.984347 |